xref: /netbsd-src/external/gpl3/gdb/dist/bfd/elf32-spu.c (revision d90047b5d07facf36e6c01dcc0bded8997ce9cc2)
1 /* SPU specific support for 32-bit ELF
2 
3    Copyright (C) 2006-2019 Free Software Foundation, Inc.
4 
5    This file is part of BFD, the Binary File Descriptor library.
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License as published by
9    the Free Software Foundation; either version 3 of the License, or
10    (at your option) any later version.
11 
12    This program is distributed in the hope that it will be useful,
13    but WITHOUT ANY WARRANTY; without even the implied warranty of
14    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
15    GNU General Public License for more details.
16 
17    You should have received a copy of the GNU General Public License along
18    with this program; if not, write to the Free Software Foundation, Inc.,
19    51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA.  */
20 
21 #include "sysdep.h"
22 #include "libiberty.h"
23 #include "bfd.h"
24 #include "bfdlink.h"
25 #include "libbfd.h"
26 #include "elf-bfd.h"
27 #include "elf/spu.h"
28 #include "elf32-spu.h"
29 
30 /* We use RELA style relocs.  Don't define USE_REL.  */
31 
32 static bfd_reloc_status_type spu_elf_rel9 (bfd *, arelent *, asymbol *,
33 					   void *, asection *,
34 					   bfd *, char **);
35 
36 /* Values of type 'enum elf_spu_reloc_type' are used to index this
37    array, so it must be declared in the order of that type.  */
38 
static reloc_howto_type elf_howto_table[] = {
  /* HOWTO argument order: type, rightshift, size, bitsize, pc_relative,
     bitpos, complain_on_overflow, special_function, name,
     partial_inplace, src_mask, dst_mask, pcrel_offset.
     Entries must stay in 'enum elf_spu_reloc_type' order (see the
     comment above) since the r_type value indexes this table.  */
  HOWTO (R_SPU_NONE,	   0, 3,  0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_NONE",
	 FALSE, 0, 0x00000000, FALSE),
  HOWTO (R_SPU_ADDR10,	   4, 2, 10, FALSE, 14, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR10",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16,	   2, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_HI, 16, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16_HI",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR16_LO,  0, 2, 16, FALSE,  7, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR16_LO",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_ADDR18,	   0, 2, 18, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR18",
	 FALSE, 0, 0x01ffff80, FALSE),
  HOWTO (R_SPU_ADDR32,	   0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_REL16,	   2, 2, 16,  TRUE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_REL16",
	 FALSE, 0, 0x007fff80, TRUE),
  HOWTO (R_SPU_ADDR7,	   0, 2,  7, FALSE, 14, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADDR7",
	 FALSE, 0, 0x001fc000, FALSE),
  /* The two 9-bit pc-relative relocs have split bit fields, so they
     need a special function rather than bfd_elf_generic_reloc.  */
  HOWTO (R_SPU_REL9,	   2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,		"SPU_REL9",
	 FALSE, 0, 0x0180007f, TRUE),
  HOWTO (R_SPU_REL9I,	   2, 2,  9,  TRUE,  0, complain_overflow_signed,
	 spu_elf_rel9,		"SPU_REL9I",
	 FALSE, 0, 0x0000c07f, TRUE),
  HOWTO (R_SPU_ADDR10I,	   0, 2, 10, FALSE, 14, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR10I",
	 FALSE, 0, 0x00ffc000, FALSE),
  HOWTO (R_SPU_ADDR16I,	   0, 2, 16, FALSE,  7, complain_overflow_signed,
	 bfd_elf_generic_reloc, "SPU_ADDR16I",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_REL32,	   0, 2, 32, TRUE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_REL32",
	 FALSE, 0, 0xffffffff, TRUE),
  HOWTO (R_SPU_ADDR16X,	   0, 2, 16, FALSE,  7, complain_overflow_bitfield,
	 bfd_elf_generic_reloc, "SPU_ADDR16X",
	 FALSE, 0, 0x007fff80, FALSE),
  HOWTO (R_SPU_PPU32,	   0, 2, 32, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU32",
	 FALSE, 0, 0xffffffff, FALSE),
  HOWTO (R_SPU_PPU64,	   0, 4, 64, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_PPU64",
	 FALSE, 0, -1, FALSE),
  HOWTO (R_SPU_ADD_PIC,	     0, 0, 0, FALSE,  0, complain_overflow_dont,
	 bfd_elf_generic_reloc, "SPU_ADD_PIC",
	 FALSE, 0, 0x00000000, FALSE),
};
95 
/* Sections with fixed names that need non-default type/flags.
   Terminated by a NULL-name sentinel entry.  */
static struct bfd_elf_special_section const spu_elf_special_sections[] = {
  { "._ea", 4, 0, SHT_PROGBITS, SHF_WRITE },
  { ".toe", 4, 0, SHT_NOBITS, SHF_ALLOC },
  { NULL, 0, 0, 0, 0 }
};
101 
102 static enum elf_spu_reloc_type
103 spu_elf_bfd_to_reloc_type (bfd_reloc_code_real_type code)
104 {
105   switch (code)
106     {
107     default:
108       return (enum elf_spu_reloc_type) -1;
109     case BFD_RELOC_NONE:
110       return R_SPU_NONE;
111     case BFD_RELOC_SPU_IMM10W:
112       return R_SPU_ADDR10;
113     case BFD_RELOC_SPU_IMM16W:
114       return R_SPU_ADDR16;
115     case BFD_RELOC_SPU_LO16:
116       return R_SPU_ADDR16_LO;
117     case BFD_RELOC_SPU_HI16:
118       return R_SPU_ADDR16_HI;
119     case BFD_RELOC_SPU_IMM18:
120       return R_SPU_ADDR18;
121     case BFD_RELOC_SPU_PCREL16:
122       return R_SPU_REL16;
123     case BFD_RELOC_SPU_IMM7:
124       return R_SPU_ADDR7;
125     case BFD_RELOC_SPU_IMM8:
126       return R_SPU_NONE;
127     case BFD_RELOC_SPU_PCREL9a:
128       return R_SPU_REL9;
129     case BFD_RELOC_SPU_PCREL9b:
130       return R_SPU_REL9I;
131     case BFD_RELOC_SPU_IMM10:
132       return R_SPU_ADDR10I;
133     case BFD_RELOC_SPU_IMM16:
134       return R_SPU_ADDR16I;
135     case BFD_RELOC_32:
136       return R_SPU_ADDR32;
137     case BFD_RELOC_32_PCREL:
138       return R_SPU_REL32;
139     case BFD_RELOC_SPU_PPU32:
140       return R_SPU_PPU32;
141     case BFD_RELOC_SPU_PPU64:
142       return R_SPU_PPU64;
143     case BFD_RELOC_SPU_ADD_PIC:
144       return R_SPU_ADD_PIC;
145     }
146 }
147 
148 static bfd_boolean
149 spu_elf_info_to_howto (bfd *abfd,
150 		       arelent *cache_ptr,
151 		       Elf_Internal_Rela *dst)
152 {
153   enum elf_spu_reloc_type r_type;
154 
155   r_type = (enum elf_spu_reloc_type) ELF32_R_TYPE (dst->r_info);
156   /* PR 17512: file: 90c2a92e.  */
157   if (r_type >= R_SPU_max)
158     {
159       /* xgettext:c-format */
160       _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
161 			  abfd, r_type);
162       bfd_set_error (bfd_error_bad_value);
163       return FALSE;
164     }
165   cache_ptr->howto = &elf_howto_table[(int) r_type];
166   return TRUE;
167 }
168 
169 static reloc_howto_type *
170 spu_elf_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
171 			   bfd_reloc_code_real_type code)
172 {
173   enum elf_spu_reloc_type r_type = spu_elf_bfd_to_reloc_type (code);
174 
175   if (r_type == (enum elf_spu_reloc_type) -1)
176     return NULL;
177 
178   return elf_howto_table + r_type;
179 }
180 
181 static reloc_howto_type *
182 spu_elf_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
183 			   const char *r_name)
184 {
185   unsigned int i;
186 
187   for (i = 0; i < sizeof (elf_howto_table) / sizeof (elf_howto_table[0]); i++)
188     if (elf_howto_table[i].name != NULL
189 	&& strcasecmp (elf_howto_table[i].name, r_name) == 0)
190       return &elf_howto_table[i];
191 
192   return NULL;
193 }
194 
195 /* Apply R_SPU_REL9 and R_SPU_REL9I relocs.  */
196 
static bfd_reloc_status_type
spu_elf_rel9 (bfd *abfd, arelent *reloc_entry, asymbol *symbol,
	      void *data, asection *input_section,
	      bfd *output_bfd, char **error_message)
{
  bfd_size_type octets;
  bfd_vma val;
  long insn;

  /* If this is a relocatable link (output_bfd test tells us), just
     call the generic function.  Any adjustment will be done at final
     link time.  */
  if (output_bfd != NULL)
    return bfd_elf_generic_reloc (abfd, reloc_entry, symbol, data,
				  input_section, output_bfd, error_message);

  if (reloc_entry->address > bfd_get_section_limit (abfd, input_section))
    return bfd_reloc_outofrange;
  octets = reloc_entry->address * bfd_octets_per_byte (abfd);

  /* Get symbol value.  Common symbols resolve to offset zero within
     their (eventual) section.  */
  val = 0;
  if (!bfd_is_com_section (symbol->section))
    val = symbol->value;
  if (symbol->section->output_section)
    val += symbol->section->output_section->vma;

  val += reloc_entry->addend;

  /* Make it pc-relative.  */
  val -= input_section->output_section->vma + input_section->output_offset;

  /* The field holds a word (4-byte) offset; it must fit in 9 signed
     bits, i.e. -256..255 after the shift.  The unsigned compare below
     catches both ends of the range at once.  */
  val >>= 2;
  if (val + 256 >= 512)
    return bfd_reloc_overflow;

  insn = bfd_get_32 (abfd, (bfd_byte *) data + octets);

  /* Move two high bits of value to REL9I and REL9 position.
     The mask will take care of selecting the right field.  */
  val = (val & 0x7f) | ((val & 0x180) << 7) | ((val & 0x180) << 16);
  insn &= ~reloc_entry->howto->dst_mask;
  insn |= val & reloc_entry->howto->dst_mask;
  bfd_put_32 (abfd, insn, (bfd_byte *) data + octets);
  return bfd_reloc_ok;
}
243 
244 static bfd_boolean
245 spu_elf_new_section_hook (bfd *abfd, asection *sec)
246 {
247   if (!sec->used_by_bfd)
248     {
249       struct _spu_elf_section_data *sdata;
250 
251       sdata = bfd_zalloc (abfd, sizeof (*sdata));
252       if (sdata == NULL)
253 	return FALSE;
254       sec->used_by_bfd = sdata;
255     }
256 
257   return _bfd_elf_new_section_hook (abfd, sec);
258 }
259 
260 /* Set up overlay info for executables.  */
261 
static bfd_boolean
spu_elf_object_p (bfd *abfd)
{
  if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
    {
      unsigned int i, num_ovl, num_buf;
      Elf_Internal_Phdr *phdr = elf_tdata (abfd)->phdr;
      Elf_Internal_Ehdr *ehdr = elf_elfheader (abfd);
      Elf_Internal_Phdr *last_phdr = NULL;

      /* Walk the program headers, numbering each PT_LOAD segment
	 flagged PF_OVERLAY.  A new overlay buffer starts whenever the
	 vaddr differs from the previous overlay segment in the low
	 18 bits.  */
      for (num_buf = 0, num_ovl = 0, i = 0; i < ehdr->e_phnum; i++, phdr++)
	if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_OVERLAY) != 0)
	  {
	    unsigned int j;

	    ++num_ovl;
	    if (last_phdr == NULL
		|| ((last_phdr->p_vaddr ^ phdr->p_vaddr) & 0x3ffff) != 0)
	      ++num_buf;
	    last_phdr = phdr;
	    /* Tag every section contained in this segment with the
	       overlay and buffer numbers.  Section 0 is skipped.  */
	    for (j = 1; j < elf_numsections (abfd); j++)
	      {
		Elf_Internal_Shdr *shdr = elf_elfsections (abfd)[j];

		if (ELF_SECTION_SIZE (shdr, phdr) != 0
		    && ELF_SECTION_IN_SEGMENT (shdr, phdr))
		  {
		    asection *sec = shdr->bfd_section;
		    spu_elf_section_data (sec)->u.o.ovl_index = num_ovl;
		    spu_elf_section_data (sec)->u.o.ovl_buf = num_buf;
		  }
	      }
	  }
    }
  return TRUE;
}
298 
299 /* Specially mark defined symbols named _EAR_* with BSF_KEEP so that
300    strip --strip-unneeded will not remove them.  */
301 
302 static void
303 spu_elf_backend_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, asymbol *sym)
304 {
305   if (sym->name != NULL
306       && sym->section != bfd_abs_section_ptr
307       && strncmp (sym->name, "_EAR_", 5) == 0)
308     sym->flags |= BSF_KEEP;
309 }
310 
311 /* SPU ELF linker hash table.  */
312 
struct spu_link_hash_table
{
  /* Generic ELF hash table; must be first so casts work.  */
  struct elf_link_hash_table elf;

  /* Linker parameters supplied by the emulation code.  */
  struct spu_elf_params *params;

  /* Shortcuts to overlay sections.  */
  asection *ovtab;
  asection *init;
  asection *toe;
  asection **ovl_sec;

  /* Count of stubs in each overlay section.  */
  unsigned int *stub_count;

  /* The stub section for each overlay section.  */
  asection **stub_sec;

  /* Overlay manager entry symbols (load/return, or the icache
     branch/call handlers for the soft-icache flavour).  */
  struct elf_link_hash_entry *ovly_entry[2];

  /* Number of overlay buffers.  */
  unsigned int num_buf;

  /* Total number of overlays.  */
  unsigned int num_overlays;

  /* For soft icache.  */
  unsigned int line_size_log2;
  unsigned int num_lines_log2;
  unsigned int fromelem_size_log2;

  /* How much memory we have.  */
  unsigned int local_store;

  /* Count of overlay stubs needed in non-overlay area.  */
  unsigned int non_ovly_stub;

  /* Pointer to the fixup section.  */
  asection *sfixup;

  /* Set on error.  */
  unsigned int stub_err : 1;
};
356 
/* Hijack the generic got fields for overlay stub accounting.  */

struct got_entry
{
  /* Next entry on the symbol's list.  */
  struct got_entry *next;
  /* Overlay index the reference originates from.  */
  unsigned int ovl;
  union {
    bfd_vma addend;
    bfd_vma br_addr;
  };
  /* Address assigned to the stub itself.  */
  bfd_vma stub_addr;
};

/* Retrieve the SPU hash table from generic link info, or NULL if the
   table belongs to a different ELF backend.  */
#define spu_hash_table(p) \
  (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \
  == SPU_ELF_DATA ? ((struct spu_link_hash_table *) ((p)->hash)) : NULL)
373 
/* One edge in the call graph built for stack/overlay analysis.  */
struct call_info
{
  /* The function called.  */
  struct function_info *fun;
  /* Next call edge from the same caller.  */
  struct call_info *next;
  unsigned int count;
  unsigned int max_depth;
  unsigned int is_tail : 1;
  unsigned int is_pasted : 1;
  unsigned int broken_cycle : 1;
  unsigned int priority : 13;
};
385 
/* Per-function node in the call graph.  */
struct function_info
{
  /* List of functions called.  Also branches to hot/cold part of
     function.  */
  struct call_info *call_list;
  /* For hot/cold part of function, point to owner.  */
  struct function_info *start;
  /* Symbol at start of function.  */
  union {
    Elf_Internal_Sym *sym;
    struct elf_link_hash_entry *h;
  } u;
  /* Function section.  */
  asection *sec;
  asection *rodata;
  /* Where last called from, and number of sections called from.  */
  asection *last_caller;
  unsigned int call_count;
  /* Address range of (this part of) function.  */
  bfd_vma lo, hi;
  /* Offset where we found a store of lr, or -1 if none found.  */
  bfd_vma lr_store;
  /* Offset where we found the stack adjustment insn.  */
  bfd_vma sp_adjust;
  /* Stack usage.  */
  int stack;
  /* Distance from root of call tree.  Tail and hot/cold branches
     count as one deeper.  We aren't counting stack frames here.  */
  unsigned int depth;
  /* Set if global symbol.  */
  unsigned int global : 1;
  /* Set if known to be start of function (as distinct from a hunk
     in hot/cold section).  */
  unsigned int is_func : 1;
  /* Set if not a root node.  */
  unsigned int non_root : 1;
  /* Flags used during call tree traversal.  It's cheaper to replicate
     the visit flags than have one which needs clearing after a traversal.  */
  unsigned int visit1 : 1;
  unsigned int visit2 : 1;
  unsigned int marking : 1;
  unsigned int visit3 : 1;
  unsigned int visit4 : 1;
  unsigned int visit5 : 1;
  unsigned int visit6 : 1;
  unsigned int visit7 : 1;
};
433 
/* Per-section array of function_info records, grown as needed.  */
struct spu_elf_stack_info
{
  int num_fun;
  int max_fun;
  /* Variable size array describing functions, one per contiguous
     address range belonging to a function.  (Pre-C99 trailing-array
     idiom; allocation sizes account for the [1] element.)  */
  struct function_info fun[1];
};
442 
443 static struct function_info *find_function (asection *, bfd_vma,
444 					    struct bfd_link_info *);
445 
446 /* Create a spu ELF linker hash table.  */
447 
448 static struct bfd_link_hash_table *
449 spu_elf_link_hash_table_create (bfd *abfd)
450 {
451   struct spu_link_hash_table *htab;
452 
453   htab = bfd_zmalloc (sizeof (*htab));
454   if (htab == NULL)
455     return NULL;
456 
457   if (!_bfd_elf_link_hash_table_init (&htab->elf, abfd,
458 				      _bfd_elf_link_hash_newfunc,
459 				      sizeof (struct elf_link_hash_entry),
460 				      SPU_ELF_DATA))
461     {
462       free (htab);
463       return NULL;
464     }
465 
466   htab->elf.init_got_refcount.refcount = 0;
467   htab->elf.init_got_refcount.glist = NULL;
468   htab->elf.init_got_offset.offset = 0;
469   htab->elf.init_got_offset.glist = NULL;
470   return &htab->elf.root;
471 }
472 
473 void
474 spu_elf_setup (struct bfd_link_info *info, struct spu_elf_params *params)
475 {
476   bfd_vma max_branch_log2;
477 
478   struct spu_link_hash_table *htab = spu_hash_table (info);
479   htab->params = params;
480   htab->line_size_log2 = bfd_log2 (htab->params->line_size);
481   htab->num_lines_log2 = bfd_log2 (htab->params->num_lines);
482 
483   /* For the software i-cache, we provide a "from" list whose size
484      is a power-of-two number of quadwords, big enough to hold one
485      byte per outgoing branch.  Compute this number here.  */
486   max_branch_log2 = bfd_log2 (htab->params->max_branch);
487   htab->fromelem_size_log2 = max_branch_log2 > 4 ? max_branch_log2 - 4 : 0;
488 }
489 
490 /* Find the symbol for the given R_SYMNDX in IBFD and set *HP and *SYMP
491    to (hash, NULL) for global symbols, and (NULL, sym) for locals.  Set
492    *SYMSECP to the symbol's section.  *LOCSYMSP caches local syms.  */
493 
static bfd_boolean
get_sym_h (struct elf_link_hash_entry **hp,
	   Elf_Internal_Sym **symp,
	   asection **symsecp,
	   Elf_Internal_Sym **locsymsp,
	   unsigned long r_symndx,
	   bfd *ibfd)
{
  Elf_Internal_Shdr *symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;

  if (r_symndx >= symtab_hdr->sh_info)
    {
      /* Global symbol: follow indirect and warning links to the real
	 hash entry.  */
      struct elf_link_hash_entry **sym_hashes = elf_sym_hashes (ibfd);
      struct elf_link_hash_entry *h;

      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
      while (h->root.type == bfd_link_hash_indirect
	     || h->root.type == bfd_link_hash_warning)
	h = (struct elf_link_hash_entry *) h->root.u.i.link;

      if (hp != NULL)
	*hp = h;

      if (symp != NULL)
	*symp = NULL;

      if (symsecp != NULL)
	{
	  /* Only defined symbols have a section; others get NULL.  */
	  asection *symsec = NULL;
	  if (h->root.type == bfd_link_hash_defined
	      || h->root.type == bfd_link_hash_defweak)
	    symsec = h->root.u.def.section;
	  *symsecp = symsec;
	}
    }
  else
    {
      /* Local symbol: read the local part of the symbol table once
	 and cache it in *LOCSYMSP for later calls.  */
      Elf_Internal_Sym *sym;
      Elf_Internal_Sym *locsyms = *locsymsp;

      if (locsyms == NULL)
	{
	  locsyms = (Elf_Internal_Sym *) symtab_hdr->contents;
	  if (locsyms == NULL)
	    locsyms = bfd_elf_get_elf_syms (ibfd, symtab_hdr,
					    symtab_hdr->sh_info,
					    0, NULL, NULL, NULL);
	  if (locsyms == NULL)
	    return FALSE;
	  *locsymsp = locsyms;
	}
      sym = locsyms + r_symndx;

      if (hp != NULL)
	*hp = NULL;

      if (symp != NULL)
	*symp = sym;

      if (symsecp != NULL)
	*symsecp = bfd_section_from_elf_index (ibfd, sym->st_shndx);
    }

  return TRUE;
}
559 
560 /* Create the note section if not already present.  This is done early so
561    that the linker maps the sections to the right place in the output.  */
562 
bfd_boolean
spu_elf_create_sections (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  /* If any input already provides the SPUNAME note section, reuse it.  */
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    if (bfd_get_section_by_name (ibfd, SPU_PTNOTE_SPUNAME) != NULL)
      break;

  if (ibfd == NULL)
    {
      /* Make SPU_PTNOTE_SPUNAME section.  */
      asection *s;
      size_t name_len;
      size_t size;
      bfd_byte *data;
      flagword flags;

      ibfd = info->input_bfds;
      /* This should really be SEC_LINKER_CREATED, but then we'd need
	 to write out the section ourselves.  */
      flags = SEC_LOAD | SEC_READONLY | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      s = bfd_make_section_anyway_with_flags (ibfd, SPU_PTNOTE_SPUNAME, flags);
      if (s == NULL
	  || !bfd_set_section_alignment (ibfd, s, 4))
	return FALSE;
      /* Because we didn't set SEC_LINKER_CREATED we need to set the
	 proper section type.  */
      elf_section_type (s) = SHT_NOTE;

      /* Note layout: 12-byte header (namesz, descsz, type) followed by
	 the note name and the output file name, each padded to a
	 4-byte boundary.  */
      name_len = strlen (bfd_get_filename (info->output_bfd)) + 1;
      size = 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4);
      size += (name_len + 3) & -4;

      if (!bfd_set_section_size (ibfd, s, size))
	return FALSE;

      data = bfd_zalloc (ibfd, size);
      if (data == NULL)
	return FALSE;

      bfd_put_32 (ibfd, sizeof (SPU_PLUGIN_NAME), data + 0);
      bfd_put_32 (ibfd, name_len, data + 4);
      bfd_put_32 (ibfd, 1, data + 8);
      memcpy (data + 12, SPU_PLUGIN_NAME, sizeof (SPU_PLUGIN_NAME));
      memcpy (data + 12 + ((sizeof (SPU_PLUGIN_NAME) + 3) & -4),
	      bfd_get_filename (info->output_bfd), name_len);
      s->contents = data;
    }

  if (htab->params->emit_fixups)
    {
      /* Create the .fixup section on the dynobj.  */
      asection *s;
      flagword flags;

      if (htab->elf.dynobj == NULL)
	htab->elf.dynobj = ibfd;
      ibfd = htab->elf.dynobj;
      flags = (SEC_LOAD | SEC_ALLOC | SEC_READONLY | SEC_HAS_CONTENTS
	       | SEC_IN_MEMORY | SEC_LINKER_CREATED);
      s = bfd_make_section_anyway_with_flags (ibfd, ".fixup", flags);
      if (s == NULL || !bfd_set_section_alignment (ibfd, s, 2))
	return FALSE;
      htab->sfixup = s;
    }

  return TRUE;
}
632 
633 /* qsort predicate to sort sections by vma.  */
634 
635 static int
636 sort_sections (const void *a, const void *b)
637 {
638   const asection *const *s1 = a;
639   const asection *const *s2 = b;
640   bfd_signed_vma delta = (*s1)->vma - (*s2)->vma;
641 
642   if (delta != 0)
643     return delta < 0 ? -1 : 1;
644 
645   return (*s1)->index - (*s2)->index;
646 }
647 
648 /* Identify overlays in the output bfd, and number them.
649    Returns 0 on error, 1 if no overlays, 2 if overlays.  */
650 
int
spu_elf_find_overlays (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  asection **alloc_sec;
  unsigned int i, n, ovl_index, num_buf;
  asection *s;
  bfd_vma ovl_end;
  /* Overlay manager entry points, indexed [entry][ovly_flavour].  */
  static const char *const entry_names[2][2] = {
    { "__ovly_load", "__icache_br_handler" },
    { "__ovly_return", "__icache_call_handler" }
  };

  if (info->output_bfd->section_count < 2)
    return 1;

  alloc_sec
    = bfd_malloc (info->output_bfd->section_count * sizeof (*alloc_sec));
  if (alloc_sec == NULL)
    return 0;

  /* Pick out all the alloced sections.  */
  for (n = 0, s = info->output_bfd->sections; s != NULL; s = s->next)
    if ((s->flags & SEC_ALLOC) != 0
	&& (s->flags & (SEC_LOAD | SEC_THREAD_LOCAL)) != SEC_THREAD_LOCAL
	&& s->size != 0)
      alloc_sec[n++] = s;

  if (n == 0)
    {
      free (alloc_sec);
      return 1;
    }

  /* Sort them by vma.  */
  qsort (alloc_sec, n, sizeof (*alloc_sec), sort_sections);

  ovl_end = alloc_sec[0]->vma + alloc_sec[0]->size;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      unsigned int prev_buf = 0, set_id = 0;

      /* Look for an overlapping vma to find the first overlay section.  */
      bfd_vma vma_start = 0;

      for (i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      /* Found an overlap; the cache area starts at the
		 previous section and spans num_lines * line_size.  */
	      asection *s0 = alloc_sec[i - 1];
	      vma_start = s0->vma;
	      ovl_end = (s0->vma
			 + ((bfd_vma) 1
			    << (htab->num_lines_log2 + htab->line_size_log2)));
	      --i;
	      break;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}

      /* Now find any sections within the cache area.  */
      for (ovl_index = 0, num_buf = 0; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma >= ovl_end)
	    break;

	  /* A section in an overlay area called .ovl.init is not
	     an overlay, in the sense that it might be loaded in
	     by the overlay manager, but rather the initial
	     section contents for the overlay buffer.  */
	  if (strncmp (s->name, ".ovl.init", 9) != 0)
	    {
	      num_buf = ((s->vma - vma_start) >> htab->line_size_log2) + 1;
	      set_id = (num_buf == prev_buf)? set_id + 1 : 0;
	      prev_buf = num_buf;

	      /* Each overlay must lie entirely within one cache line.  */
	      if ((s->vma - vma_start) & (htab->params->line_size - 1))
		{
		  info->callbacks->einfo (_("%X%P: overlay section %pA "
					    "does not start on a cache line\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}
	      else if (s->size > htab->params->line_size)
		{
		  info->callbacks->einfo (_("%X%P: overlay section %pA "
					    "is larger than a cache line\n"),
					  s);
		  bfd_set_error (bfd_error_bad_value);
		  return 0;
		}

	      alloc_sec[ovl_index++] = s;
	      spu_elf_section_data (s)->u.o.ovl_index
		= (set_id << htab->num_lines_log2) + num_buf;
	      spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
	    }
	}

      /* Ensure there are no more overlay sections.  */
      for ( ; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      info->callbacks->einfo (_("%X%P: overlay section %pA "
					"is not in cache area\n"),
				      alloc_sec[i-1]);
	      bfd_set_error (bfd_error_bad_value);
	      return 0;
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }
  else
    {
      /* Look for overlapping vmas.  Any with overlap must be overlays.
	 Count them.  Also count the number of overlay regions.  */
      for (ovl_index = 0, num_buf = 0, i = 1; i < n; i++)
	{
	  s = alloc_sec[i];
	  if (s->vma < ovl_end)
	    {
	      asection *s0 = alloc_sec[i - 1];

	      if (spu_elf_section_data (s0)->u.o.ovl_index == 0)
		{
		  /* First overlap with s0: s0 itself is the first
		     overlay of a new buffer (unless it is .ovl.init).  */
		  ++num_buf;
		  if (strncmp (s0->name, ".ovl.init", 9) != 0)
		    {
		      alloc_sec[ovl_index] = s0;
		      spu_elf_section_data (s0)->u.o.ovl_index = ++ovl_index;
		      spu_elf_section_data (s0)->u.o.ovl_buf = num_buf;
		    }
		  else
		    ovl_end = s->vma + s->size;
		}
	      if (strncmp (s->name, ".ovl.init", 9) != 0)
		{
		  alloc_sec[ovl_index] = s;
		  spu_elf_section_data (s)->u.o.ovl_index = ++ovl_index;
		  spu_elf_section_data (s)->u.o.ovl_buf = num_buf;
		  if (s0->vma != s->vma)
		    {
		      /* xgettext:c-format */
		      info->callbacks->einfo (_("%X%P: overlay sections %pA "
						"and %pA do not start at the "
						"same address\n"),
					      s0, s);
		      bfd_set_error (bfd_error_bad_value);
		      return 0;
		    }
		  if (ovl_end < s->vma + s->size)
		    ovl_end = s->vma + s->size;
		}
	    }
	  else
	    ovl_end = s->vma + s->size;
	}
    }

  htab->num_overlays = ovl_index;
  htab->num_buf = num_buf;
  htab->ovl_sec = alloc_sec;

  if (ovl_index == 0)
    return 1;

  /* Make sure the overlay manager entry symbols exist; referencing
     them here ensures they are pulled in from a library if needed.  */
  for (i = 0; i < 2; i++)
    {
      const char *name;
      struct elf_link_hash_entry *h;

      name = entry_names[i][htab->params->ovly_flavour];
      h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
      if (h == NULL)
	return 0;

      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_undefined;
	  h->ref_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->non_elf = 0;
	}
      htab->ovly_entry[i] = h;
    }

  return 2;
}
846 
/* Non-zero to use bra in overlay stubs rather than br.  */
#define BRA_STUBS 0

/* SPU branch/immediate instruction opcodes used when emitting stubs.  */
#define BRA	0x30000000
#define BRASL	0x31000000
#define BR	0x32000000
#define BRSL	0x33000000
#define NOP	0x40200000
#define LNOP	0x00200000
#define ILA	0x42000000
857 
858 /* Return true for all relative and absolute branch instructions.
859    bra   00110000 0..
860    brasl 00110001 0..
861    br    00110010 0..
862    brsl  00110011 0..
863    brz   00100000 0..
864    brnz  00100001 0..
865    brhz  00100010 0..
866    brhnz 00100011 0..  */
867 
868 static bfd_boolean
869 is_branch (const unsigned char *insn)
870 {
871   return (insn[0] & 0xec) == 0x20 && (insn[1] & 0x80) == 0;
872 }
873 
874 /* Return true for all indirect branch instructions.
875    bi     00110101 000
876    bisl   00110101 001
877    iret   00110101 010
878    bisled 00110101 011
879    biz    00100101 000
880    binz   00100101 001
881    bihz   00100101 010
882    bihnz  00100101 011  */
883 
884 static bfd_boolean
885 is_indirect_branch (const unsigned char *insn)
886 {
887   return (insn[0] & 0xef) == 0x25 && (insn[1] & 0x80) == 0;
888 }
889 
890 /* Return true for branch hint instructions.
891    hbra  0001000..
892    hbrr  0001001..  */
893 
894 static bfd_boolean
895 is_hint (const unsigned char *insn)
896 {
897   return (insn[0] & 0xfc) == 0x10;
898 }
899 
900 /* True if INPUT_SECTION might need overlay stubs.  */
901 
902 static bfd_boolean
903 maybe_needs_stubs (asection *input_section)
904 {
905   /* No stubs for debug sections and suchlike.  */
906   if ((input_section->flags & SEC_ALLOC) == 0)
907     return FALSE;
908 
909   /* No stubs for link-once sections that will be discarded.  */
910   if (input_section->output_section == bfd_abs_section_ptr)
911     return FALSE;
912 
913   /* Don't create stubs for .eh_frame references.  */
914   if (strcmp (input_section->name, ".eh_frame") == 0)
915     return FALSE;
916 
917   return TRUE;
918 }
919 
/* Kinds of overlay stub a reloc may require.  The br000..br111
   values encode the lrlive bits from the branch instruction
   (brNNN_ovl_stub + lrlive is used when classifying branches).  */
enum _stub_type
{
  no_stub,
  call_ovl_stub,
  br000_ovl_stub,
  br001_ovl_stub,
  br010_ovl_stub,
  br011_ovl_stub,
  br100_ovl_stub,
  br101_ovl_stub,
  br110_ovl_stub,
  br111_ovl_stub,
  nonovl_stub,
  stub_error
};
935 
936 /* Return non-zero if this reloc symbol should go via an overlay stub.
937    Return 2 if the stub must be in non-overlay area.  */
938 
static enum _stub_type
needs_ovl_stub (struct elf_link_hash_entry *h,
		Elf_Internal_Sym *sym,
		asection *sym_sec,
		asection *input_section,
		Elf_Internal_Rela *irela,
		bfd_byte *contents,
		struct bfd_link_info *info)
{
  /* Decide whether the reference IRELA in INPUT_SECTION to the symbol
     given by H (global) or SYM/SYM_SEC (local) needs an overlay stub,
     and if so which kind.  CONTENTS, if non-NULL, holds the contents
     of INPUT_SECTION; otherwise the four bytes at the reloc offset are
     read from the bfd.  Returns no_stub, stub_error, or a stub type.  */
  struct spu_link_hash_table *htab = spu_hash_table (info);
  enum elf_spu_reloc_type r_type;
  unsigned int sym_type;
  bfd_boolean branch, hint, call;
  enum _stub_type ret = no_stub;
  bfd_byte insn[4];

  /* References to symbols outside any SPU output section never need
     stubs.  */
  if (sym_sec == NULL
      || sym_sec->output_section == bfd_abs_section_ptr
      || spu_elf_section_data (sym_sec->output_section) == NULL)
    return ret;

  if (h != NULL)
    {
      /* Ensure no stubs for user supplied overlay manager syms.  */
      if (h == htab->ovly_entry[0] || h == htab->ovly_entry[1])
	return ret;

      /* setjmp always goes via an overlay stub, because then the return
	 and hence the longjmp goes via __ovly_return.  That magically
	 makes setjmp/longjmp between overlays work.  */
      if (strncmp (h->root.root.string, "setjmp", 6) == 0
	  && (h->root.root.string[6] == '\0' || h->root.root.string[6] == '@'))
	ret = call_ovl_stub;
    }

  if (h != NULL)
    sym_type = h->type;
  else
    sym_type = ELF_ST_TYPE (sym->st_info);

  r_type = ELF32_R_TYPE (irela->r_info);
  branch = FALSE;
  hint = FALSE;
  call = FALSE;
  if (r_type == R_SPU_REL16 || r_type == R_SPU_ADDR16)
    {
      /* 16-bit relocs may be on a branch or branch-hint instruction;
	 fetch the instruction so we can classify it.  */
      if (contents == NULL)
	{
	  contents = insn;
	  if (!bfd_get_section_contents (input_section->owner,
					 input_section,
					 contents,
					 irela->r_offset, 4))
	    return stub_error;
	}
      else
	contents += irela->r_offset;

      branch = is_branch (contents);
      hint = is_hint (contents);
      if (branch || hint)
	{
	  /* (contents[0] & 0xfd) == 0x31 matches opcode bytes 0x31 and
	     0x33, the branch-and-link (function call) forms.  */
	  call = (contents[0] & 0xfd) == 0x31;
	  if (call
	      && sym_type != STT_FUNC
	      && contents != insn)
	    {
	      /* It's common for people to write assembly and forget
		 to give function symbols the right type.  Handle
		 calls to such symbols, but warn so that (hopefully)
		 people will fix their code.  We need the symbol
		 type to be correct to distinguish function pointer
		 initialisation from other pointer initialisations.  */
	      const char *sym_name;

	      if (h != NULL)
		sym_name = h->root.root.string;
	      else
		{
		  Elf_Internal_Shdr *symtab_hdr;
		  symtab_hdr = &elf_tdata (input_section->owner)->symtab_hdr;
		  sym_name = bfd_elf_sym_name (input_section->owner,
					       symtab_hdr,
					       sym,
					       sym_sec);
		}
	      _bfd_error_handler
		/* xgettext:c-format */
		(_("warning: call to non-function symbol %s defined in %pB"),
		 sym_name, sym_sec->owner);

	    }
	}
    }

  /* Soft-icache only stubs branches and hints.  For other flavours,
     a non-branch reference to a non-function symbol outside a code
     section is plain data and needs no stub.  */
  if ((!branch && htab->params->ovly_flavour == ovly_soft_icache)
      || (sym_type != STT_FUNC
	  && !(branch || hint)
	  && (sym_sec->flags & SEC_CODE) == 0))
    return no_stub;

  /* Usually, symbols in non-overlay sections don't need stubs.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index == 0
      && !htab->params->non_overlay_stubs)
    return ret;

  /* A reference from some other section to a symbol in an overlay
     section needs a stub.  */
  if (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index
       != spu_elf_section_data (input_section->output_section)->u.o.ovl_index)
    {
      unsigned int lrlive = 0;
      /* Bits 4-6 of the second instruction byte carry lrlive info
	 (NOTE(review): encoding assumed to match the .brinfo handling
	 in build_stub -- confirm against the SPU assembler).  */
      if (branch)
	lrlive = (contents[1] & 0x70) >> 4;

      if (!lrlive && (call || sym_type == STT_FUNC))
	ret = call_ovl_stub;
      else
	ret = br000_ovl_stub + lrlive;
    }

  /* If this insn isn't a branch then we are possibly taking the
     address of a function and passing it out somehow.  Soft-icache code
     always generates inline code to do indirect branches.  */
  if (!(branch || hint)
      && sym_type == STT_FUNC
      && htab->params->ovly_flavour != ovly_soft_icache)
    ret = nonovl_stub;

  return ret;
}
1070 
1071 static bfd_boolean
1072 count_stub (struct spu_link_hash_table *htab,
1073 	    bfd *ibfd,
1074 	    asection *isec,
1075 	    enum _stub_type stub_type,
1076 	    struct elf_link_hash_entry *h,
1077 	    const Elf_Internal_Rela *irela)
1078 {
1079   unsigned int ovl = 0;
1080   struct got_entry *g, **head;
1081   bfd_vma addend;
1082 
1083   /* If this instruction is a branch or call, we need a stub
1084      for it.  One stub per function per overlay.
1085      If it isn't a branch, then we are taking the address of
1086      this function so need a stub in the non-overlay area
1087      for it.  One stub per function.  */
1088   if (stub_type != nonovl_stub)
1089     ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;
1090 
1091   if (h != NULL)
1092     head = &h->got.glist;
1093   else
1094     {
1095       if (elf_local_got_ents (ibfd) == NULL)
1096 	{
1097 	  bfd_size_type amt = (elf_tdata (ibfd)->symtab_hdr.sh_info
1098 			       * sizeof (*elf_local_got_ents (ibfd)));
1099 	  elf_local_got_ents (ibfd) = bfd_zmalloc (amt);
1100 	  if (elf_local_got_ents (ibfd) == NULL)
1101 	    return FALSE;
1102 	}
1103       head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);
1104     }
1105 
1106   if (htab->params->ovly_flavour == ovly_soft_icache)
1107     {
1108       htab->stub_count[ovl] += 1;
1109       return TRUE;
1110     }
1111 
1112   addend = 0;
1113   if (irela != NULL)
1114     addend = irela->r_addend;
1115 
1116   if (ovl == 0)
1117     {
1118       struct got_entry *gnext;
1119 
1120       for (g = *head; g != NULL; g = g->next)
1121 	if (g->addend == addend && g->ovl == 0)
1122 	  break;
1123 
1124       if (g == NULL)
1125 	{
1126 	  /* Need a new non-overlay area stub.  Zap other stubs.  */
1127 	  for (g = *head; g != NULL; g = gnext)
1128 	    {
1129 	      gnext = g->next;
1130 	      if (g->addend == addend)
1131 		{
1132 		  htab->stub_count[g->ovl] -= 1;
1133 		  free (g);
1134 		}
1135 	    }
1136 	}
1137     }
1138   else
1139     {
1140       for (g = *head; g != NULL; g = g->next)
1141 	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
1142 	  break;
1143     }
1144 
1145   if (g == NULL)
1146     {
1147       g = bfd_malloc (sizeof *g);
1148       if (g == NULL)
1149 	return FALSE;
1150       g->ovl = ovl;
1151       g->addend = addend;
1152       g->stub_addr = (bfd_vma) -1;
1153       g->next = *head;
1154       *head = g;
1155 
1156       htab->stub_count[ovl] += 1;
1157     }
1158 
1159   return TRUE;
1160 }
1161 
1162 /* Support two sizes of overlay stubs, a slower more compact stub of two
1163    instructions, and a faster stub of four instructions.
1164    Soft-icache stubs are four or eight words.  */
1165 
1166 static unsigned int
1167 ovl_stub_size (struct spu_elf_params *params)
1168 {
1169   return 16 << params->ovly_flavour >> params->compact_stub;
1170 }
1171 
1172 static unsigned int
1173 ovl_stub_size_log2 (struct spu_elf_params *params)
1174 {
1175   return 4 + params->ovly_flavour - params->compact_stub;
1176 }
1177 
1178 /* Two instruction overlay stubs look like:
1179 
1180    brsl $75,__ovly_load
1181    .word target_ovl_and_address
1182 
1183    ovl_and_address is a word with the overlay number in the top 14 bits
1184    and local store address in the bottom 18 bits.
1185 
1186    Four instruction overlay stubs look like:
1187 
1188    ila $78,ovl_number
1189    lnop
1190    ila $79,target_address
1191    br __ovly_load
1192 
1193    Software icache stubs are:
1194 
1195    .word target_index
1196    .word target_ia;
1197    .word lrlive_branchlocalstoreaddr;
1198    brasl $75,__icache_br_handler
1199    .quad xor_pattern
1200 */
1201 
/* Write one overlay stub of STUB_TYPE for the reference described by
   H/IRELA from ISEC in IBFD, targeting DEST in DEST_SEC, at the
   current end of the stub section for the referencing overlay.
   Advances that section's size and optionally emits a
   "NNNNNNNN.ovl_call.<name>" symbol on the stub.  */
static bfd_boolean
build_stub (struct bfd_link_info *info,
	    bfd *ibfd,
	    asection *isec,
	    enum _stub_type stub_type,
	    struct elf_link_hash_entry *h,
	    const Elf_Internal_Rela *irela,
	    bfd_vma dest,
	    asection *dest_sec)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  unsigned int ovl, dest_ovl, set_id;
  struct got_entry *g, **head;
  asection *sec;
  bfd_vma addend, from, to, br_dest, patt;
  unsigned int lrlive;

  ovl = 0;
  if (stub_type != nonovl_stub)
    ovl = spu_elf_section_data (isec->output_section)->u.o.ovl_index;

  if (h != NULL)
    head = &h->got.glist;
  else
    head = elf_local_got_ents (ibfd) + ELF32_R_SYM (irela->r_info);

  addend = 0;
  if (irela != NULL)
    addend = irela->r_addend;

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Soft-icache stubs are per-branch, not shared; allocate a new
	 entry and record the address of the branch using this stub so
	 the icache manager can rewrite it later.  */
      g = bfd_malloc (sizeof *g);
      if (g == NULL)
	return FALSE;
      g->ovl = ovl;
      g->br_addr = 0;
      if (irela != NULL)
	g->br_addr = (irela->r_offset
		      + isec->output_offset
		      + isec->output_section->vma);
      g->next = *head;
      *head = g;
    }
  else
    {
      /* Find the entry count_stub made for this reference.  */
      for (g = *head; g != NULL; g = g->next)
	if (g->addend == addend && (g->ovl == ovl || g->ovl == 0))
	  break;
      if (g == NULL)
	abort ();

      /* A non-overlay stub serves callers in every overlay; don't
	 build a per-overlay copy as well.  */
      if (g->ovl == 0 && ovl != 0)
	return TRUE;

      /* Already built.  */
      if (g->stub_addr != (bfd_vma) -1)
	return TRUE;
    }

  sec = htab->stub_sec[ovl];
  dest += dest_sec->output_offset + dest_sec->output_section->vma;
  from = sec->size + sec->output_offset + sec->output_section->vma;
  g->stub_addr = from;
  to = (htab->ovly_entry[0]->root.u.def.value
	+ htab->ovly_entry[0]->root.u.def.section->output_offset
	+ htab->ovly_entry[0]->root.u.def.section->output_section->vma);

  /* All three addresses must be word aligned for the branch
     instruction encodings below.  */
  if (((dest | to | from) & 3) != 0)
    {
      htab->stub_err = 1;
      return FALSE;
    }
  dest_ovl = spu_elf_section_data (dest_sec->output_section)->u.o.ovl_index;

  if (htab->params->ovly_flavour == ovly_normal
      && !htab->params->compact_stub)
    {
      /* Four instruction stub: ila $78,ovl; lnop; ila $79,dest;
	 br[a] __ovly_load.  */
      bfd_put_32 (sec->owner, ILA + ((dest_ovl << 7) & 0x01ffff80) + 78,
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, LNOP,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, ILA + ((dest << 7) & 0x01ffff80) + 79,
		  sec->contents + sec->size + 8);
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BR + (((to - (from + 12)) << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
      else
	bfd_put_32 (sec->owner, BRA + ((to << 5) & 0x007fff80),
		    sec->contents + sec->size + 12);
    }
  else if (htab->params->ovly_flavour == ovly_normal
	   && htab->params->compact_stub)
    {
      /* Two word stub: br[a]sl $75,__ovly_load followed by a data
	 word holding the overlay number and local store address.  */
      if (!BRA_STUBS)
	bfd_put_32 (sec->owner, BRSL + (((to - from) << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      else
	bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		    sec->contents + sec->size);
      bfd_put_32 (sec->owner, (dest & 0x3ffff) | (dest_ovl << 18),
		  sec->contents + sec->size + 4);
    }
  else if (htab->params->ovly_flavour == ovly_soft_icache
	   && htab->params->compact_stub)
    {
      /* Determine lrlive, the encoding of which caller registers are
	 live, for the icache manager.  */
      lrlive = 0;
      if (stub_type == nonovl_stub)
	;
      else if (stub_type == call_ovl_stub)
	/* A brsl makes lr live and *(*sp+16) is live.
	   Tail calls have the same liveness.  */
	lrlive = 5;
      else if (!htab->params->lrlive_analysis)
	/* Assume stack frame and lr save.  */
	lrlive = 1;
      else if (irela != NULL)
	{
	  /* Analyse branch instructions.  */
	  struct function_info *caller;
	  bfd_vma off;

	  caller = find_function (isec, irela->r_offset, info);
	  if (caller->start == NULL)
	    off = irela->r_offset;
	  else
	    {
	      struct function_info *found = NULL;

	      /* Find the earliest piece of this function that
		 has frame adjusting instructions.  We might
		 see dynamic frame adjustment (eg. for alloca)
		 in some later piece, but functions using
		 alloca always set up a frame earlier.  Frame
		 setup instructions are always in one piece.  */
	      if (caller->lr_store != (bfd_vma) -1
		  || caller->sp_adjust != (bfd_vma) -1)
		found = caller;
	      while (caller->start != NULL)
		{
		  caller = caller->start;
		  if (caller->lr_store != (bfd_vma) -1
		      || caller->sp_adjust != (bfd_vma) -1)
		    found = caller;
		}
	      if (found != NULL)
		caller = found;
	      off = (bfd_vma) -1;
	    }

	  /* Classify the branch location relative to the function's
	     lr save and stack adjust instructions.  */
	  if (off > caller->sp_adjust)
	    {
	      if (off > caller->lr_store)
		/* Only *(*sp+16) is live.  */
		lrlive = 1;
	      else
		/* If no lr save, then we must be in a
		   leaf function with a frame.
		   lr is still live.  */
		lrlive = 4;
	    }
	  else if (off > caller->lr_store)
	    {
	      /* Between lr save and stack adjust.  */
	      lrlive = 3;
	      /* This should never happen since prologues won't
		 be split here.  */
	      BFD_ASSERT (0);
	    }
	  else
	    /* On entry to function.  */
	    lrlive = 5;

	  if (stub_type != br000_ovl_stub
	      && lrlive != stub_type - br000_ovl_stub)
	    /* xgettext:c-format */
	    info->callbacks->einfo (_("%pA:0x%v lrlive .brinfo (%u) differs "
				      "from analysis (%u)\n"),
				    isec, irela->r_offset, lrlive,
				    stub_type - br000_ovl_stub);
	}

      /* If given lrlive info via .brinfo, use it.  */
      if (stub_type > br000_ovl_stub)
	lrlive = stub_type - br000_ovl_stub;

      /* Non-overlay stubs go via the second overlay entry point.  */
      if (ovl == 0)
	to = (htab->ovly_entry[1]->root.u.def.value
	      + htab->ovly_entry[1]->root.u.def.section->output_offset
	      + htab->ovly_entry[1]->root.u.def.section->output_section->vma);

      /* The branch that uses this stub goes to stub_addr + 4.  We'll
	 set up an xor pattern that can be used by the icache manager
	 to modify this branch to go directly to its destination.  */
      g->stub_addr += 4;
      br_dest = g->stub_addr;
      if (irela == NULL)
	{
	  /* Except in the case of _SPUEAR_ stubs, the branch in
	     question is the one in the stub itself.  */
	  BFD_ASSERT (stub_type == nonovl_stub);
	  g->br_addr = g->stub_addr;
	  br_dest = to;
	}

      /* Emit the four stub words: target index/address, the brasl to
	 the icache handler, lrlive plus branch address, and the xor
	 rewrite pattern.  */
      set_id = ((dest_ovl - 1) >> htab->num_lines_log2) + 1;
      bfd_put_32 (sec->owner, (set_id << 18) | (dest & 0x3ffff),
		  sec->contents + sec->size);
      bfd_put_32 (sec->owner, BRASL + ((to << 5) & 0x007fff80) + 75,
		  sec->contents + sec->size + 4);
      bfd_put_32 (sec->owner, (lrlive << 29) | (g->br_addr & 0x3ffff),
		  sec->contents + sec->size + 8);
      patt = dest ^ br_dest;
      if (irela != NULL && ELF32_R_TYPE (irela->r_info) == R_SPU_REL16)
	patt = (dest - g->br_addr) ^ (br_dest - g->br_addr);
      bfd_put_32 (sec->owner, (patt << 5) & 0x007fff80,
		  sec->contents + sec->size + 12);

      if (ovl == 0)
	/* Extra space for linked list entries.  */
	sec->size += 16;
    }
  else
    abort ();

  sec->size += ovl_stub_size (htab->params);

  if (htab->params->emit_stub_syms)
    {
      /* Define a symbol on the stub for debugging convenience,
	 "NNNNNNNN.ovl_call.<name>[+addend]".  */
      size_t len;
      char *name;
      int add;

      len = 8 + sizeof (".ovl_call.") - 1;
      if (h != NULL)
	len += strlen (h->root.root.string);
      else
	len += 8 + 1 + 8;
      add = 0;
      if (irela != NULL)
	add = (int) irela->r_addend & 0xffffffff;
      if (add != 0)
	len += 1 + 8;
      name = bfd_malloc (len + 1);
      if (name == NULL)
	return FALSE;

      sprintf (name, "%08x.ovl_call.", g->ovl);
      if (h != NULL)
	strcpy (name + 8 + sizeof (".ovl_call.") - 1, h->root.root.string);
      else
	sprintf (name + 8 + sizeof (".ovl_call.") - 1, "%x:%x",
		 dest_sec->id & 0xffffffff,
		 (int) ELF32_R_SYM (irela->r_info) & 0xffffffff);
      if (add != 0)
	sprintf (name + len - 9, "+%x", add);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      if (h == NULL)
	return FALSE;
      if (h->root.type == bfd_link_hash_new)
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = sec;
	  h->size = ovl_stub_size (htab->params);
	  h->root.u.def.value = sec->size - h->size;
	  h->type = STT_FUNC;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
1479 
1480 /* Called via elf_link_hash_traverse to allocate stubs for any _SPUEAR_
1481    symbols.  */
1482 
1483 static bfd_boolean
1484 allocate_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1485 {
1486   /* Symbols starting with _SPUEAR_ need a stub because they may be
1487      invoked by the PPU.  */
1488   struct bfd_link_info *info = inf;
1489   struct spu_link_hash_table *htab = spu_hash_table (info);
1490   asection *sym_sec;
1491 
1492   if ((h->root.type == bfd_link_hash_defined
1493        || h->root.type == bfd_link_hash_defweak)
1494       && h->def_regular
1495       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1496       && (sym_sec = h->root.u.def.section) != NULL
1497       && sym_sec->output_section != bfd_abs_section_ptr
1498       && spu_elf_section_data (sym_sec->output_section) != NULL
1499       && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1500 	  || htab->params->non_overlay_stubs))
1501     {
1502       return count_stub (htab, NULL, NULL, nonovl_stub, h, NULL);
1503     }
1504 
1505   return TRUE;
1506 }
1507 
1508 static bfd_boolean
1509 build_spuear_stubs (struct elf_link_hash_entry *h, void *inf)
1510 {
1511   /* Symbols starting with _SPUEAR_ need a stub because they may be
1512      invoked by the PPU.  */
1513   struct bfd_link_info *info = inf;
1514   struct spu_link_hash_table *htab = spu_hash_table (info);
1515   asection *sym_sec;
1516 
1517   if ((h->root.type == bfd_link_hash_defined
1518        || h->root.type == bfd_link_hash_defweak)
1519       && h->def_regular
1520       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0
1521       && (sym_sec = h->root.u.def.section) != NULL
1522       && sym_sec->output_section != bfd_abs_section_ptr
1523       && spu_elf_section_data (sym_sec->output_section) != NULL
1524       && (spu_elf_section_data (sym_sec->output_section)->u.o.ovl_index != 0
1525 	  || htab->params->non_overlay_stubs))
1526     {
1527       return build_stub (info, NULL, NULL, nonovl_stub, h, NULL,
1528 			 h->root.u.def.value, sym_sec);
1529     }
1530 
1531   return TRUE;
1532 }
1533 
1534 /* Size or build stubs.  */
1535 
static bfd_boolean
process_stubs (struct bfd_link_info *info, bfd_boolean build)
{
  /* Walk every reloc in every SPU input section.  When BUILD is FALSE,
     count the stubs needed; when TRUE, write them out.  Returns FALSE
     on error.  */
  struct spu_link_hash_table *htab = spu_hash_table (info);
  bfd *ibfd;

  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *isec;
      Elf_Internal_Sym *local_syms = NULL;

      /* Skip non-SPU input files.  */
      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* We'll need the symbol table in a second.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      if (symtab_hdr->sh_info == 0)
	continue;

      /* Walk over each section attached to the input bfd.  */
      for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	{
	  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;

	  /* If there aren't any relocs, then there's nothing more to do.  */
	  if ((isec->flags & SEC_RELOC) == 0
	      || isec->reloc_count == 0)
	    continue;

	  if (!maybe_needs_stubs (isec))
	    continue;

	  /* Get the relocs.  */
	  internal_relocs = _bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
						       info->keep_memory);
	  if (internal_relocs == NULL)
	    goto error_ret_free_local;

	  /* Now examine each relocation.  */
	  irela = internal_relocs;
	  irelaend = irela + isec->reloc_count;
	  for (; irela < irelaend; irela++)
	    {
	      enum elf_spu_reloc_type r_type;
	      unsigned int r_indx;
	      asection *sym_sec;
	      Elf_Internal_Sym *sym;
	      struct elf_link_hash_entry *h;
	      enum _stub_type stub_type;

	      r_type = ELF32_R_TYPE (irela->r_info);
	      r_indx = ELF32_R_SYM (irela->r_info);

	      if (r_type >= R_SPU_max)
		{
		  bfd_set_error (bfd_error_bad_value);
		  /* Error paths fall through these labels to release
		     whatever has been read so far.  */
		error_ret_free_internal:
		  if (elf_section_data (isec)->relocs != internal_relocs)
		    free (internal_relocs);
		error_ret_free_local:
		  if (local_syms != NULL
		      && (symtab_hdr->contents
			  != (unsigned char *) local_syms))
		    free (local_syms);
		  return FALSE;
		}

	      /* Determine the reloc target section.  */
	      if (!get_sym_h (&h, &sym, &sym_sec, &local_syms, r_indx, ibfd))
		goto error_ret_free_internal;

	      stub_type = needs_ovl_stub (h, sym, sym_sec, isec, irela,
					  NULL, info);
	      if (stub_type == no_stub)
		continue;
	      else if (stub_type == stub_error)
		goto error_ret_free_internal;

	      /* Lazily allocate the per-overlay stub counters,
		 indexed 0 (non-overlay) to num_overlays.  */
	      if (htab->stub_count == NULL)
		{
		  bfd_size_type amt;
		  amt = (htab->num_overlays + 1) * sizeof (*htab->stub_count);
		  htab->stub_count = bfd_zmalloc (amt);
		  if (htab->stub_count == NULL)
		    goto error_ret_free_internal;
		}

	      if (!build)
		{
		  if (!count_stub (htab, ibfd, isec, stub_type, h, irela))
		    goto error_ret_free_internal;
		}
	      else
		{
		  bfd_vma dest;

		  if (h != NULL)
		    dest = h->root.u.def.value;
		  else
		    dest = sym->st_value;
		  dest += irela->r_addend;
		  if (!build_stub (info, ibfd, isec, stub_type, h, irela,
				   dest, sym_sec))
		    goto error_ret_free_internal;
		}
	    }

	  /* We're done with the internal relocs, free them.  */
	  if (elf_section_data (isec)->relocs != internal_relocs)
	    free (internal_relocs);
	}

      /* Either free the local symbols or cache them on the symtab
	 header for later passes.  */
      if (local_syms != NULL
	  && symtab_hdr->contents != (unsigned char *) local_syms)
	{
	  if (!info->keep_memory)
	    free (local_syms);
	  else
	    symtab_hdr->contents = (unsigned char *) local_syms;
	}
    }

  return TRUE;
}
1662 
1663 /* Allocate space for overlay call and return stubs.
1664    Return 0 on error, 1 if no overlays, 2 otherwise.  */
1665 
int
spu_elf_size_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab;
  bfd *ibfd;
  bfd_size_type amt;
  flagword flags;
  unsigned int i;
  asection *stub;

  /* First pass over all relocs, counting the stubs required.  */
  if (!process_stubs (info, FALSE))
    return 0;

  htab = spu_hash_table (info);
  elf_link_hash_traverse (&htab->elf, allocate_spuear_stubs, info);
  if (htab->stub_err)
    return 0;

  /* Create the stub sections, sized from the counts above: index 0
     holds non-overlay stubs, one more per overlay.  */
  ibfd = info->input_bfds;
  if (htab->stub_count != NULL)
    {
      amt = (htab->num_overlays + 1) * sizeof (*htab->stub_sec);
      htab->stub_sec = bfd_zmalloc (amt);
      if (htab->stub_sec == NULL)
	return 0;

      flags = (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_READONLY
	       | SEC_HAS_CONTENTS | SEC_IN_MEMORY);
      stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
      htab->stub_sec[0] = stub;
      if (stub == NULL
	  || !bfd_set_section_alignment (ibfd, stub,
					 ovl_stub_size_log2 (htab->params)))
	return 0;
      stub->size = htab->stub_count[0] * ovl_stub_size (htab->params);
      if (htab->params->ovly_flavour == ovly_soft_icache)
	/* Extra space for linked list entries.  */
	stub->size += htab->stub_count[0] * 16;

      for (i = 0; i < htab->num_overlays; ++i)
	{
	  asection *osec = htab->ovl_sec[i];
	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
	  stub = bfd_make_section_anyway_with_flags (ibfd, ".stub", flags);
	  htab->stub_sec[ovl] = stub;
	  if (stub == NULL
	      || !bfd_set_section_alignment (ibfd, stub,
					     ovl_stub_size_log2 (htab->params)))
	    return 0;
	  stub->size = htab->stub_count[ovl] * ovl_stub_size (htab->params);
	}
    }

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Space for icache manager tables.
	 a) Tag array, one quadword per cache line.
	 b) Rewrite "to" list, one quadword per cache line.
	 c) Rewrite "from" list, one byte per outgoing branch (rounded up to
	    a power-of-two number of full quadwords) per cache line.  */

      flags = SEC_ALLOC;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = (16 + 16 + (16 << htab->fromelem_size_log2))
			  << htab->num_lines_log2;

      /* One quadword of icache manager init data.  */
      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->init = bfd_make_section_anyway_with_flags (ibfd, ".ovini", flags);
      if (htab->init == NULL
	  || !bfd_set_section_alignment (ibfd, htab->init, 4))
	return 0;

      htab->init->size = 16;
    }
  else if (htab->stub_count == NULL)
    return 1;
  else
    {
      /* htab->ovtab consists of two arrays.
	 .	struct {
	 .	  u32 vma;
	 .	  u32 size;
	 .	  u32 file_off;
	 .	  u32 buf;
	 .	} _ovly_table[];
	 .
	 .	struct {
	 .	  u32 mapped;
	 .	} _ovly_buf_table[];
	 .  */

      flags = SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY;
      htab->ovtab = bfd_make_section_anyway_with_flags (ibfd, ".ovtab", flags);
      if (htab->ovtab == NULL
	  || !bfd_set_section_alignment (ibfd, htab->ovtab, 4))
	return 0;

      htab->ovtab->size = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
    }

  /* Table of entry addresses, placed last in local store.  */
  htab->toe = bfd_make_section_anyway_with_flags (ibfd, ".toe", SEC_ALLOC);
  if (htab->toe == NULL
      || !bfd_set_section_alignment (ibfd, htab->toe, 4))
    return 0;
  htab->toe->size = 16;

  return 2;
}
1778 
1779 /* Called from ld to place overlay manager data sections.  This is done
1780    after the overlay manager itself is loaded, mainly so that the
1781    linker's htab->init section is placed after any other .ovl.init
1782    sections.  */
1783 
1784 void
1785 spu_elf_place_overlay_data (struct bfd_link_info *info)
1786 {
1787   struct spu_link_hash_table *htab = spu_hash_table (info);
1788   unsigned int i;
1789 
1790   if (htab->stub_sec != NULL)
1791     {
1792       (*htab->params->place_spu_section) (htab->stub_sec[0], NULL, ".text");
1793 
1794       for (i = 0; i < htab->num_overlays; ++i)
1795 	{
1796 	  asection *osec = htab->ovl_sec[i];
1797 	  unsigned int ovl = spu_elf_section_data (osec)->u.o.ovl_index;
1798 	  (*htab->params->place_spu_section) (htab->stub_sec[ovl], osec, NULL);
1799 	}
1800     }
1801 
1802   if (htab->params->ovly_flavour == ovly_soft_icache)
1803     (*htab->params->place_spu_section) (htab->init, NULL, ".ovl.init");
1804 
1805   if (htab->ovtab != NULL)
1806     {
1807       const char *ovout = ".data";
1808       if (htab->params->ovly_flavour == ovly_soft_icache)
1809 	ovout = ".bss";
1810       (*htab->params->place_spu_section) (htab->ovtab, NULL, ovout);
1811     }
1812 
1813   if (htab->toe != NULL)
1814     (*htab->params->place_spu_section) (htab->toe, NULL, ".toe");
1815 }
1816 
1817 /* Functions to handle embedded spu_ovl.o object.  */
1818 
static void *
ovl_mgr_open (struct bfd *nbfd ATTRIBUTE_UNUSED, void *stream)
{
  /* The "file" is the in-memory overlay manager image; hand the
     stream cookie straight back as the open handle.  */
  return stream;
}
1824 
1825 static file_ptr
1826 ovl_mgr_pread (struct bfd *abfd ATTRIBUTE_UNUSED,
1827 	       void *stream,
1828 	       void *buf,
1829 	       file_ptr nbytes,
1830 	       file_ptr offset)
1831 {
1832   struct _ovl_stream *os;
1833   size_t count;
1834   size_t max;
1835 
1836   os = (struct _ovl_stream *) stream;
1837   max = (const char *) os->end - (const char *) os->start;
1838 
1839   if ((ufile_ptr) offset >= max)
1840     return 0;
1841 
1842   count = nbytes;
1843   if (count > max - offset)
1844     count = max - offset;
1845 
1846   memcpy (buf, (const char *) os->start + offset, count);
1847   return count;
1848 }
1849 
1850 static int
1851 ovl_mgr_stat (struct bfd *abfd ATTRIBUTE_UNUSED,
1852 	      void *stream,
1853 	      struct stat *sb)
1854 {
1855   struct _ovl_stream *os = (struct _ovl_stream *) stream;
1856 
1857   memset (sb, 0, sizeof (*sb));
1858   sb->st_size = (const char *) os->end - (const char *) os->start;
1859   return 0;
1860 }
1861 
1862 bfd_boolean
1863 spu_elf_open_builtin_lib (bfd **ovl_bfd, const struct _ovl_stream *stream)
1864 {
1865   *ovl_bfd = bfd_openr_iovec ("builtin ovl_mgr",
1866 			      "elf32-spu",
1867 			      ovl_mgr_open,
1868 			      (void *) stream,
1869 			      ovl_mgr_pread,
1870 			      NULL,
1871 			      ovl_mgr_stat);
1872   return *ovl_bfd != NULL;
1873 }
1874 
1875 static unsigned int
1876 overlay_index (asection *sec)
1877 {
1878   if (sec == NULL
1879       || sec->output_section == bfd_abs_section_ptr)
1880     return 0;
1881   return spu_elf_section_data (sec->output_section)->u.o.ovl_index;
1882 }
1883 
1884 /* Define an STT_OBJECT symbol.  */
1885 
static struct elf_link_hash_entry *
define_ovtab_symbol (struct spu_link_hash_table *htab, const char *name)
{
  struct elf_link_hash_entry *h;

  h = elf_link_hash_lookup (&htab->elf, name, TRUE, FALSE, FALSE);
  if (h == NULL)
    return NULL;

  /* Not yet defined by an input object: define it in .ovtab.  The
     caller fills in the value and size.  */
  if (h->root.type != bfd_link_hash_defined
      || !h->def_regular)
    {
      h->root.type = bfd_link_hash_defined;
      h->root.u.def.section = htab->ovtab;
      h->type = STT_OBJECT;
      h->ref_regular = 1;
      h->def_regular = 1;
      h->ref_regular_nonweak = 1;
      h->non_elf = 0;
    }
  /* Defined in an input file: users may not override the linker's
     overlay table symbols.  */
  else if (h->root.u.def.section->owner != NULL)
    {
      /* xgettext:c-format */
      _bfd_error_handler (_("%pB is not allowed to define %s"),
			  h->root.u.def.section->owner,
			  h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }
  /* No owning bfd: defined by a linker script, equally disallowed.  */
  else
    {
      _bfd_error_handler (_("you are not allowed to define %s in a script"),
			  h->root.root.string);
      bfd_set_error (bfd_error_bad_value);
      return NULL;
    }

  return h;
}
1925 
1926 /* Fill in all stubs and the overlay tables.  */
1927 
static bfd_boolean
spu_elf_build_stubs (struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  struct elf_link_hash_entry *h;
  bfd_byte *p;
  asection *s;
  bfd *obfd;
  unsigned int i;

  if (htab->num_overlays != 0)
    {
      /* Sanity check: the overlay manager entry points must not
	 themselves live in an overlay section.  */
      for (i = 0; i < 2; i++)
	{
	  h = htab->ovly_entry[i];
	  if (h != NULL
	      && (h->root.type == bfd_link_hash_defined
		  || h->root.type == bfd_link_hash_defweak)
	      && h->def_regular)
	    {
	      s = h->root.u.def.section->output_section;
	      if (spu_elf_section_data (s)->u.o.ovl_index)
		{
		  _bfd_error_handler (_("%s in overlay section"),
				      h->root.root.string);
		  bfd_set_error (bfd_error_bad_value);
		  return FALSE;
		}
	    }
	}
    }

  if (htab->stub_sec != NULL)
    {
      /* Allocate zeroed contents for each non-empty stub section.
	 Save the sizing-pass size in rawsize and reset size to zero;
	 the build pass below recounts size as stubs are emitted so
	 the two passes can be cross-checked.  */
      for (i = 0; i <= htab->num_overlays; i++)
	if (htab->stub_sec[i]->size != 0)
	  {
	    htab->stub_sec[i]->contents = bfd_zalloc (htab->stub_sec[i]->owner,
						      htab->stub_sec[i]->size);
	    if (htab->stub_sec[i]->contents == NULL)
	      return FALSE;
	    htab->stub_sec[i]->rawsize = htab->stub_sec[i]->size;
	    htab->stub_sec[i]->size = 0;
	  }

      /* Fill in all the stubs.  */
      process_stubs (info, TRUE);
      if (!htab->stub_err)
	elf_link_hash_traverse (&htab->elf, build_spuear_stubs, info);

      if (htab->stub_err)
	{
	  _bfd_error_handler (_("overlay stub relocation overflow"));
	  bfd_set_error (bfd_error_bad_value);
	  return FALSE;
	}

      /* The build pass must have emitted exactly the number of bytes
	 the sizing pass predicted.  */
      for (i = 0; i <= htab->num_overlays; i++)
	{
	  if (htab->stub_sec[i]->size != htab->stub_sec[i]->rawsize)
	    {
	      _bfd_error_handler  (_("stubs don't match calculated size"));
	      bfd_set_error (bfd_error_bad_value);
	      return FALSE;
	    }
	  htab->stub_sec[i]->rawsize = 0;
	}
    }

  /* Nothing more to do if there is no overlay table.  */
  if (htab->ovtab == NULL || htab->ovtab->size == 0)
    return TRUE;

  htab->ovtab->contents = bfd_zalloc (htab->ovtab->owner, htab->ovtab->size);
  if (htab->ovtab->contents == NULL)
    return FALSE;

  p = htab->ovtab->contents;
  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      /* Software icache flavour: instead of _ovly_table, describe the
	 icache control arrays by defining symbols for their offsets
	 and sizes.  OFF tracks the running offset within .ovtab.  */
      bfd_vma off;

      h = define_ovtab_symbol (htab, "__icache_tag_array");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 0;
      h->size = 16 << htab->num_lines_log2;
      off = h->size;

      h = define_ovtab_symbol (htab, "__icache_tag_array_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << htab->num_lines_log2;
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_to_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << htab->num_lines_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = off;
      h->size = 16 << (htab->fromelem_size_log2 + htab->num_lines_log2);
      off += h->size;

      h = define_ovtab_symbol (htab, "__icache_rewrite_from_size");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16 << (htab->fromelem_size_log2
				   + htab->num_lines_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_fromelemsize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->fromelem_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      /* The remaining icache symbols are absolute constants describing
	 the cache geometry, not offsets into .ovtab.  */
      h = define_ovtab_symbol (htab, "__icache_base");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->ovl_sec[0]->vma;
      h->root.u.def.section = bfd_abs_section_ptr;
      h->size = htab->num_buf << htab->line_size_log2;

      h = define_ovtab_symbol (htab, "__icache_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_linesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 1 << (htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_lines_log2 + htab->line_size_log2;
      h->root.u.def.section = bfd_abs_section_ptr;

      h = define_ovtab_symbol (htab, "__icache_neg_log2_cachesize");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = -(htab->num_lines_log2 + htab->line_size_log2);
      h->root.u.def.section = bfd_abs_section_ptr;

      if (htab->init != NULL && htab->init->size != 0)
	{
	  htab->init->contents = bfd_zalloc (htab->init->owner,
					     htab->init->size);
	  if (htab->init->contents == NULL)
	    return FALSE;

	  h = define_ovtab_symbol (htab, "__icache_fileoff");
	  if (h == NULL)
	    return FALSE;
	  h->root.u.def.value = 0;
	  h->root.u.def.section = htab->init;
	  h->size = 8;
	}
    }
  else
    {
      /* Write out _ovly_table.  */
      /* set low bit of .size to mark non-overlay area as present.  */
      p[7] = 1;
      obfd = htab->ovtab->output_section->owner;
      for (s = obfd->sections; s != NULL; s = s->next)
	{
	  unsigned int ovl_index = spu_elf_section_data (s)->u.o.ovl_index;

	  if (ovl_index != 0)
	    {
	      /* Each table entry is 16 bytes: vma, rounded size,
		 file offset, buffer number.  Entry 0 is reserved for
		 the non-overlay area, hence the OVL_INDEX * 16.  */
	      unsigned long off = ovl_index * 16;
	      unsigned int ovl_buf = spu_elf_section_data (s)->u.o.ovl_buf;

	      bfd_put_32 (htab->ovtab->owner, s->vma, p + off);
	      bfd_put_32 (htab->ovtab->owner, (s->size + 15) & -16,
			  p + off + 4);
	      /* file_off written later in spu_elf_modify_program_headers.  */
	      bfd_put_32 (htab->ovtab->owner, ovl_buf, p + off + 12);
	    }
	}

      h = define_ovtab_symbol (htab, "_ovly_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = 16;
      h->size = htab->num_overlays * 16;

      h = define_ovtab_symbol (htab, "_ovly_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = 0;

      h = define_ovtab_symbol (htab, "_ovly_buf_table");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16;
      h->size = htab->num_buf * 4;

      h = define_ovtab_symbol (htab, "_ovly_buf_table_end");
      if (h == NULL)
	return FALSE;
      h->root.u.def.value = htab->num_overlays * 16 + 16 + htab->num_buf * 4;
      h->size = 0;
    }

  /* _EAR_ marks the "effective address reference" table in .toe.  */
  h = define_ovtab_symbol (htab, "_EAR_");
  if (h == NULL)
    return FALSE;
  h->root.u.def.section = htab->toe;
  h->root.u.def.value = 0;
  h->size = 16;

  return TRUE;
}
2170 
2171 /* Check that all loadable section VMAs lie in the range
2172    LO .. HI inclusive, and stash some parameters for --auto-overlay.  */
2173 
2174 asection *
2175 spu_elf_check_vma (struct bfd_link_info *info)
2176 {
2177   struct elf_segment_map *m;
2178   unsigned int i;
2179   struct spu_link_hash_table *htab = spu_hash_table (info);
2180   bfd *abfd = info->output_bfd;
2181   bfd_vma hi = htab->params->local_store_hi;
2182   bfd_vma lo = htab->params->local_store_lo;
2183 
2184   htab->local_store = hi + 1 - lo;
2185 
2186   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
2187     if (m->p_type == PT_LOAD)
2188       for (i = 0; i < m->count; i++)
2189 	if (m->sections[i]->size != 0
2190 	    && (m->sections[i]->vma < lo
2191 		|| m->sections[i]->vma > hi
2192 		|| m->sections[i]->vma + m->sections[i]->size - 1 > hi))
2193 	  return m->sections[i];
2194 
2195   return NULL;
2196 }
2197 
2198 /* OFFSET in SEC (presumably) is the beginning of a function prologue.
2199    Search for stack adjusting insns, and return the sp delta.
2200    If a store of lr is found save the instruction offset to *LR_STORE.
2201    If a stack adjusting instruction is found, save that offset to
2202    *SP_ADJUST.  */
2203 
static int
find_function_stack_adjust (asection *sec,
			    bfd_vma offset,
			    bfd_vma *lr_store,
			    bfd_vma *sp_adjust)
{
  /* Tracked value of each of the 128 SPU registers, all assumed zero
     on function entry; only additive/immediate ops are modelled.  */
  int reg[128];

  memset (reg, 0, sizeof (reg));
  for ( ; offset + 4 <= sec->size; offset += 4)
    {
      unsigned char buf[4];
      int rt, ra;
      int imm;

      /* Assume no relocs on stack adjusting insns.  */
      if (!bfd_get_section_contents (sec->owner, sec, buf, offset, 4))
	break;

      /* RT and RA fields common to the insn formats decoded below.  */
      rt = buf[3] & 0x7f;
      ra = ((buf[2] & 0x3f) << 1) | (buf[3] >> 7);

      if (buf[0] == 0x24 /* stqd */)
	{
	  /* A store of lr to the stack marks the prologue save.  */
	  if (rt == 0 /* lr */ && ra == 1 /* sp */)
	    *lr_store = offset;
	  continue;
	}

      /* Partly decoded immediate field.  */
      imm = (buf[1] << 9) | (buf[2] << 1) | (buf[3] >> 7);

      if (buf[0] == 0x1c /* ai */)
	{
	  /* Sign-extend the 10-bit immediate.  */
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] + imm;

	  if (rt == 1 /* sp */)
	    {
	      /* A positive sp delta would be an epilogue; stop.  */
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x18 && (buf[1] & 0xe0) == 0 /* a */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  reg[rt] = reg[ra] + reg[rb];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if (buf[0] == 0x08 && (buf[1] & 0xe0) == 0 /* sf */)
	{
	  int rb = ((buf[1] & 0x1f) << 2) | ((buf[2] & 0xc0) >> 6);

	  /* sf computes rb - ra.  */
	  reg[rt] = reg[rb] - reg[ra];
	  if (rt == 1)
	    {
	      if (reg[rt] > 0)
		break;
	      *sp_adjust = offset;
	      return reg[rt];
	    }
	}
      else if ((buf[0] & 0xfc) == 0x40 /* il, ilh, ilhu, ila */)
	{
	  /* Immediate-load forms; track the loaded constant so a
	     later a/sf on sp sees the right value.  */
	  if (buf[0] >= 0x42 /* ila */)
	    imm |= (buf[0] & 1) << 17;
	  else
	    {
	      imm &= 0xffff;

	      if (buf[0] == 0x40 /* il */)
		{
		  if ((buf[1] & 0x80) == 0)
		    continue;
		  imm = (imm ^ 0x8000) - 0x8000;
		}
	      else if ((buf[1] & 0x80) == 0 /* ilhu */)
		imm <<= 16;
	    }
	  reg[rt] = imm;
	  continue;
	}
      else if (buf[0] == 0x60 && (buf[1] & 0x80) != 0 /* iohl */)
	{
	  /* OR-in the low halfword, completing an ilhu/iohl pair.  */
	  reg[rt] |= imm & 0xffff;
	  continue;
	}
      else if (buf[0] == 0x04 /* ori */)
	{
	  imm >>= 7;
	  imm = (imm ^ 0x200) - 0x200;
	  reg[rt] = reg[ra] | imm;
	  continue;
	}
      else if (buf[0] == 0x32 && (buf[1] & 0x80) != 0 /* fsmbi */)
	{
	  /* Expand each of the four immediate bits we track into a
	     byte of ones.  */
	  reg[rt] = (  ((imm & 0x8000) ? 0xff000000 : 0)
		     | ((imm & 0x4000) ? 0x00ff0000 : 0)
		     | ((imm & 0x2000) ? 0x0000ff00 : 0)
		     | ((imm & 0x1000) ? 0x000000ff : 0));
	  continue;
	}
      else if (buf[0] == 0x16 /* andbi */)
	{
	  /* Replicate the byte immediate across the word.  */
	  imm >>= 7;
	  imm &= 0xff;
	  imm |= imm << 8;
	  imm |= imm << 16;
	  reg[rt] = reg[ra] & imm;
	  continue;
	}
      else if (buf[0] == 0x33 && imm == 1 /* brsl .+4 */)
	{
	  /* Used in pic reg load.  Say rt is trashed.  Won't be used
	     in stack adjust, but we need to continue past this branch.  */
	  reg[rt] = 0;
	  continue;
	}
      else if (is_branch (buf) || is_indirect_branch (buf))
	/* If we hit a branch then we must be out of the prologue.  */
	break;
    }

  /* No stack adjustment found; assume a frameless function.  */
  return 0;
}
2339 
2340 /* qsort predicate to sort symbols by section and value.  */
2341 
/* Context for the sort_syms comparison routine: the symbol array the
   sorted pointers index into, and the parallel per-symbol section
   array.  qsort comparators take no user argument, hence the file
   scope.  */
static Elf_Internal_Sym *sort_syms_syms;
static asection **sort_syms_psecs;
2344 
2345 static int
2346 sort_syms (const void *a, const void *b)
2347 {
2348   Elf_Internal_Sym *const *s1 = a;
2349   Elf_Internal_Sym *const *s2 = b;
2350   asection *sec1,*sec2;
2351   bfd_signed_vma delta;
2352 
2353   sec1 = sort_syms_psecs[*s1 - sort_syms_syms];
2354   sec2 = sort_syms_psecs[*s2 - sort_syms_syms];
2355 
2356   if (sec1 != sec2)
2357     return sec1->index - sec2->index;
2358 
2359   delta = (*s1)->st_value - (*s2)->st_value;
2360   if (delta != 0)
2361     return delta < 0 ? -1 : 1;
2362 
2363   delta = (*s2)->st_size - (*s1)->st_size;
2364   if (delta != 0)
2365     return delta < 0 ? -1 : 1;
2366 
2367   return *s1 < *s2 ? -1 : 1;
2368 }
2369 
2370 /* Allocate a struct spu_elf_stack_info with MAX_FUN struct function_info
2371    entries for section SEC.  */
2372 
2373 static struct spu_elf_stack_info *
2374 alloc_stack_info (asection *sec, int max_fun)
2375 {
2376   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2377   bfd_size_type amt;
2378 
2379   amt = sizeof (struct spu_elf_stack_info);
2380   amt += (max_fun - 1) * sizeof (struct function_info);
2381   sec_data->u.i.stack_info = bfd_zmalloc (amt);
2382   if (sec_data->u.i.stack_info != NULL)
2383     sec_data->u.i.stack_info->max_fun = max_fun;
2384   return sec_data->u.i.stack_info;
2385 }
2386 
2387 /* Add a new struct function_info describing a (part of a) function
2388    starting at SYM_H.  Keep the array sorted by address.  */
2389 
static struct function_info *
maybe_insert_function (asection *sec,
		       void *sym_h,
		       bfd_boolean global,
		       bfd_boolean is_func)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_vma off, size;

  /* Lazily create the per-section function array.  */
  if (sinfo == NULL)
    {
      sinfo = alloc_stack_info (sec, 20);
      if (sinfo == NULL)
	return NULL;
    }

  /* SYM_H is an Elf_Internal_Sym * for local symbols, or an
     elf_link_hash_entry * when GLOBAL; fetch offset and size
     accordingly.  */
  if (!global)
    {
      Elf_Internal_Sym *sym = sym_h;
      off = sym->st_value;
      size = sym->st_size;
    }
  else
    {
      struct elf_link_hash_entry *h = sym_h;
      off = h->root.u.def.value;
      size = h->size;
    }

  /* Find the last existing entry whose start is at or below OFF.  */
  for (i = sinfo->num_fun; --i >= 0; )
    if (sinfo->fun[i].lo <= off)
      break;

  if (i >= 0)
    {
      /* Don't add another entry for an alias, but do update some
	 info.  */
      if (sinfo->fun[i].lo == off)
	{
	  /* Prefer globals over local syms.  */
	  if (global && !sinfo->fun[i].global)
	    {
	      sinfo->fun[i].global = TRUE;
	      sinfo->fun[i].u.h = sym_h;
	    }
	  if (is_func)
	    sinfo->fun[i].is_func = TRUE;
	  return &sinfo->fun[i];
	}
      /* Ignore a zero-size symbol inside an existing function.  */
      else if (sinfo->fun[i].hi > off && size == 0)
	return &sinfo->fun[i];
    }

  /* Grow the array when full: new capacity is 1.5x + 20, and the
     newly-added tail is zeroed to match bfd_zmalloc behaviour.  */
  if (sinfo->num_fun >= sinfo->max_fun)
    {
      bfd_size_type amt = sizeof (struct spu_elf_stack_info);
      bfd_size_type old = amt;

      old += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo->max_fun += 20 + (sinfo->max_fun >> 1);
      amt += (sinfo->max_fun - 1) * sizeof (struct function_info);
      sinfo = bfd_realloc (sinfo, amt);
      if (sinfo == NULL)
	return NULL;
      memset ((char *) sinfo + old, 0, amt - old);
      sec_data->u.i.stack_info = sinfo;
    }

  /* Shift later entries up to keep the array sorted by LO, then fill
     in the new entry at index I.  */
  if (++i < sinfo->num_fun)
    memmove (&sinfo->fun[i + 1], &sinfo->fun[i],
	     (sinfo->num_fun - i) * sizeof (sinfo->fun[i]));
  sinfo->fun[i].is_func = is_func;
  sinfo->fun[i].global = global;
  sinfo->fun[i].sec = sec;
  if (global)
    sinfo->fun[i].u.h = sym_h;
  else
    sinfo->fun[i].u.sym = sym_h;
  sinfo->fun[i].lo = off;
  sinfo->fun[i].hi = off + size;
  /* Scan the prologue now to record the lr save, sp adjust insn and
     stack frame size for this function.  */
  sinfo->fun[i].lr_store = -1;
  sinfo->fun[i].sp_adjust = -1;
  sinfo->fun[i].stack = -find_function_stack_adjust (sec, off,
						     &sinfo->fun[i].lr_store,
						     &sinfo->fun[i].sp_adjust);
  sinfo->num_fun += 1;
  return &sinfo->fun[i];
}
2481 
2482 /* Return the name of FUN.  */
2483 
2484 static const char *
2485 func_name (struct function_info *fun)
2486 {
2487   asection *sec;
2488   bfd *ibfd;
2489   Elf_Internal_Shdr *symtab_hdr;
2490 
2491   while (fun->start != NULL)
2492     fun = fun->start;
2493 
2494   if (fun->global)
2495     return fun->u.h->root.root.string;
2496 
2497   sec = fun->sec;
2498   if (fun->u.sym->st_name == 0)
2499     {
2500       size_t len = strlen (sec->name);
2501       char *name = bfd_malloc (len + 10);
2502       if (name == NULL)
2503 	return "(null)";
2504       sprintf (name, "%s+%lx", sec->name,
2505 	       (unsigned long) fun->u.sym->st_value & 0xffffffff);
2506       return name;
2507     }
2508   ibfd = sec->owner;
2509   symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
2510   return bfd_elf_sym_name (ibfd, symtab_hdr, fun->u.sym, sec);
2511 }
2512 
2513 /* Read the instruction at OFF in SEC.  Return true iff the instruction
2514    is a nop, lnop, or stop 0 (all zero insn).  */
2515 
2516 static bfd_boolean
2517 is_nop (asection *sec, bfd_vma off)
2518 {
2519   unsigned char insn[4];
2520 
2521   if (off + 4 > sec->size
2522       || !bfd_get_section_contents (sec->owner, sec, insn, off, 4))
2523     return FALSE;
2524   if ((insn[0] & 0xbf) == 0 && (insn[1] & 0xe0) == 0x20)
2525     return TRUE;
2526   if (insn[0] == 0 && insn[1] == 0 && insn[2] == 0 && insn[3] == 0)
2527     return TRUE;
2528   return FALSE;
2529 }
2530 
2531 /* Extend the range of FUN to cover nop padding up to LIMIT.
2532    Return TRUE iff some instruction other than a NOP was found.  */
2533 
2534 static bfd_boolean
2535 insns_at_end (struct function_info *fun, bfd_vma limit)
2536 {
2537   bfd_vma off = (fun->hi + 3) & -4;
2538 
2539   while (off < limit && is_nop (fun->sec, off))
2540     off += 4;
2541   if (off < limit)
2542     {
2543       fun->hi = off;
2544       return TRUE;
2545     }
2546   fun->hi = limit;
2547   return FALSE;
2548 }
2549 
2550 /* Check and fix overlapping function ranges.  Return TRUE iff there
2551    are gaps in the current info we have about functions in SEC.  */
2552 
static bfd_boolean
check_function_ranges (asection *sec, struct bfd_link_info *info)
{
  struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
  struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
  int i;
  bfd_boolean gaps = FALSE;

  /* No function info at all means nothing to check, and no gaps to
     report for this section.  */
  if (sinfo == NULL)
    return FALSE;

  /* Walk adjacent pairs in the (sorted) function array.  */
  for (i = 1; i < sinfo->num_fun; i++)
    if (sinfo->fun[i - 1].hi > sinfo->fun[i].lo)
      {
	/* Fix overlapping symbols.  */
	const char *f1 = func_name (&sinfo->fun[i - 1]);
	const char *f2 = func_name (&sinfo->fun[i]);

	/* xgettext:c-format */
	info->callbacks->einfo (_("warning: %s overlaps %s\n"), f1, f2);
	sinfo->fun[i - 1].hi = sinfo->fun[i].lo;
      }
    else if (insns_at_end (&sinfo->fun[i - 1], sinfo->fun[i].lo))
      gaps = TRUE;

  /* Check the section boundaries too: code before the first function,
     and code (or overrun) after the last one.  */
  if (sinfo->num_fun == 0)
    gaps = TRUE;
  else
    {
      if (sinfo->fun[0].lo != 0)
	gaps = TRUE;
      if (sinfo->fun[sinfo->num_fun - 1].hi > sec->size)
	{
	  const char *f1 = func_name (&sinfo->fun[sinfo->num_fun - 1]);

	  info->callbacks->einfo (_("warning: %s exceeds section size\n"), f1);
	  sinfo->fun[sinfo->num_fun - 1].hi = sec->size;
	}
      else if (insns_at_end (&sinfo->fun[sinfo->num_fun - 1], sec->size))
	gaps = TRUE;
    }
  return gaps;
}
2596 
2597 /* Search current function info for a function that contains address
2598    OFFSET in section SEC.  */
2599 
2600 static struct function_info *
2601 find_function (asection *sec, bfd_vma offset, struct bfd_link_info *info)
2602 {
2603   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
2604   struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
2605   int lo, hi, mid;
2606 
2607   lo = 0;
2608   hi = sinfo->num_fun;
2609   while (lo < hi)
2610     {
2611       mid = (lo + hi) / 2;
2612       if (offset < sinfo->fun[mid].lo)
2613 	hi = mid;
2614       else if (offset >= sinfo->fun[mid].hi)
2615 	lo = mid + 1;
2616       else
2617 	return &sinfo->fun[mid];
2618     }
2619   /* xgettext:c-format */
2620   info->callbacks->einfo (_("%pA:0x%v not found in function table\n"),
2621 			  sec, offset);
2622   bfd_set_error (bfd_error_bad_value);
2623   return NULL;
2624 }
2625 
2626 /* Add CALLEE to CALLER call list if not already present.  Return TRUE
2627    if CALLEE was new.  If this function return FALSE, CALLEE should
2628    be freed.  */
2629 
static bfd_boolean
insert_callee (struct function_info *caller, struct call_info *callee)
{
  struct call_info **pp, *p;

  /* Scan the caller's existing call list for the same destination.  */
  for (pp = &caller->call_list; (p = *pp) != NULL; pp = &p->next)
    if (p->fun == callee->fun)
      {
	/* Tail calls use less stack than normal calls.  Retain entry
	   for normal call over one for tail call.  */
	p->is_tail &= callee->is_tail;
	if (!p->is_tail)
	  {
	    /* A normal call means the destination really is a
	       separate function, not a hot/cold split part.  */
	    p->fun->start = NULL;
	    p->fun->is_func = TRUE;
	  }
	p->count += callee->count;
	/* Reorder list so most recent call is first.  */
	*pp = p->next;
	p->next = caller->call_list;
	caller->call_list = p;
	/* Duplicate: the caller should free CALLEE.  */
	return FALSE;
      }
  /* New destination: take ownership of CALLEE by linking it in at the
     head of the list.  */
  callee->next = caller->call_list;
  caller->call_list = callee;
  return TRUE;
}
2657 
2658 /* Copy CALL and insert the copy into CALLER.  */
2659 
2660 static bfd_boolean
2661 copy_callee (struct function_info *caller, const struct call_info *call)
2662 {
2663   struct call_info *callee;
2664   callee = bfd_malloc (sizeof (*callee));
2665   if (callee == NULL)
2666     return FALSE;
2667   *callee = *call;
2668   if (!insert_callee (caller, callee))
2669     free (callee);
2670   return TRUE;
2671 }
2672 
2673 /* We're only interested in code sections.  Testing SEC_IN_MEMORY excludes
2674    overlay stub sections.  */
2675 
2676 static bfd_boolean
2677 interesting_section (asection *s)
2678 {
2679   return (s->output_section != bfd_abs_section_ptr
2680 	  && ((s->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE | SEC_IN_MEMORY))
2681 	      == (SEC_ALLOC | SEC_LOAD | SEC_CODE))
2682 	  && s->size != 0);
2683 }
2684 
2685 /* Rummage through the relocs for SEC, looking for function calls.
2686    If CALL_TREE is true, fill in call graph.  If CALL_TREE is false,
2687    mark destination symbols on calls as being functions.  Also
2688    look at branches, which may be tail calls or go to hot/cold
2689    section part of same function.  */
2690 
static bfd_boolean
mark_functions_via_relocs (asection *sec,
			   struct bfd_link_info *info,
			   int call_tree)
{
  Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
  Elf_Internal_Shdr *symtab_hdr;
  void *psyms;
  unsigned int priority = 0;
  /* Static so the non-code-section warning is only given once per
     link, not once per reloc.  */
  static bfd_boolean warned;

  if (!interesting_section (sec)
      || sec->reloc_count == 0)
    return TRUE;

  internal_relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
					       info->keep_memory);
  if (internal_relocs == NULL)
    return FALSE;

  symtab_hdr = &elf_tdata (sec->owner)->symtab_hdr;
  psyms = &symtab_hdr->contents;
  irela = internal_relocs;
  irelaend = irela + sec->reloc_count;
  for (; irela < irelaend; irela++)
    {
      enum elf_spu_reloc_type r_type;
      unsigned int r_indx;
      asection *sym_sec;
      Elf_Internal_Sym *sym;
      struct elf_link_hash_entry *h;
      bfd_vma val;
      bfd_boolean nonbranch, is_call;
      struct function_info *caller;
      struct call_info *callee;

      /* Only REL16/ADDR16 relocs can sit on branch instructions;
	 anything else is a data reference of some kind.  */
      r_type = ELF32_R_TYPE (irela->r_info);
      nonbranch = r_type != R_SPU_REL16 && r_type != R_SPU_ADDR16;

      r_indx = ELF32_R_SYM (irela->r_info);
      if (!get_sym_h (&h, &sym, &sym_sec, psyms, r_indx, sec->owner))
	return FALSE;

      if (sym_sec == NULL
	  || sym_sec->output_section == bfd_abs_section_ptr)
	continue;

      is_call = FALSE;
      if (!nonbranch)
	{
	  /* Read the insn under the reloc to see whether it really is
	     a branch, and if so whether it is a call (brsl/brasl) and
	     what its softare-managed branch priority is.  */
	  unsigned char insn[4];

	  if (!bfd_get_section_contents (sec->owner, sec, insn,
					 irela->r_offset, 4))
	    return FALSE;
	  if (is_branch (insn))
	    {
	      is_call = (insn[0] & 0xfd) == 0x31;
	      priority = insn[1] & 0x0f;
	      priority <<= 8;
	      priority |= insn[2];
	      priority <<= 8;
	      priority |= insn[3];
	      priority >>= 7;
	      if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		  != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
		{
		  if (!warned)
		    info->callbacks->einfo
		      /* xgettext:c-format */
		      (_("%pB(%pA+0x%v): call to non-code section"
			 " %pB(%pA), analysis incomplete\n"),
		       sec->owner, sec, irela->r_offset,
		       sym_sec->owner, sym_sec);
		  warned = TRUE;
		  continue;
		}
	    }
	  else
	    {
	      /* A REL16/ADDR16 reloc on a non-branch insn: treat it
		 like a data reference below (hints excepted).  */
	      nonbranch = TRUE;
	      if (is_hint (insn))
		continue;
	    }
	}

      if (nonbranch)
	{
	  /* For --auto-overlay, count possible stubs we need for
	     function pointer references.  */
	  unsigned int sym_type;
	  if (h)
	    sym_type = h->type;
	  else
	    sym_type = ELF_ST_TYPE (sym->st_info);
	  if (sym_type == STT_FUNC)
	    {
	      if (call_tree && spu_hash_table (info)->params->auto_overlay)
		spu_hash_table (info)->non_ovly_stub += 1;
	      /* If the symbol type is STT_FUNC then this must be a
		 function pointer initialisation.  */
	      continue;
	    }
	  /* Ignore data references.  */
	  if ((sym_sec->flags & (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	      != (SEC_ALLOC | SEC_LOAD | SEC_CODE))
	    continue;
	  /* Otherwise we probably have a jump table reloc for
	     a switch statement or some other reference to a
	     code label.  */
	}

      /* Destination address within SYM_SEC.  */
      if (h)
	val = h->root.u.def.value;
      else
	val = sym->st_value;
      val += irela->r_addend;

      if (!call_tree)
	{
	  /* First pass: just record the branch/call destinations as
	     (possible) function starts.  A non-zero addend means the
	     target isn't the symbol itself, so synthesize a local
	     symbol at the destination address.  */
	  struct function_info *fun;

	  if (irela->r_addend != 0)
	    {
	      Elf_Internal_Sym *fake = bfd_zmalloc (sizeof (*fake));
	      if (fake == NULL)
		return FALSE;
	      fake->st_value = val;
	      fake->st_shndx
		= _bfd_elf_section_from_bfd_section (sym_sec->owner, sym_sec);
	      sym = fake;
	    }
	  if (sym)
	    fun = maybe_insert_function (sym_sec, sym, FALSE, is_call);
	  else
	    fun = maybe_insert_function (sym_sec, h, TRUE, is_call);
	  if (fun == NULL)
	    return FALSE;
	  /* Free the fake symbol unless maybe_insert_function kept a
	     reference to it.  */
	  if (irela->r_addend != 0
	      && fun->u.sym != sym)
	    free (sym);
	  continue;
	}

      /* Second pass: build the call graph edge CALLER -> CALLEE.  */
      caller = find_function (sec, irela->r_offset, info);
      if (caller == NULL)
	return FALSE;
      callee = bfd_malloc (sizeof *callee);
      if (callee == NULL)
	return FALSE;

      callee->fun = find_function (sym_sec, val, info);
      if (callee->fun == NULL)
	return FALSE;
      callee->is_tail = !is_call;
      callee->is_pasted = FALSE;
      callee->broken_cycle = FALSE;
      callee->priority = priority;
      /* Data references don't contribute to the call count.  */
      callee->count = nonbranch? 0 : 1;
      if (callee->fun->last_caller != sec)
	{
	  callee->fun->last_caller = sec;
	  callee->fun->call_count += 1;
	}
      if (!insert_callee (caller, callee))
	free (callee);
      else if (!is_call
	       && !callee->fun->is_func
	       && callee->fun->stack == 0)
	{
	  /* This is either a tail call or a branch from one part of
	     the function to another, ie. hot/cold section.  If the
	     destination has been called by some other function then
	     it is a separate function.  We also assume that functions
	     are not split across input files.  */
	  if (sec->owner != sym_sec->owner)
	    {
	      callee->fun->start = NULL;
	      callee->fun->is_func = TRUE;
	    }
	  else if (callee->fun->start == NULL)
	    {
	      /* Link the destination part to the root of the caller's
		 group of parts.  */
	      struct function_info *caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;

	      if (caller_start != callee->fun)
		callee->fun->start = caller_start;
	    }
	  else
	    {
	      /* Both sides already belong to part groups.  If their
		 roots differ, the destination must really be a
		 separate function.  */
	      struct function_info *callee_start;
	      struct function_info *caller_start;
	      callee_start = callee->fun;
	      while (callee_start->start)
		callee_start = callee_start->start;
	      caller_start = caller;
	      while (caller_start->start)
		caller_start = caller_start->start;
	      if (caller_start != callee_start)
		{
		  callee->fun->start = NULL;
		  callee->fun->is_func = TRUE;
		}
	    }
	}
    }

  return TRUE;
}
2901 
2902 /* Handle something like .init or .fini, which has a piece of a function.
2903    These sections are pasted together to form a single function.  */
2904 
static bfd_boolean
pasted_function (asection *sec)
{
  struct bfd_link_order *l;
  struct _spu_elf_section_data *sec_data;
  struct spu_elf_stack_info *sinfo;
  Elf_Internal_Sym *fake;
  struct function_info *fun, *fun_start;

  /* Create a fake local symbol covering the whole section and record
     it as a function part.  */
  fake = bfd_zmalloc (sizeof (*fake));
  if (fake == NULL)
    return FALSE;
  fake->st_value = 0;
  fake->st_size = sec->size;
  fake->st_shndx
    = _bfd_elf_section_from_bfd_section (sec->owner, sec);
  fun = maybe_insert_function (sec, fake, FALSE, FALSE);
  if (!fun)
    return FALSE;

  /* Find a function immediately preceding this section.  */
  fun_start = NULL;
  for (l = sec->output_section->map_head.link_order; l != NULL; l = l->next)
    {
      if (l->u.indirect.section == sec)
	{
	  if (fun_start != NULL)
	    {
	      /* Record a pasted-tail-call edge from the preceding
		 function part to this one, and mark this part as a
		 continuation of FUN_START.  */
	      struct call_info *callee = bfd_malloc (sizeof *callee);
	      if (callee == NULL)
		return FALSE;

	      fun->start = fun_start;
	      callee->fun = fun;
	      callee->is_tail = TRUE;
	      callee->is_pasted = TRUE;
	      callee->broken_cycle = FALSE;
	      callee->priority = 0;
	      callee->count = 1;
	      if (!insert_callee (fun_start, callee))
		free (callee);
	      return TRUE;
	    }
	  break;
	}
      /* Remember the last function seen in sections placed before
	 SEC in the output.  */
      if (l->type == bfd_indirect_link_order
	  && (sec_data = spu_elf_section_data (l->u.indirect.section)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL
	  && sinfo->num_fun != 0)
	fun_start = &sinfo->fun[sinfo->num_fun - 1];
    }

  /* Don't return an error if we did not find a function preceding this
     section.  The section may have incorrect flags.  */
  return TRUE;
}
2961 
2962 /* Map address ranges in code sections to functions.  */
2963 
static bfd_boolean
discover_functions (struct bfd_link_info *info)
{
  bfd *ibfd;
  int bfd_idx;
  Elf_Internal_Sym ***psym_arr;
  asection ***sec_arr;
  bfd_boolean gaps = FALSE;

  /* Count the input bfds so we can size the per-bfd symbol and
     section-map arrays.  */
  bfd_idx = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    bfd_idx++;

  /* psym_arr[i] holds the sorted function symbols of input bfd i;
     sec_arr[i] maps each symbol of that bfd to its section.
     NOTE(review): the error returns below leak these arrays and
     their elements; harmless in practice since the link is
     abandoned, but worth confirming against BFD conventions.  */
  psym_arr = bfd_zmalloc (bfd_idx * sizeof (*psym_arr));
  if (psym_arr == NULL)
    return FALSE;
  sec_arr = bfd_zmalloc (bfd_idx * sizeof (*sec_arr));
  if (sec_arr == NULL)
    return FALSE;

  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      extern const bfd_target spu_elf32_vec;
      Elf_Internal_Shdr *symtab_hdr;
      asection *sec;
      size_t symcount;
      Elf_Internal_Sym *syms, *sy, **psyms, **psy;
      asection **psecs, **p;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      /* Read all the symbols.  */
      symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
      symcount = symtab_hdr->sh_size / symtab_hdr->sh_entsize;
      if (symcount == 0)
	{
	  /* No symbols at all: any interesting code section here will
	     need the relocation-based discovery pass below.  */
	  if (!gaps)
	    for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	      if (interesting_section (sec))
		{
		  gaps = TRUE;
		  break;
		}
	  continue;
	}

      if (symtab_hdr->contents != NULL)
	{
	  /* Don't use cached symbols since the generic ELF linker
	     code only reads local symbols, and we need globals too.  */
	  free (symtab_hdr->contents);
	  symtab_hdr->contents = NULL;
	}
      syms = bfd_elf_get_elf_syms (ibfd, symtab_hdr, symcount, 0,
				   NULL, NULL, NULL);
      symtab_hdr->contents = (void *) syms;
      if (syms == NULL)
	return FALSE;

      /* Select defined function symbols that are going to be output.  */
      psyms = bfd_malloc ((symcount + 1) * sizeof (*psyms));
      if (psyms == NULL)
	return FALSE;
      psym_arr[bfd_idx] = psyms;
      psecs = bfd_malloc (symcount * sizeof (*psecs));
      if (psecs == NULL)
	return FALSE;
      sec_arr[bfd_idx] = psecs;
      for (psy = psyms, p = psecs, sy = syms; sy < syms + symcount; ++p, ++sy)
	if (ELF_ST_TYPE (sy->st_info) == STT_NOTYPE
	    || ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	  {
	    asection *s;

	    *p = s = bfd_section_from_elf_index (ibfd, sy->st_shndx);
	    if (s != NULL && interesting_section (s))
	      *psy++ = sy;
	  }
      symcount = psy - psyms;
      /* NULL-terminate so the global-install loop below can stop
	 without a separate count.  */
      *psy = NULL;

      /* Sort them by section and offset within section.  */
      sort_syms_syms = syms;
      sort_syms_psecs = psecs;
      qsort (psyms, symcount, sizeof (*psyms), sort_syms);

      /* Now inspect the function symbols.  Allocate stack_info for
	 each section sized by the number of its symbols; the sort
	 above has grouped symbols of one section together.  */
      for (psy = psyms; psy < psyms + symcount; )
	{
	  asection *s = psecs[*psy - syms];
	  Elf_Internal_Sym **psy2;

	  for (psy2 = psy; ++psy2 < psyms + symcount; )
	    if (psecs[*psy2 - syms] != s)
	      break;

	  if (!alloc_stack_info (s, psy2 - psy))
	    return FALSE;
	  psy = psy2;
	}

      /* First install info about properly typed and sized functions.
	 In an ideal world this will cover all code sections, except
	 when partitioning functions into hot and cold sections,
	 and the horrible pasted together .init and .fini functions.  */
      for (psy = psyms; psy < psyms + symcount; ++psy)
	{
	  sy = *psy;
	  if (ELF_ST_TYPE (sy->st_info) == STT_FUNC)
	    {
	      asection *s = psecs[sy - syms];
	      if (!maybe_insert_function (s, sy, FALSE, TRUE))
		return FALSE;
	    }
	}

      for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	if (interesting_section (sec))
	  gaps |= check_function_ranges (sec, info);
    }

  if (gaps)
    {
      /* See if we can discover more function symbols by looking at
	 relocations.  */
      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  asection *sec;

	  if (psym_arr[bfd_idx] == NULL)
	    continue;

	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (!mark_functions_via_relocs (sec, info, FALSE))
	      return FALSE;
	}

      for (ibfd = info->input_bfds, bfd_idx = 0;
	   ibfd != NULL;
	   ibfd = ibfd->link.next, bfd_idx++)
	{
	  Elf_Internal_Shdr *symtab_hdr;
	  asection *sec;
	  Elf_Internal_Sym *syms, *sy, **psyms, **psy;
	  asection **psecs;

	  if ((psyms = psym_arr[bfd_idx]) == NULL)
	    continue;

	  psecs = sec_arr[bfd_idx];

	  symtab_hdr = &elf_tdata (ibfd)->symtab_hdr;
	  syms = (Elf_Internal_Sym *) symtab_hdr->contents;

	  /* Re-check ranges; the reloc pass may have filled the gaps.  */
	  gaps = FALSE;
	  for (sec = ibfd->sections; sec != NULL && !gaps; sec = sec->next)
	    if (interesting_section (sec))
	      gaps |= check_function_ranges (sec, info);
	  if (!gaps)
	    continue;

	  /* Finally, install all globals.  */
	  for (psy = psyms; (sy = *psy) != NULL; ++psy)
	    {
	      asection *s;

	      s = psecs[sy - syms];

	      /* Global syms might be improperly typed functions.  */
	      if (ELF_ST_TYPE (sy->st_info) != STT_FUNC
		  && ELF_ST_BIND (sy->st_info) == STB_GLOBAL)
		{
		  if (!maybe_insert_function (s, sy, FALSE, FALSE))
		    return FALSE;
		}
	    }
	}

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  extern const bfd_target spu_elf32_vec;
	  asection *sec;

	  if (ibfd->xvec != &spu_elf32_vec)
	    continue;

	  /* Some of the symbols we've installed as marking the
	     beginning of functions may have a size of zero.  Extend
	     the range of such functions to the beginning of the
	     next symbol of interest.  */
	  for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	    if (interesting_section (sec))
	      {
		struct _spu_elf_section_data *sec_data;
		struct spu_elf_stack_info *sinfo;

		sec_data = spu_elf_section_data (sec);
		sinfo = sec_data->u.i.stack_info;
		if (sinfo != NULL && sinfo->num_fun != 0)
		  {
		    /* Walk backwards so each function's hi bound is the
		       next function's lo bound (or the section end).  */
		    int fun_idx;
		    bfd_vma hi = sec->size;

		    for (fun_idx = sinfo->num_fun; --fun_idx >= 0; )
		      {
			sinfo->fun[fun_idx].hi = hi;
			hi = sinfo->fun[fun_idx].lo;
		      }

		    sinfo->fun[0].lo = 0;
		  }
		/* No symbols in this section.  Must be .init or .fini
		   or something similar.  */
		else if (!pasted_function (sec))
		  return FALSE;
	      }
	}
    }

  /* Release the per-bfd scratch arrays.  */
  for (ibfd = info->input_bfds, bfd_idx = 0;
       ibfd != NULL;
       ibfd = ibfd->link.next, bfd_idx++)
    {
      if (psym_arr[bfd_idx] == NULL)
	continue;

      free (psym_arr[bfd_idx]);
      free (sec_arr[bfd_idx]);
    }

  free (psym_arr);
  free (sec_arr);

  return TRUE;
}
3204 
3205 /* Iterate over all function_info we have collected, calling DOIT on
3206    each node if ROOT_ONLY is false.  Only call DOIT on root nodes
3207    if ROOT_ONLY.  */
3208 
3209 static bfd_boolean
3210 for_each_node (bfd_boolean (*doit) (struct function_info *,
3211 				    struct bfd_link_info *,
3212 				    void *),
3213 	       struct bfd_link_info *info,
3214 	       void *param,
3215 	       int root_only)
3216 {
3217   bfd *ibfd;
3218 
3219   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3220     {
3221       extern const bfd_target spu_elf32_vec;
3222       asection *sec;
3223 
3224       if (ibfd->xvec != &spu_elf32_vec)
3225 	continue;
3226 
3227       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3228 	{
3229 	  struct _spu_elf_section_data *sec_data;
3230 	  struct spu_elf_stack_info *sinfo;
3231 
3232 	  if ((sec_data = spu_elf_section_data (sec)) != NULL
3233 	      && (sinfo = sec_data->u.i.stack_info) != NULL)
3234 	    {
3235 	      int i;
3236 	      for (i = 0; i < sinfo->num_fun; ++i)
3237 		if (!root_only || !sinfo->fun[i].non_root)
3238 		  if (!doit (&sinfo->fun[i], info, param))
3239 		    return FALSE;
3240 	    }
3241 	}
3242     }
3243   return TRUE;
3244 }
3245 
3246 /* Transfer call info attached to struct function_info entries for
3247    all of a given function's sections to the first entry.  */
3248 
3249 static bfd_boolean
3250 transfer_calls (struct function_info *fun,
3251 		struct bfd_link_info *info ATTRIBUTE_UNUSED,
3252 		void *param ATTRIBUTE_UNUSED)
3253 {
3254   struct function_info *start = fun->start;
3255 
3256   if (start != NULL)
3257     {
3258       struct call_info *call, *call_next;
3259 
3260       while (start->start != NULL)
3261 	start = start->start;
3262       for (call = fun->call_list; call != NULL; call = call_next)
3263 	{
3264 	  call_next = call->next;
3265 	  if (!insert_callee (start, call))
3266 	    free (call);
3267 	}
3268       fun->call_list = NULL;
3269     }
3270   return TRUE;
3271 }
3272 
3273 /* Mark nodes in the call graph that are called by some other node.  */
3274 
3275 static bfd_boolean
3276 mark_non_root (struct function_info *fun,
3277 	       struct bfd_link_info *info ATTRIBUTE_UNUSED,
3278 	       void *param ATTRIBUTE_UNUSED)
3279 {
3280   struct call_info *call;
3281 
3282   if (fun->visit1)
3283     return TRUE;
3284   fun->visit1 = TRUE;
3285   for (call = fun->call_list; call; call = call->next)
3286     {
3287       call->fun->non_root = TRUE;
3288       mark_non_root (call->fun, 0, 0);
3289     }
3290   return TRUE;
3291 }
3292 
3293 /* Remove cycles from the call graph.  Set depth of nodes.  */
3294 
static bfd_boolean
remove_cycles (struct function_info *fun,
	       struct bfd_link_info *info,
	       void *param)
{
  struct call_info **callp, *call;
  /* PARAM carries the depth in and the maximum depth reached out.  */
  unsigned int depth = *(unsigned int *) param;
  unsigned int max_depth = depth;

  fun->depth = depth;
  fun->visit2 = TRUE;
  /* marking is set only while FUN is on the current recursion path;
     seeing a marked callee therefore means we have found a cycle.  */
  fun->marking = TRUE;

  callp = &fun->call_list;
  while ((call = *callp) != NULL)
    {
      /* Pasted sections are parts of the same function, so they do
	 not add a level of call depth.  */
      call->max_depth = depth + !call->is_pasted;
      if (!call->fun->visit2)
	{
	  if (!remove_cycles (call->fun, info, &call->max_depth))
	    return FALSE;
	  if (max_depth < call->max_depth)
	    max_depth = call->max_depth;
	}
      else if (call->fun->marking)
	{
	  struct spu_link_hash_table *htab = spu_hash_table (info);

	  if (!htab->params->auto_overlay
	      && htab->params->stack_analysis)
	    {
	      const char *f1 = func_name (fun);
	      const char *f2 = func_name (call->fun);

	      /* xgettext:c-format */
	      info->callbacks->info (_("stack analysis will ignore the call "
				       "from %s to %s\n"),
				     f1, f2);
	    }

	  /* Leave the edge in place but flag it so later traversals
	     do not follow it.  */
	  call->broken_cycle = TRUE;
	}
      callp = &call->next;
    }
  fun->marking = FALSE;
  *(unsigned int *) param = max_depth;
  return TRUE;
}
3343 
3344 /* Check that we actually visited all nodes in remove_cycles.  If we
3345    didn't, then there is some cycle in the call graph not attached to
3346    any root node.  Arbitrarily choose a node in the cycle as a new
3347    root and break the cycle.  */
3348 
3349 static bfd_boolean
3350 mark_detached_root (struct function_info *fun,
3351 		    struct bfd_link_info *info,
3352 		    void *param)
3353 {
3354   if (fun->visit2)
3355     return TRUE;
3356   fun->non_root = FALSE;
3357   *(unsigned int *) param = 0;
3358   return remove_cycles (fun, info, param);
3359 }
3360 
3361 /* Populate call_list for each function.  */
3362 
3363 static bfd_boolean
3364 build_call_tree (struct bfd_link_info *info)
3365 {
3366   bfd *ibfd;
3367   unsigned int depth;
3368 
3369   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3370     {
3371       extern const bfd_target spu_elf32_vec;
3372       asection *sec;
3373 
3374       if (ibfd->xvec != &spu_elf32_vec)
3375 	continue;
3376 
3377       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3378 	if (!mark_functions_via_relocs (sec, info, TRUE))
3379 	  return FALSE;
3380     }
3381 
3382   /* Transfer call info from hot/cold section part of function
3383      to main entry.  */
3384   if (!spu_hash_table (info)->params->auto_overlay
3385       && !for_each_node (transfer_calls, info, 0, FALSE))
3386     return FALSE;
3387 
3388   /* Find the call graph root(s).  */
3389   if (!for_each_node (mark_non_root, info, 0, FALSE))
3390     return FALSE;
3391 
3392   /* Remove cycles from the call graph.  We start from the root node(s)
3393      so that we break cycles in a reasonable place.  */
3394   depth = 0;
3395   if (!for_each_node (remove_cycles, info, &depth, TRUE))
3396     return FALSE;
3397 
3398   return for_each_node (mark_detached_root, info, &depth, FALSE);
3399 }
3400 
3401 /* qsort predicate to sort calls by priority, max_depth then count.  */
3402 
3403 static int
3404 sort_calls (const void *a, const void *b)
3405 {
3406   struct call_info *const *c1 = a;
3407   struct call_info *const *c2 = b;
3408   int delta;
3409 
3410   delta = (*c2)->priority - (*c1)->priority;
3411   if (delta != 0)
3412     return delta;
3413 
3414   delta = (*c2)->max_depth - (*c1)->max_depth;
3415   if (delta != 0)
3416     return delta;
3417 
3418   delta = (*c2)->count - (*c1)->count;
3419   if (delta != 0)
3420     return delta;
3421 
3422   return (char *) c1 - (char *) c2;
3423 }
3424 
/* Parameter block for mark_overlay_section.  */
struct _mos_param {
  /* Largest size in bytes (text plus any associated rodata) seen
     for a single function section destined for an overlay.  */
  unsigned int max_overlay_size;
};
3428 
3429 /* Set linker_mark and gc_mark on any sections that we will put in
3430    overlays.  These flags are used by the generic ELF linker, but we
3431    won't be continuing on to bfd_elf_final_link so it is OK to use
3432    them.  linker_mark is clear before we get here.  Set segment_mark
3433    on sections that are part of a pasted function (excluding the last
3434    section).
3435 
   Set up function rodata section if --overlay-rodata.  We don't
   currently include merged string constant rodata sections.
3438 
3439    Sort the call graph so that the deepest nodes will be visited
3440    first.  */
3441 
static bfd_boolean
mark_overlay_section (struct function_info *fun,
		      struct bfd_link_info *info,
		      void *param)
{
  struct call_info *call;
  unsigned int count;
  struct _mos_param *mos_param = param;
  struct spu_link_hash_table *htab = spu_hash_table (info);

  if (fun->visit4)
    return TRUE;

  fun->visit4 = TRUE;
  /* Under soft-icache, only .text.ia.* sections plus .init/.fini are
     overlaid unless --non-ia-text was given.  */
  if (!fun->sec->linker_mark
      && (htab->params->ovly_flavour != ovly_soft_icache
	  || htab->params->non_ia_text
	  || strncmp (fun->sec->name, ".text.ia.", 9) == 0
	  || strcmp (fun->sec->name, ".init") == 0
	  || strcmp (fun->sec->name, ".fini") == 0))
    {
      unsigned int size;

      fun->sec->linker_mark = 1;
      fun->sec->gc_mark = 1;
      fun->sec->segment_mark = 0;
      /* Ensure SEC_CODE is set on this text section (it ought to
	 be!), and SEC_CODE is clear on rodata sections.  We use
	 this flag to differentiate the two overlay section types.  */
      fun->sec->flags |= SEC_CODE;

      size = fun->sec->size;
      if (htab->params->auto_overlay & OVERLAY_RODATA)
	{
	  char *name = NULL;

	  /* Find the rodata section corresponding to this function's
	     text section.  */
	  if (strcmp (fun->sec->name, ".text") == 0)
	    {
	      /* ".text" -> ".rodata".  */
	      name = bfd_malloc (sizeof (".rodata"));
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	    }
	  else if (strncmp (fun->sec->name, ".text.", 6) == 0)
	    {
	      /* ".text.FOO" -> ".rodata.FOO": write ".rodata", then
		 append ".FOO" (the suffix starting at the second dot,
		 NUL included).  */
	      size_t len = strlen (fun->sec->name);
	      name = bfd_malloc (len + 3);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, ".rodata", sizeof (".rodata"));
	      memcpy (name + 7, fun->sec->name + 5, len - 4);
	    }
	  else if (strncmp (fun->sec->name, ".gnu.linkonce.t.", 16) == 0)
	    {
	      /* ".gnu.linkonce.t.FOO" -> ".gnu.linkonce.r.FOO":
		 index 14 is the 't'.  */
	      size_t len = strlen (fun->sec->name) + 1;
	      name = bfd_malloc (len);
	      if (name == NULL)
		return FALSE;
	      memcpy (name, fun->sec->name, len);
	      name[14] = 'r';
	    }

	  if (name != NULL)
	    {
	      asection *rodata = NULL;
	      asection *group_sec = elf_section_data (fun->sec)->next_in_group;
	      /* Prefer a rodata member of the section's own group;
		 fall back to a by-name lookup in the owning bfd.  */
	      if (group_sec == NULL)
		rodata = bfd_get_section_by_name (fun->sec->owner, name);
	      else
		while (group_sec != NULL && group_sec != fun->sec)
		  {
		    if (strcmp (group_sec->name, name) == 0)
		      {
			rodata = group_sec;
			break;
		      }
		    group_sec = elf_section_data (group_sec)->next_in_group;
		  }
	      fun->rodata = rodata;
	      if (fun->rodata)
		{
		  size += fun->rodata->size;
		  /* Drop the rodata again if including it would push
		     the function past the icache line size.  */
		  if (htab->params->line_size != 0
		      && size > htab->params->line_size)
		    {
		      size -= fun->rodata->size;
		      fun->rodata = NULL;
		    }
		  else
		    {
		      fun->rodata->linker_mark = 1;
		      fun->rodata->gc_mark = 1;
		      fun->rodata->flags &= ~SEC_CODE;
		    }
		}
	      free (name);
	    }
	}
      if (mos_param->max_overlay_size < size)
	mos_param->max_overlay_size = size;
    }

  /* Sort the outgoing calls (priority, then depth, then count) so
     that later traversals visit the most important callees first.  */
  for (count = 0, call = fun->call_list; call != NULL; call = call->next)
    count += 1;

  if (count > 1)
    {
      struct call_info **calls = bfd_malloc (count * sizeof (*calls));
      if (calls == NULL)
	return FALSE;

      for (count = 0, call = fun->call_list; call != NULL; call = call->next)
	calls[count++] = call;

      qsort (calls, count, sizeof (*calls), sort_calls);

      /* Rebuild the list from the sorted array (relinked back to
	 front, so calls[0] ends up first).  */
      fun->call_list = NULL;
      while (count != 0)
	{
	  --count;
	  calls[count]->next = fun->call_list;
	  fun->call_list = calls[count];
	}
      free (calls);
    }

  for (call = fun->call_list; call != NULL; call = call->next)
    {
      if (call->is_pasted)
	{
	  /* There can only be one is_pasted call per function_info.  */
	  BFD_ASSERT (!fun->sec->segment_mark);
	  fun->sec->segment_mark = 1;
	}
      if (!call->broken_cycle
	  && !mark_overlay_section (call->fun, info, param))
	return FALSE;
    }

  /* Don't put entry code into an overlay.  The overlay manager needs
     a stack!  Also, don't mark .ovl.init as an overlay.  */
  if (fun->lo + fun->sec->output_offset + fun->sec->output_section->vma
      == info->output_bfd->start_address
      || strncmp (fun->sec->output_section->name, ".ovl.init", 9) == 0)
    {
      fun->sec->linker_mark = 0;
      if (fun->rodata != NULL)
	fun->rodata->linker_mark = 0;
    }
  return TRUE;
}
3595 
3596 /* If non-zero then unmark functions called from those within sections
3597    that we need to unmark.  Unfortunately this isn't reliable since the
3598    call graph cannot know the destination of function pointer calls.  */
3599 #define RECURSE_UNMARK 0
3600 
/* Parameter block for unmark_overlay_section.  */
struct _uos_param {
  /* Functions located in this input section are unmarked.  */
  asection *exclude_input_section;
  /* Likewise for functions whose output section is this one.  */
  asection *exclude_output_section;
  /* Used only when RECURSE_UNMARK is non-zero: counts how many
     excluded functions are on the current recursion path, so their
     callees are unmarked too.  */
  unsigned long clearing;
};
3606 
3607 /* Undo some of mark_overlay_section's work.  */
3608 
3609 static bfd_boolean
3610 unmark_overlay_section (struct function_info *fun,
3611 			struct bfd_link_info *info,
3612 			void *param)
3613 {
3614   struct call_info *call;
3615   struct _uos_param *uos_param = param;
3616   unsigned int excluded = 0;
3617 
3618   if (fun->visit5)
3619     return TRUE;
3620 
3621   fun->visit5 = TRUE;
3622 
3623   excluded = 0;
3624   if (fun->sec == uos_param->exclude_input_section
3625       || fun->sec->output_section == uos_param->exclude_output_section)
3626     excluded = 1;
3627 
3628   if (RECURSE_UNMARK)
3629     uos_param->clearing += excluded;
3630 
3631   if (RECURSE_UNMARK ? uos_param->clearing : excluded)
3632     {
3633       fun->sec->linker_mark = 0;
3634       if (fun->rodata)
3635 	fun->rodata->linker_mark = 0;
3636     }
3637 
3638   for (call = fun->call_list; call != NULL; call = call->next)
3639     if (!call->broken_cycle
3640 	&& !unmark_overlay_section (call->fun, info, param))
3641       return FALSE;
3642 
3643   if (RECURSE_UNMARK)
3644     uos_param->clearing -= excluded;
3645   return TRUE;
3646 }
3647 
/* Parameter block for collect_lib_sections.  */
struct _cl_param {
  /* Space available for non-overlay "library" code; sections larger
     than this are not collected.  */
  unsigned int lib_size;
  /* Output cursor: filled with (text, rodata-or-NULL) section
     pairs.  */
  asection **lib_sections;
};
3652 
3653 /* Add sections we have marked as belonging to overlays to an array
3654    for consideration as non-overlay sections.  The array consist of
3655    pairs of sections, (text,rodata), for functions in the call graph.  */
3656 
3657 static bfd_boolean
3658 collect_lib_sections (struct function_info *fun,
3659 		      struct bfd_link_info *info,
3660 		      void *param)
3661 {
3662   struct _cl_param *lib_param = param;
3663   struct call_info *call;
3664   unsigned int size;
3665 
3666   if (fun->visit6)
3667     return TRUE;
3668 
3669   fun->visit6 = TRUE;
3670   if (!fun->sec->linker_mark || !fun->sec->gc_mark || fun->sec->segment_mark)
3671     return TRUE;
3672 
3673   size = fun->sec->size;
3674   if (fun->rodata)
3675     size += fun->rodata->size;
3676 
3677   if (size <= lib_param->lib_size)
3678     {
3679       *lib_param->lib_sections++ = fun->sec;
3680       fun->sec->gc_mark = 0;
3681       if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
3682 	{
3683 	  *lib_param->lib_sections++ = fun->rodata;
3684 	  fun->rodata->gc_mark = 0;
3685 	}
3686       else
3687 	*lib_param->lib_sections++ = NULL;
3688     }
3689 
3690   for (call = fun->call_list; call != NULL; call = call->next)
3691     if (!call->broken_cycle)
3692       collect_lib_sections (call->fun, info, param);
3693 
3694   return TRUE;
3695 }
3696 
3697 /* qsort predicate to sort sections by call count.  */
3698 
3699 static int
3700 sort_lib (const void *a, const void *b)
3701 {
3702   asection *const *s1 = a;
3703   asection *const *s2 = b;
3704   struct _spu_elf_section_data *sec_data;
3705   struct spu_elf_stack_info *sinfo;
3706   int delta;
3707 
3708   delta = 0;
3709   if ((sec_data = spu_elf_section_data (*s1)) != NULL
3710       && (sinfo = sec_data->u.i.stack_info) != NULL)
3711     {
3712       int i;
3713       for (i = 0; i < sinfo->num_fun; ++i)
3714 	delta -= sinfo->fun[i].call_count;
3715     }
3716 
3717   if ((sec_data = spu_elf_section_data (*s2)) != NULL
3718       && (sinfo = sec_data->u.i.stack_info) != NULL)
3719     {
3720       int i;
3721       for (i = 0; i < sinfo->num_fun; ++i)
3722 	delta += sinfo->fun[i].call_count;
3723     }
3724 
3725   if (delta != 0)
3726     return delta;
3727 
3728   return s1 - s2;
3729 }
3730 
3731 /* Remove some sections from those marked to be in overlays.  Choose
3732    those that are called from many places, likely library functions.  */
3733 
3734 static unsigned int
3735 auto_ovl_lib_functions (struct bfd_link_info *info, unsigned int lib_size)
3736 {
3737   bfd *ibfd;
3738   asection **lib_sections;
3739   unsigned int i, lib_count;
3740   struct _cl_param collect_lib_param;
3741   struct function_info dummy_caller;
3742   struct spu_link_hash_table *htab;
3743 
3744   memset (&dummy_caller, 0, sizeof (dummy_caller));
3745   lib_count = 0;
3746   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
3747     {
3748       extern const bfd_target spu_elf32_vec;
3749       asection *sec;
3750 
3751       if (ibfd->xvec != &spu_elf32_vec)
3752 	continue;
3753 
3754       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
3755 	if (sec->linker_mark
3756 	    && sec->size < lib_size
3757 	    && (sec->flags & SEC_CODE) != 0)
3758 	  lib_count += 1;
3759     }
3760   lib_sections = bfd_malloc (lib_count * 2 * sizeof (*lib_sections));
3761   if (lib_sections == NULL)
3762     return (unsigned int) -1;
3763   collect_lib_param.lib_size = lib_size;
3764   collect_lib_param.lib_sections = lib_sections;
3765   if (!for_each_node (collect_lib_sections, info, &collect_lib_param,
3766 		      TRUE))
3767     return (unsigned int) -1;
3768   lib_count = (collect_lib_param.lib_sections - lib_sections) / 2;
3769 
3770   /* Sort sections so that those with the most calls are first.  */
3771   if (lib_count > 1)
3772     qsort (lib_sections, lib_count, 2 * sizeof (*lib_sections), sort_lib);
3773 
3774   htab = spu_hash_table (info);
3775   for (i = 0; i < lib_count; i++)
3776     {
3777       unsigned int tmp, stub_size;
3778       asection *sec;
3779       struct _spu_elf_section_data *sec_data;
3780       struct spu_elf_stack_info *sinfo;
3781 
3782       sec = lib_sections[2 * i];
3783       /* If this section is OK, its size must be less than lib_size.  */
3784       tmp = sec->size;
3785       /* If it has a rodata section, then add that too.  */
3786       if (lib_sections[2 * i + 1])
3787 	tmp += lib_sections[2 * i + 1]->size;
3788       /* Add any new overlay call stubs needed by the section.  */
3789       stub_size = 0;
3790       if (tmp < lib_size
3791 	  && (sec_data = spu_elf_section_data (sec)) != NULL
3792 	  && (sinfo = sec_data->u.i.stack_info) != NULL)
3793 	{
3794 	  int k;
3795 	  struct call_info *call;
3796 
3797 	  for (k = 0; k < sinfo->num_fun; ++k)
3798 	    for (call = sinfo->fun[k].call_list; call; call = call->next)
3799 	      if (call->fun->sec->linker_mark)
3800 		{
3801 		  struct call_info *p;
3802 		  for (p = dummy_caller.call_list; p; p = p->next)
3803 		    if (p->fun == call->fun)
3804 		      break;
3805 		  if (!p)
3806 		    stub_size += ovl_stub_size (htab->params);
3807 		}
3808 	}
3809       if (tmp + stub_size < lib_size)
3810 	{
3811 	  struct call_info **pp, *p;
3812 
3813 	  /* This section fits.  Mark it as non-overlay.  */
3814 	  lib_sections[2 * i]->linker_mark = 0;
3815 	  if (lib_sections[2 * i + 1])
3816 	    lib_sections[2 * i + 1]->linker_mark = 0;
3817 	  lib_size -= tmp + stub_size;
3818 	  /* Call stubs to the section we just added are no longer
3819 	     needed.  */
3820 	  pp = &dummy_caller.call_list;
3821 	  while ((p = *pp) != NULL)
3822 	    if (!p->fun->sec->linker_mark)
3823 	      {
3824 		lib_size += ovl_stub_size (htab->params);
3825 		*pp = p->next;
3826 		free (p);
3827 	      }
3828 	    else
3829 	      pp = &p->next;
3830 	  /* Add new call stubs to dummy_caller.  */
3831 	  if ((sec_data = spu_elf_section_data (sec)) != NULL
3832 	      && (sinfo = sec_data->u.i.stack_info) != NULL)
3833 	    {
3834 	      int k;
3835 	      struct call_info *call;
3836 
3837 	      for (k = 0; k < sinfo->num_fun; ++k)
3838 		for (call = sinfo->fun[k].call_list;
3839 		     call;
3840 		     call = call->next)
3841 		  if (call->fun->sec->linker_mark)
3842 		    {
3843 		      struct call_info *callee;
3844 		      callee = bfd_malloc (sizeof (*callee));
3845 		      if (callee == NULL)
3846 			return (unsigned int) -1;
3847 		      *callee = *call;
3848 		      if (!insert_callee (&dummy_caller, callee))
3849 			free (callee);
3850 		    }
3851 	    }
3852 	}
3853     }
3854   while (dummy_caller.call_list != NULL)
3855     {
3856       struct call_info *call = dummy_caller.call_list;
3857       dummy_caller.call_list = call->next;
3858       free (call);
3859     }
3860   for (i = 0; i < 2 * lib_count; i++)
3861     if (lib_sections[i])
3862       lib_sections[i]->gc_mark = 1;
3863   free (lib_sections);
3864   return lib_size;
3865 }
3866 
3867 /* Build an array of overlay sections.  The deepest node's section is
3868    added first, then its parent node's section, then everything called
3869    from the parent section.  The idea being to group sections to
3870    minimise calls between different overlays.  */
3871 
static bfd_boolean
collect_overlays (struct function_info *fun,
		  struct bfd_link_info *info,
		  void *param)
{
  struct call_info *call;
  bfd_boolean added_fun;
  asection ***ovly_sections = param;

  if (fun->visit7)
    return TRUE;

  fun->visit7 = TRUE;
  /* Descend into the first real (non-pasted, non-broken) callee
     before emitting FUN, so the deepest sections come out first.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->is_pasted && !call->broken_cycle)
      {
	if (!collect_overlays (call->fun, info, ovly_sections))
	  return FALSE;
	break;
      }

  added_fun = FALSE;
  if (fun->sec->linker_mark && fun->sec->gc_mark)
    {
      /* Emit the (text, rodata-or-NULL) pair and clear gc_mark so it
	 is emitted only once.  */
      fun->sec->gc_mark = 0;
      *(*ovly_sections)++ = fun->sec;
      if (fun->rodata && fun->rodata->linker_mark && fun->rodata->gc_mark)
	{
	  fun->rodata->gc_mark = 0;
	  *(*ovly_sections)++ = fun->rodata;
	}
      else
	*(*ovly_sections)++ = NULL;
      added_fun = TRUE;

      /* Pasted sections must stay with the first section.  We don't
	 put pasted sections in the array, just the first section.
	 Mark subsequent sections as already considered.  */
      if (fun->sec->segment_mark)
	{
	  struct function_info *call_fun = fun;
	  do
	    {
	      for (call = call_fun->call_list; call != NULL; call = call->next)
		if (call->is_pasted)
		  {
		    call_fun = call->fun;
		    call_fun->sec->gc_mark = 0;
		    if (call_fun->rodata)
		      call_fun->rodata->gc_mark = 0;
		    break;
		  }
	      /* segment_mark guarantees a pasted call exists.  */
	      if (call == NULL)
		abort ();
	    }
	  while (call_fun->sec->segment_mark);
	}
    }

  /* Now visit the remaining callees.  */
  for (call = fun->call_list; call != NULL; call = call->next)
    if (!call->broken_cycle
	&& !collect_overlays (call->fun, info, ovly_sections))
      return FALSE;

  if (added_fun)
    {
      struct _spu_elf_section_data *sec_data;
      struct spu_elf_stack_info *sinfo;

      /* Also pull in any other functions that share FUN's section,
	 so a section's functions are grouped together.  */
      if ((sec_data = spu_elf_section_data (fun->sec)) != NULL
	  && (sinfo = sec_data->u.i.stack_info) != NULL)
	{
	  int i;
	  for (i = 0; i < sinfo->num_fun; ++i)
	    if (!collect_overlays (&sinfo->fun[i], info, ovly_sections))
	      return FALSE;
	}
    }

  return TRUE;
}
3953 
/* Parameter block for sum_stack.  */
struct _sum_stack_param {
  /* Cumulative stack requirement of the function just visited.  */
  size_t cum_stack;
  /* Largest cumulative stack seen for any root function.  */
  size_t overall_stack;
  /* Whether to emit __stack_* symbols recording stack usage.  */
  bfd_boolean emit_stack_syms;
};
3959 
3960 /* Descend the call graph for FUN, accumulating total stack required.  */
3961 
static bfd_boolean
sum_stack (struct function_info *fun,
	   struct bfd_link_info *info,
	   void *param)
{
  struct call_info *call;
  struct function_info *max;	/* Callee with the largest cumulative stack.  */
  size_t stack, cum_stack;
  const char *f1;
  bfd_boolean has_call;
  struct _sum_stack_param *sum_stack_param = param;
  struct spu_link_hash_table *htab;

  cum_stack = fun->stack;
  sum_stack_param->cum_stack = cum_stack;
  /* If this node was already summed, fun->stack has been overwritten
     with its cumulative figure (see below), so just report it.  */
  if (fun->visit3)
    return TRUE;

  has_call = FALSE;
  max = NULL;
  for (call = fun->call_list; call; call = call->next)
    {
      if (call->broken_cycle)
	continue;
      if (!call->is_pasted)
	has_call = TRUE;
      /* Recurse; the callee's cumulative requirement comes back in
	 sum_stack_param->cum_stack.  */
      if (!sum_stack (call->fun, info, sum_stack_param))
	return FALSE;
      stack = sum_stack_param->cum_stack;
      /* Include caller stack for normal calls, don't do so for
	 tail calls.  fun->stack here is local stack usage for
	 this function.  */
      if (!call->is_tail || call->is_pasted || call->fun->start != NULL)
	stack += fun->stack;
      if (cum_stack < stack)
	{
	  cum_stack = stack;
	  max = call->fun;
	}
    }

  sum_stack_param->cum_stack = cum_stack;
  stack = fun->stack;
  /* Now fun->stack holds cumulative stack.  */
  fun->stack = cum_stack;
  fun->visit3 = TRUE;

  /* Only root nodes of the call graph contribute to the overall max.  */
  if (!fun->non_root
      && sum_stack_param->overall_stack < cum_stack)
    sum_stack_param->overall_stack = cum_stack;

  htab = spu_hash_table (info);
  /* Auto-overlay only needs overall_stack; skip reporting and symbols.  */
  if (htab->params->auto_overlay)
    return TRUE;

  f1 = func_name (fun);
  if (htab->params->stack_analysis)
    {
      if (!fun->non_root)
	info->callbacks->info ("  %s: 0x%v\n", f1, (bfd_vma) cum_stack);
      info->callbacks->minfo ("%s: 0x%v 0x%v\n",
			      f1, (bfd_vma) stack, (bfd_vma) cum_stack);

      if (has_call)
	{
	  info->callbacks->minfo (_("  calls:\n"));
	  for (call = fun->call_list; call; call = call->next)
	    if (!call->is_pasted && !call->broken_cycle)
	      {
		const char *f2 = func_name (call->fun);
		/* '*' marks the max-stack callee, 't' a tail call.  */
		const char *ann1 = call->fun == max ? "*" : " ";
		const char *ann2 = call->is_tail ? "t" : " ";

		info->callbacks->minfo ("   %s%s %s\n", ann1, ann2, f2);
	      }
	}
    }

  if (sum_stack_param->emit_stack_syms)
    {
      /* Define an absolute __stack_<name> symbol whose value is this
	 function's cumulative stack requirement.
	 18 = strlen ("__stack_") + 8 hex digits + '_' + NUL.  */
      char *name = bfd_malloc (18 + strlen (f1));
      struct elf_link_hash_entry *h;

      if (name == NULL)
	return FALSE;

      if (fun->global || ELF_ST_BIND (fun->u.sym->st_info) == STB_GLOBAL)
	sprintf (name, "__stack_%s", f1);
      else
	/* Local functions get the section id in the name so that
	   same-named statics in different sections stay distinct.  */
	sprintf (name, "__stack_%x_%s", fun->sec->id & 0xffffffff, f1);

      h = elf_link_hash_lookup (&htab->elf, name, TRUE, TRUE, FALSE);
      free (name);
      /* Only define the symbol if nothing else has defined it.  */
      if (h != NULL
	  && (h->root.type == bfd_link_hash_new
	      || h->root.type == bfd_link_hash_undefined
	      || h->root.type == bfd_link_hash_undefweak))
	{
	  h->root.type = bfd_link_hash_defined;
	  h->root.u.def.section = bfd_abs_section_ptr;
	  h->root.u.def.value = cum_stack;
	  h->size = 0;
	  h->type = 0;
	  h->ref_regular = 1;
	  h->def_regular = 1;
	  h->ref_regular_nonweak = 1;
	  h->forced_local = 1;
	  h->non_elf = 0;
	}
    }

  return TRUE;
}
4075 
4076 /* SEC is part of a pasted function.  Return the call_info for the
4077    next section of this function.  */
4078 
4079 static struct call_info *
4080 find_pasted_call (asection *sec)
4081 {
4082   struct _spu_elf_section_data *sec_data = spu_elf_section_data (sec);
4083   struct spu_elf_stack_info *sinfo = sec_data->u.i.stack_info;
4084   struct call_info *call;
4085   int k;
4086 
4087   for (k = 0; k < sinfo->num_fun; ++k)
4088     for (call = sinfo->fun[k].call_list; call != NULL; call = call->next)
4089       if (call->is_pasted)
4090 	return call;
4091   abort ();
4092   return 0;
4093 }
4094 
4095 /* qsort predicate to sort bfds by file name.  */
4096 
4097 static int
4098 sort_bfds (const void *a, const void *b)
4099 {
4100   bfd *const *abfd1 = a;
4101   bfd *const *abfd2 = b;
4102 
4103   return filename_cmp ((*abfd1)->filename, (*abfd2)->filename);
4104 }
4105 
static unsigned int
print_one_overlay_section (FILE *script,
			   unsigned int base,
			   unsigned int count,
			   unsigned int ovlynum,
			   unsigned int *ovly_map,
			   asection **ovly_sections,
			   struct bfd_link_info *info)
{
  /* Write the input-section lines for overlay OVLYNUM to SCRIPT.
     ovly_sections holds (code, rodata) pairs; ovly_map[j] gives the
     overlay number assigned to pair j.  Returns the index of the
     first pair not in OVLYNUM, or (unsigned int) -1 on write error.  */
  unsigned int j;

  /* First pass: code sections, plus the code of any pasted
     continuation sections chained from them.  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j];

      if (fprintf (script, "   %s%c%s (%s)\n",
		   (sec->owner->my_archive != NULL
		    ? sec->owner->my_archive->filename : ""),
		   info->path_separator,
		   sec->owner->filename,
		   sec->name) <= 0)
	return -1;
      if (sec->segment_mark)
	{
	  /* Follow the pasted-call chain so the whole function's
	     code stays in this overlay.  */
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->sec;
	      if (fprintf (script, "   %s%c%s (%s)\n",
			   (sec->owner->my_archive != NULL
			    ? sec->owner->my_archive->filename : ""),
			   info->path_separator,
			   sec->owner->filename,
			   sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  /* Second pass: matching rodata sections, again following pasted
     chains for their rodata.  */
  for (j = base; j < count && ovly_map[j] == ovlynum; j++)
    {
      asection *sec = ovly_sections[2 * j + 1];
      if (sec != NULL
	  && fprintf (script, "   %s%c%s (%s)\n",
		      (sec->owner->my_archive != NULL
		       ? sec->owner->my_archive->filename : ""),
		      info->path_separator,
		      sec->owner->filename,
		      sec->name) <= 0)
	return -1;

      sec = ovly_sections[2 * j];
      if (sec->segment_mark)
	{
	  struct call_info *call = find_pasted_call (sec);
	  while (call != NULL)
	    {
	      struct function_info *call_fun = call->fun;
	      sec = call_fun->rodata;
	      if (sec != NULL
		  && fprintf (script, "   %s%c%s (%s)\n",
			      (sec->owner->my_archive != NULL
			       ? sec->owner->my_archive->filename : ""),
			      info->path_separator,
			      sec->owner->filename,
			      sec->name) <= 0)
		return -1;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  break;
	    }
	}
    }

  return j;
}
4186 
4187 /* Handle --auto-overlay.  */
4188 
static void
spu_elf_auto_overlay (struct bfd_link_info *info)
{
  bfd *ibfd;
  bfd **bfd_arr;
  struct elf_segment_map *m;
  unsigned int fixed_size, lo, hi;
  unsigned int reserved;
  struct spu_link_hash_table *htab;
  unsigned int base, i, count, bfd_count;
  unsigned int region, ovlynum;
  asection **ovly_sections, **ovly_p;
  unsigned int *ovly_map;
  FILE *script;
  unsigned int total_overlay_size, overlay_size;
  const char *ovly_mgr_entry;
  struct elf_link_hash_entry *h;
  struct _mos_param mos_param;
  struct _uos_param uos_param;
  struct function_info dummy_caller;

  /* Find the extents of our loadable image.  */
  lo = (unsigned int) -1;
  hi = 0;
  for (m = elf_seg_map (info->output_bfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD)
      for (i = 0; i < m->count; i++)
	if (m->sections[i]->size != 0)
	  {
	    if (m->sections[i]->vma < lo)
	      lo = m->sections[i]->vma;
	    if (m->sections[i]->vma + m->sections[i]->size - 1 > hi)
	      hi = m->sections[i]->vma + m->sections[i]->size - 1;
	  }
  fixed_size = hi + 1 - lo;

  if (!discover_functions (info))
    goto err_exit;

  if (!build_call_tree (info))
    goto err_exit;

  htab = spu_hash_table (info);
  reserved = htab->params->auto_overlay_reserved;
  if (reserved == 0)
    {
      struct _sum_stack_param sum_stack_param;

      /* No user-specified reserve: use worst-case stack usage plus
	 any requested extra space.  */
      sum_stack_param.emit_stack_syms = 0;
      sum_stack_param.overall_stack = 0;
      if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
	goto err_exit;
      reserved = (sum_stack_param.overall_stack
		  + htab->params->extra_stack_space);
    }

  /* No need for overlays if everything already fits.  */
  if (fixed_size + reserved <= htab->local_store
      && htab->params->ovly_flavour != ovly_soft_icache)
    {
      htab->params->auto_overlay = 0;
      return;
    }

  uos_param.exclude_input_section = 0;
  uos_param.exclude_output_section
    = bfd_get_section_by_name (info->output_bfd, ".interrupt");

  ovly_mgr_entry = "__ovly_load";
  if (htab->params->ovly_flavour == ovly_soft_icache)
    ovly_mgr_entry = "__icache_br_handler";
  h = elf_link_hash_lookup (&htab->elf, ovly_mgr_entry,
			    FALSE, FALSE, FALSE);
  if (h != NULL
      && (h->root.type == bfd_link_hash_defined
	  || h->root.type == bfd_link_hash_defweak)
      && h->def_regular)
    {
      /* We have a user supplied overlay manager.  */
      uos_param.exclude_input_section = h->root.u.def.section;
    }
  else
    {
      /* If no user overlay manager, spu_elf_load_ovl_mgr will add our
	 builtin version to .text, and will adjust .text size.  */
      fixed_size += (*htab->params->spu_elf_load_ovl_mgr) ();
    }

  /* Mark overlay sections, and find max overlay section size.  */
  mos_param.max_overlay_size = 0;
  if (!for_each_node (mark_overlay_section, info, &mos_param, TRUE))
    goto err_exit;

  /* We can't put the overlay manager or interrupt routines in
     overlays.  */
  uos_param.clearing = 0;
  if ((uos_param.exclude_input_section
       || uos_param.exclude_output_section)
      && !for_each_node (unmark_overlay_section, info, &uos_param, TRUE))
    goto err_exit;

  /* Count the input bfds so we can size the file-name array.  */
  bfd_count = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    ++bfd_count;
  bfd_arr = bfd_malloc (bfd_count * sizeof (*bfd_arr));
  if (bfd_arr == NULL)
    goto err_exit;

  /* Count overlay sections, and subtract their sizes from "fixed_size".  */
  count = 0;
  bfd_count = 0;
  total_overlay_size = 0;
  for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
    {
      extern const bfd_target spu_elf32_vec;
      asection *sec;
      unsigned int old_count;

      if (ibfd->xvec != &spu_elf32_vec)
	continue;

      old_count = count;
      for (sec = ibfd->sections; sec != NULL; sec = sec->next)
	if (sec->linker_mark)
	  {
	    if ((sec->flags & SEC_CODE) != 0)
	      count += 1;
	    fixed_size -= sec->size;
	    total_overlay_size += sec->size;
	  }
	else if ((sec->flags & (SEC_ALLOC | SEC_LOAD)) == (SEC_ALLOC | SEC_LOAD)
		 && sec->output_section->owner == info->output_bfd
		 && strncmp (sec->output_section->name, ".ovl.init", 9) == 0)
	  fixed_size -= sec->size;
      if (count != old_count)
	bfd_arr[bfd_count++] = ibfd;
    }

  /* Since the overlay link script selects sections by file name and
     section name, ensure that file names are unique.  */
  if (bfd_count > 1)
    {
      bfd_boolean ok = TRUE;

      qsort (bfd_arr, bfd_count, sizeof (*bfd_arr), sort_bfds);
      for (i = 1; i < bfd_count; ++i)
	if (filename_cmp (bfd_arr[i - 1]->filename, bfd_arr[i]->filename) == 0)
	  {
	    if (bfd_arr[i - 1]->my_archive == bfd_arr[i]->my_archive)
	      {
		if (bfd_arr[i - 1]->my_archive && bfd_arr[i]->my_archive)
		  /* xgettext:c-format */
		  info->callbacks->einfo (_("%s duplicated in %s\n"),
					  bfd_arr[i]->filename,
					  bfd_arr[i]->my_archive->filename);
		else
		  info->callbacks->einfo (_("%s duplicated\n"),
					  bfd_arr[i]->filename);
		ok = FALSE;
	      }
	  }
      if (!ok)
	{
	  info->callbacks->einfo (_("sorry, no support for duplicate "
				    "object files in auto-overlay script\n"));
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}
    }
  free (bfd_arr);

  fixed_size += reserved;
  fixed_size += htab->non_ovly_stub * ovl_stub_size (htab->params);
  if (fixed_size + mos_param.max_overlay_size <= htab->local_store)
    {
      if (htab->params->ovly_flavour == ovly_soft_icache)
	{
	  /* Stubs in the non-icache area are bigger.  */
	  fixed_size += htab->non_ovly_stub * 16;
	  /* Space for icache manager tables.
	     a) Tag array, one quadword per cache line.
	     - word 0: ia address of present line, init to zero.  */
	  fixed_size += 16 << htab->num_lines_log2;
	  /* b) Rewrite "to" list, one quadword per cache line.  */
	  fixed_size += 16 << htab->num_lines_log2;
	  /* c) Rewrite "from" list, one byte per outgoing branch (rounded up
		to a power-of-two number of full quadwords) per cache line.  */
	  fixed_size += 16 << (htab->fromelem_size_log2
			       + htab->num_lines_log2);
	  /* d) Pointer to __ea backing store (toe), 1 quadword.  */
	  fixed_size += 16;
	}
      else
	{
	  /* Guess number of overlays.  Assuming overlay buffer is on
	     average only half full should be conservative.  */
	  ovlynum = (total_overlay_size * 2 * htab->params->num_lines
		     / (htab->local_store - fixed_size));
	  /* Space for _ovly_table[], _ovly_buf_table[] and toe.  */
	  fixed_size += ovlynum * 16 + 16 + 4 + 16;
	}
    }

  if (fixed_size + mos_param.max_overlay_size > htab->local_store)
    /* xgettext:c-format */
    info->callbacks->einfo (_("non-overlay size of 0x%v plus maximum overlay "
			      "size of 0x%v exceeds local store\n"),
			    (bfd_vma) fixed_size,
			    (bfd_vma) mos_param.max_overlay_size);

  /* Now see if we should put some functions in the non-overlay area.  */
  else if (fixed_size < htab->params->auto_overlay_fixed)
    {
      unsigned int max_fixed, lib_size;

      max_fixed = htab->local_store - mos_param.max_overlay_size;
      if (max_fixed > htab->params->auto_overlay_fixed)
	max_fixed = htab->params->auto_overlay_fixed;
      lib_size = max_fixed - fixed_size;
      lib_size = auto_ovl_lib_functions (info, lib_size);
      if (lib_size == (unsigned int) -1)
	goto err_exit;
      fixed_size = max_fixed - lib_size;
    }

  /* Build an array of sections, suitably sorted to place into
     overlays.  */
  ovly_sections = bfd_malloc (2 * count * sizeof (*ovly_sections));
  if (ovly_sections == NULL)
    goto err_exit;
  ovly_p = ovly_sections;
  if (!for_each_node (collect_overlays, info, &ovly_p, TRUE))
    goto err_exit;
  count = (size_t) (ovly_p - ovly_sections) / 2;
  ovly_map = bfd_malloc (count * sizeof (*ovly_map));
  if (ovly_map == NULL)
    goto err_exit;

  /* Greedily pack sections into overlays.  ovly_map[i] ends up with
     the 1-based overlay number assigned to ovly_sections pair i.  */
  memset (&dummy_caller, 0, sizeof (dummy_caller));
  overlay_size = (htab->local_store - fixed_size) / htab->params->num_lines;
  if (htab->params->line_size != 0)
    overlay_size = htab->params->line_size;
  base = 0;
  ovlynum = 0;
  while (base < count)
    {
      unsigned int size = 0, rosize = 0, roalign = 0;

      for (i = base; i < count; i++)
	{
	  asection *sec, *rosec;
	  unsigned int tmp, rotmp;
	  unsigned int num_stubs;
	  struct call_info *call, *pasty;
	  struct _spu_elf_section_data *sec_data;
	  struct spu_elf_stack_info *sinfo;
	  unsigned int k;

	  /* See whether we can add this section to the current
	     overlay without overflowing our overlay buffer.  */
	  sec = ovly_sections[2 * i];
	  tmp = align_power (size, sec->alignment_power) + sec->size;
	  rotmp = rosize;
	  rosec = ovly_sections[2 * i + 1];
	  if (rosec != NULL)
	    {
	      rotmp = align_power (rotmp, rosec->alignment_power) + rosec->size;
	      if (roalign < rosec->alignment_power)
		roalign = rosec->alignment_power;
	    }
	  if (align_power (tmp, roalign) + rotmp > overlay_size)
	    break;
	  if (sec->segment_mark)
	    {
	      /* Pasted sections must stay together, so add their
		 sizes too.  */
	      pasty = find_pasted_call (sec);
	      while (pasty != NULL)
		{
		  struct function_info *call_fun = pasty->fun;
		  tmp = (align_power (tmp, call_fun->sec->alignment_power)
			 + call_fun->sec->size);
		  if (call_fun->rodata)
		    {
		      rotmp = (align_power (rotmp,
					    call_fun->rodata->alignment_power)
			       + call_fun->rodata->size);
		      if (roalign < rosec->alignment_power)
			roalign = rosec->alignment_power;
		    }
		  for (pasty = call_fun->call_list; pasty; pasty = pasty->next)
		    if (pasty->is_pasted)
		      break;
		}
	    }
	  if (align_power (tmp, roalign) + rotmp > overlay_size)
	    break;

	  /* If we add this section, we might need new overlay call
	     stubs.  Add any overlay section calls to dummy_call.  */
	  pasty = NULL;
	  sec_data = spu_elf_section_data (sec);
	  sinfo = sec_data->u.i.stack_info;
	  for (k = 0; k < (unsigned) sinfo->num_fun; ++k)
	    for (call = sinfo->fun[k].call_list; call; call = call->next)
	      if (call->is_pasted)
		{
		  BFD_ASSERT (pasty == NULL);
		  pasty = call;
		}
	      else if (call->fun->sec->linker_mark)
		{
		  if (!copy_callee (&dummy_caller, call))
		    goto err_exit;
		}
	  while (pasty != NULL)
	    {
	      struct function_info *call_fun = pasty->fun;
	      pasty = NULL;
	      for (call = call_fun->call_list; call; call = call->next)
		if (call->is_pasted)
		  {
		    BFD_ASSERT (pasty == NULL);
		    pasty = call;
		  }
		else if (!copy_callee (&dummy_caller, call))
		  goto err_exit;
	    }

	  /* Calculate call stub size.  */
	  num_stubs = 0;
	  for (call = dummy_caller.call_list; call; call = call->next)
	    {
	      unsigned int stub_delta = 1;

	      if (htab->params->ovly_flavour == ovly_soft_icache)
		stub_delta = call->count;
	      num_stubs += stub_delta;

	      /* If the call is within this overlay, we won't need a
		 stub.  */
	      for (k = base; k < i + 1; k++)
		if (call->fun->sec == ovly_sections[2 * k])
		  {
		    num_stubs -= stub_delta;
		    break;
		  }
	    }
	  if (htab->params->ovly_flavour == ovly_soft_icache
	      && num_stubs > htab->params->max_branch)
	    break;
	  if (align_power (tmp, roalign) + rotmp
	      + num_stubs * ovl_stub_size (htab->params) > overlay_size)
	    break;
	  /* Section fits; commit the provisional sizes.  */
	  size = tmp;
	  rosize = rotmp;
	}

      if (i == base)
	{
	  /* Not even one section fits in an empty overlay.  */
	  /* xgettext:c-format */
	  info->callbacks->einfo (_("%pB:%pA%s exceeds overlay size\n"),
				  ovly_sections[2 * i]->owner,
				  ovly_sections[2 * i],
				  ovly_sections[2 * i + 1] ? " + rodata" : "");
	  bfd_set_error (bfd_error_bad_value);
	  goto err_exit;
	}

      /* Release the scratch call list built for stub counting.  */
      while (dummy_caller.call_list != NULL)
	{
	  struct call_info *call = dummy_caller.call_list;
	  dummy_caller.call_list = call->next;
	  free (call);
	}

      ++ovlynum;
      while (base < i)
	ovly_map[base++] = ovlynum;
    }

  /* Emit the overlay linker script.  The open callback is supplied by
     the linker; NOTE(review): assumed to report failure itself rather
     than return NULL — confirm in ld's spuelf emulation.  */
  script = htab->params->spu_elf_open_overlay_script ();

  if (htab->params->ovly_flavour == ovly_soft_icache)
    {
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
	goto file_err;

      if (fprintf (script,
		   " . = ALIGN (%u);\n"
		   " .ovl.init : { *(.ovl.init) }\n"
		   " . = ABSOLUTE (ADDR (.ovl.init));\n",
		   htab->params->line_size) <= 0)
	goto file_err;

      base = 0;
      ovlynum = 1;
      while (base < count)
	{
	  unsigned int indx = ovlynum - 1;
	  unsigned int vma, lma;

	  vma = (indx & (htab->params->num_lines - 1)) << htab->line_size_log2;
	  lma = vma + (((indx >> htab->num_lines_log2) + 1) << 18);

	  if (fprintf (script, " .ovly%u ABSOLUTE (ADDR (.ovl.init)) + %u "
			       ": AT (LOADADDR (.ovl.init) + %u) {\n",
		       ovlynum, vma, lma) <= 0)
	    goto file_err;

	  base = print_one_overlay_section (script, base, count, ovlynum,
					    ovly_map, ovly_sections, info);
	  if (base == (unsigned) -1)
	    goto file_err;

	  if (fprintf (script, "  }\n") <= 0)
	    goto file_err;

	  ovlynum++;
	}

      if (fprintf (script, " . = ABSOLUTE (ADDR (.ovl.init)) + %u;\n",
		   1 << (htab->num_lines_log2 + htab->line_size_log2)) <= 0)
	goto file_err;

      if (fprintf (script, "}\nINSERT AFTER .toe;\n") <= 0)
	goto file_err;
    }
  else
    {
      if (fprintf (script, "SECTIONS\n{\n") <= 0)
	goto file_err;

      if (fprintf (script,
		   " . = ALIGN (16);\n"
		   " .ovl.init : { *(.ovl.init) }\n"
		   " . = ABSOLUTE (ADDR (.ovl.init));\n") <= 0)
	goto file_err;

      /* One OVERLAY statement per buffer region; overlays are
	 interleaved across regions in steps of num_lines.  */
      for (region = 1; region <= htab->params->num_lines; region++)
	{
	  ovlynum = region;
	  base = 0;
	  while (base < count && ovly_map[base] < ovlynum)
	    base++;

	  if (base == count)
	    break;

	  if (region == 1)
	    {
	      /* We need to set lma since we are overlaying .ovl.init.  */
	      if (fprintf (script,
			   " OVERLAY : AT (ALIGN (LOADADDR (.ovl.init) + SIZEOF (.ovl.init), 16))\n {\n") <= 0)
		goto file_err;
	    }
	  else
	    {
	      if (fprintf (script, " OVERLAY :\n {\n") <= 0)
		goto file_err;
	    }

	  while (base < count)
	    {
	      if (fprintf (script, "  .ovly%u {\n", ovlynum) <= 0)
		goto file_err;

	      base = print_one_overlay_section (script, base, count, ovlynum,
						ovly_map, ovly_sections, info);
	      if (base == (unsigned) -1)
		goto file_err;

	      if (fprintf (script, "  }\n") <= 0)
		goto file_err;

	      ovlynum += htab->params->num_lines;
	      while (base < count && ovly_map[base] < ovlynum)
		base++;
	    }

	  if (fprintf (script, " }\n") <= 0)
	    goto file_err;
	}

      if (fprintf (script, "}\nINSERT BEFORE .text;\n") <= 0)
	goto file_err;
    }

  free (ovly_map);
  free (ovly_sections);

  if (fclose (script) != 0)
    goto file_err;

  if (htab->params->auto_overlay & AUTO_RELINK)
    (*htab->params->spu_elf_relink) ();

  /* Success: this pass only produced the script; exit so the linker
     driver can relink using it.  */
  xexit (0);

 file_err:
  bfd_set_error (bfd_error_system_call);
 err_exit:
  info->callbacks->einfo (_("%F%P: auto overlay error: %E\n"));
  xexit (1);
}
4694 
4695 /* Provide an estimate of total stack required.  */
4696 
4697 static bfd_boolean
4698 spu_elf_stack_analysis (struct bfd_link_info *info)
4699 {
4700   struct spu_link_hash_table *htab;
4701   struct _sum_stack_param sum_stack_param;
4702 
4703   if (!discover_functions (info))
4704     return FALSE;
4705 
4706   if (!build_call_tree (info))
4707     return FALSE;
4708 
4709   htab = spu_hash_table (info);
4710   if (htab->params->stack_analysis)
4711     {
4712       info->callbacks->info (_("Stack size for call graph root nodes.\n"));
4713       info->callbacks->minfo (_("\nStack size for functions.  "
4714 				"Annotations: '*' max stack, 't' tail call\n"));
4715     }
4716 
4717   sum_stack_param.emit_stack_syms = htab->params->emit_stack_syms;
4718   sum_stack_param.overall_stack = 0;
4719   if (!for_each_node (sum_stack, info, &sum_stack_param, TRUE))
4720     return FALSE;
4721 
4722   if (htab->params->stack_analysis)
4723     info->callbacks->info (_("Maximum stack required is 0x%v\n"),
4724 			   (bfd_vma) sum_stack_param.overall_stack);
4725   return TRUE;
4726 }
4727 
4728 /* Perform a final link.  */
4729 
4730 static bfd_boolean
4731 spu_elf_final_link (bfd *output_bfd, struct bfd_link_info *info)
4732 {
4733   struct spu_link_hash_table *htab = spu_hash_table (info);
4734 
4735   if (htab->params->auto_overlay)
4736     spu_elf_auto_overlay (info);
4737 
4738   if ((htab->params->stack_analysis
4739        || (htab->params->ovly_flavour == ovly_soft_icache
4740 	   && htab->params->lrlive_analysis))
4741       && !spu_elf_stack_analysis (info))
4742     info->callbacks->einfo (_("%X%P: stack/lrlive analysis error: %E\n"));
4743 
4744   if (!spu_elf_build_stubs (info))
4745     info->callbacks->einfo (_("%F%P: can not build overlay stubs: %E\n"));
4746 
4747   return bfd_elf_final_link (output_bfd, info);
4748 }
4749 
4750 /* Called when not normally emitting relocs, ie. !bfd_link_relocatable (info)
4751    and !info->emitrelocations.  Returns a count of special relocs
4752    that need to be emitted.  */
4753 
4754 static unsigned int
4755 spu_elf_count_relocs (struct bfd_link_info *info, asection *sec)
4756 {
4757   Elf_Internal_Rela *relocs;
4758   unsigned int count = 0;
4759 
4760   relocs = _bfd_elf_link_read_relocs (sec->owner, sec, NULL, NULL,
4761 				      info->keep_memory);
4762   if (relocs != NULL)
4763     {
4764       Elf_Internal_Rela *rel;
4765       Elf_Internal_Rela *relend = relocs + sec->reloc_count;
4766 
4767       for (rel = relocs; rel < relend; rel++)
4768 	{
4769 	  int r_type = ELF32_R_TYPE (rel->r_info);
4770 	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
4771 	    ++count;
4772 	}
4773 
4774       if (elf_section_data (sec)->relocs != relocs)
4775 	free (relocs);
4776     }
4777 
4778   return count;
4779 }
4780 
/* Functions for adding fixup records to .fixup */

/* Each fixup record is one 32-bit word.  */
#define FIXUP_RECORD_SIZE 4

/* Store ADDR as the INDEX'th 32-bit record in the .fixup contents.  */
#define FIXUP_PUT(output_bfd,htab,index,addr) \
	  bfd_put_32 (output_bfd, addr, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
/* Read back the INDEX'th 32-bit record from the .fixup contents.  */
#define FIXUP_GET(output_bfd,htab,index) \
	  bfd_get_32 (output_bfd, \
		      htab->sfixup->contents + FIXUP_RECORD_SIZE * (index))
4791 
4792 /* Store OFFSET in .fixup.  This assumes it will be called with an
4793    increasing OFFSET.  When this OFFSET fits with the last base offset,
4794    it just sets a bit, otherwise it adds a new fixup record.  */
4795 static void
4796 spu_elf_emit_fixup (bfd * output_bfd, struct bfd_link_info *info,
4797 		    bfd_vma offset)
4798 {
4799   struct spu_link_hash_table *htab = spu_hash_table (info);
4800   asection *sfixup = htab->sfixup;
4801   bfd_vma qaddr = offset & ~(bfd_vma) 15;
4802   bfd_vma bit = ((bfd_vma) 8) >> ((offset & 15) >> 2);
4803   if (sfixup->reloc_count == 0)
4804     {
4805       FIXUP_PUT (output_bfd, htab, 0, qaddr | bit);
4806       sfixup->reloc_count++;
4807     }
4808   else
4809     {
4810       bfd_vma base = FIXUP_GET (output_bfd, htab, sfixup->reloc_count - 1);
4811       if (qaddr != (base & ~(bfd_vma) 15))
4812 	{
4813 	  if ((sfixup->reloc_count + 1) * FIXUP_RECORD_SIZE > sfixup->size)
4814 	    _bfd_error_handler (_("fatal error while creating .fixup"));
4815 	  FIXUP_PUT (output_bfd, htab, sfixup->reloc_count, qaddr | bit);
4816 	  sfixup->reloc_count++;
4817 	}
4818       else
4819 	FIXUP_PUT (output_bfd, htab, sfixup->reloc_count - 1, base | bit);
4820     }
4821 }
4822 
4823 /* Apply RELOCS to CONTENTS of INPUT_SECTION from INPUT_BFD.  */
4824 
4825 static int
4826 spu_elf_relocate_section (bfd *output_bfd,
4827 			  struct bfd_link_info *info,
4828 			  bfd *input_bfd,
4829 			  asection *input_section,
4830 			  bfd_byte *contents,
4831 			  Elf_Internal_Rela *relocs,
4832 			  Elf_Internal_Sym *local_syms,
4833 			  asection **local_sections)
4834 {
4835   Elf_Internal_Shdr *symtab_hdr;
4836   struct elf_link_hash_entry **sym_hashes;
4837   Elf_Internal_Rela *rel, *relend;
4838   struct spu_link_hash_table *htab;
4839   asection *ea;
4840   int ret = TRUE;
4841   bfd_boolean emit_these_relocs = FALSE;
4842   bfd_boolean is_ea_sym;
4843   bfd_boolean stubs;
4844   unsigned int iovl = 0;
4845 
4846   htab = spu_hash_table (info);
4847   stubs = (htab->stub_sec != NULL
4848 	   && maybe_needs_stubs (input_section));
4849   iovl = overlay_index (input_section);
4850   ea = bfd_get_section_by_name (output_bfd, "._ea");
4851   symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
4852   sym_hashes = (struct elf_link_hash_entry **) (elf_sym_hashes (input_bfd));
4853 
4854   rel = relocs;
4855   relend = relocs + input_section->reloc_count;
4856   for (; rel < relend; rel++)
4857     {
4858       int r_type;
4859       reloc_howto_type *howto;
4860       unsigned int r_symndx;
4861       Elf_Internal_Sym *sym;
4862       asection *sec;
4863       struct elf_link_hash_entry *h;
4864       const char *sym_name;
4865       bfd_vma relocation;
4866       bfd_vma addend;
4867       bfd_reloc_status_type r;
4868       bfd_boolean unresolved_reloc;
4869       enum _stub_type stub_type;
4870 
4871       r_symndx = ELF32_R_SYM (rel->r_info);
4872       r_type = ELF32_R_TYPE (rel->r_info);
4873       howto = elf_howto_table + r_type;
4874       unresolved_reloc = FALSE;
4875       h = NULL;
4876       sym = NULL;
4877       sec = NULL;
4878       if (r_symndx < symtab_hdr->sh_info)
4879 	{
4880 	  sym = local_syms + r_symndx;
4881 	  sec = local_sections[r_symndx];
4882 	  sym_name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, sec);
4883 	  relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
4884 	}
4885       else
4886 	{
4887 	  if (sym_hashes == NULL)
4888 	    return FALSE;
4889 
4890 	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
4891 
4892 	  if (info->wrap_hash != NULL
4893 	      && (input_section->flags & SEC_DEBUGGING) != 0)
4894 	    h = ((struct elf_link_hash_entry *)
4895 		 unwrap_hash_lookup (info, input_bfd, &h->root));
4896 
4897 	  while (h->root.type == bfd_link_hash_indirect
4898 		 || h->root.type == bfd_link_hash_warning)
4899 	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
4900 
4901 	  relocation = 0;
4902 	  if (h->root.type == bfd_link_hash_defined
4903 	      || h->root.type == bfd_link_hash_defweak)
4904 	    {
4905 	      sec = h->root.u.def.section;
4906 	      if (sec == NULL
4907 		  || sec->output_section == NULL)
4908 		/* Set a flag that will be cleared later if we find a
4909 		   relocation value for this symbol.  output_section
4910 		   is typically NULL for symbols satisfied by a shared
4911 		   library.  */
4912 		unresolved_reloc = TRUE;
4913 	      else
4914 		relocation = (h->root.u.def.value
4915 			      + sec->output_section->vma
4916 			      + sec->output_offset);
4917 	    }
4918 	  else if (h->root.type == bfd_link_hash_undefweak)
4919 	    ;
4920 	  else if (info->unresolved_syms_in_objects == RM_IGNORE
4921 		   && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
4922 	    ;
4923 	  else if (!bfd_link_relocatable (info)
4924 		   && !(r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64))
4925 	    {
4926 	      bfd_boolean err;
4927 	      err = (info->unresolved_syms_in_objects == RM_GENERATE_ERROR
4928 		     || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT);
4929 	      (*info->callbacks->undefined_symbol) (info,
4930 						    h->root.root.string,
4931 						    input_bfd,
4932 						    input_section,
4933 						    rel->r_offset, err);
4934 	    }
4935 	  sym_name = h->root.root.string;
4936 	}
4937 
4938       if (sec != NULL && discarded_section (sec))
4939 	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
4940 					 rel, 1, relend, howto, 0, contents);
4941 
4942       if (bfd_link_relocatable (info))
4943 	continue;
4944 
4945       /* Change "a rt,ra,rb" to "ai rt,ra,0". */
4946       if (r_type == R_SPU_ADD_PIC
4947 	  && h != NULL
4948 	  && !(h->def_regular || ELF_COMMON_DEF_P (h)))
4949 	{
4950 	  bfd_byte *loc = contents + rel->r_offset;
4951 	  loc[0] = 0x1c;
4952 	  loc[1] = 0x00;
4953 	  loc[2] &= 0x3f;
4954 	}
4955 
4956       is_ea_sym = (ea != NULL
4957 		   && sec != NULL
4958 		   && sec->output_section == ea);
4959 
4960       /* If this symbol is in an overlay area, we may need to relocate
4961 	 to the overlay stub.  */
4962       addend = rel->r_addend;
4963       if (stubs
4964 	  && !is_ea_sym
4965 	  && (stub_type = needs_ovl_stub (h, sym, sec, input_section, rel,
4966 					  contents, info)) != no_stub)
4967 	{
4968 	  unsigned int ovl = 0;
4969 	  struct got_entry *g, **head;
4970 
4971 	  if (stub_type != nonovl_stub)
4972 	    ovl = iovl;
4973 
4974 	  if (h != NULL)
4975 	    head = &h->got.glist;
4976 	  else
4977 	    head = elf_local_got_ents (input_bfd) + r_symndx;
4978 
4979 	  for (g = *head; g != NULL; g = g->next)
4980 	    if (htab->params->ovly_flavour == ovly_soft_icache
4981 		? (g->ovl == ovl
4982 		   && g->br_addr == (rel->r_offset
4983 				     + input_section->output_offset
4984 				     + input_section->output_section->vma))
4985 		: g->addend == addend && (g->ovl == ovl || g->ovl == 0))
4986 	      break;
4987 	  if (g == NULL)
4988 	    abort ();
4989 
4990 	  relocation = g->stub_addr;
4991 	  addend = 0;
4992 	}
4993       else
4994 	{
4995 	  /* For soft icache, encode the overlay index into addresses.  */
4996 	  if (htab->params->ovly_flavour == ovly_soft_icache
4997 	      && (r_type == R_SPU_ADDR16_HI
4998 		  || r_type == R_SPU_ADDR32 || r_type == R_SPU_REL32)
4999 	      && !is_ea_sym)
5000 	    {
5001 	      unsigned int ovl = overlay_index (sec);
5002 	      if (ovl != 0)
5003 		{
5004 		  unsigned int set_id = ((ovl - 1) >> htab->num_lines_log2) + 1;
5005 		  relocation += set_id << 18;
5006 		}
5007 	    }
5008 	}
5009 
5010       if (htab->params->emit_fixups && !bfd_link_relocatable (info)
5011 	  && (input_section->flags & SEC_ALLOC) != 0
5012 	  && r_type == R_SPU_ADDR32)
5013 	{
5014 	  bfd_vma offset;
5015 	  offset = rel->r_offset + input_section->output_section->vma
5016 		   + input_section->output_offset;
5017 	  spu_elf_emit_fixup (output_bfd, info, offset);
5018 	}
5019 
5020       if (unresolved_reloc)
5021 	;
5022       else if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5023 	{
5024 	  if (is_ea_sym)
5025 	    {
5026 	      /* ._ea is a special section that isn't allocated in SPU
5027 		 memory, but rather occupies space in PPU memory as
5028 		 part of an embedded ELF image.  If this reloc is
5029 		 against a symbol defined in ._ea, then transform the
5030 		 reloc into an equivalent one without a symbol
5031 		 relative to the start of the ELF image.  */
5032 	      rel->r_addend += (relocation
5033 				- ea->vma
5034 				+ elf_section_data (ea)->this_hdr.sh_offset);
5035 	      rel->r_info = ELF32_R_INFO (0, r_type);
5036 	    }
5037 	  emit_these_relocs = TRUE;
5038 	  continue;
5039 	}
5040       else if (is_ea_sym)
5041 	unresolved_reloc = TRUE;
5042 
5043       if (unresolved_reloc
5044 	  && _bfd_elf_section_offset (output_bfd, info, input_section,
5045 				      rel->r_offset) != (bfd_vma) -1)
5046 	{
5047 	  _bfd_error_handler
5048 	    /* xgettext:c-format */
5049 	    (_("%pB(%s+%#" PRIx64 "): "
5050 	       "unresolvable %s relocation against symbol `%s'"),
5051 	     input_bfd,
5052 	     bfd_get_section_name (input_bfd, input_section),
5053 	     (uint64_t) rel->r_offset,
5054 	     howto->name,
5055 	     sym_name);
5056 	  ret = FALSE;
5057 	}
5058 
5059       r = _bfd_final_link_relocate (howto,
5060 				    input_bfd,
5061 				    input_section,
5062 				    contents,
5063 				    rel->r_offset, relocation, addend);
5064 
5065       if (r != bfd_reloc_ok)
5066 	{
5067 	  const char *msg = (const char *) 0;
5068 
5069 	  switch (r)
5070 	    {
5071 	    case bfd_reloc_overflow:
5072 	      (*info->callbacks->reloc_overflow)
5073 		(info, (h ? &h->root : NULL), sym_name, howto->name,
5074 		 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
5075 	      break;
5076 
5077 	    case bfd_reloc_undefined:
5078 	      (*info->callbacks->undefined_symbol)
5079 		(info, sym_name, input_bfd, input_section, rel->r_offset, TRUE);
5080 	      break;
5081 
5082 	    case bfd_reloc_outofrange:
5083 	      msg = _("internal error: out of range error");
5084 	      goto common_error;
5085 
5086 	    case bfd_reloc_notsupported:
5087 	      msg = _("internal error: unsupported relocation error");
5088 	      goto common_error;
5089 
5090 	    case bfd_reloc_dangerous:
5091 	      msg = _("internal error: dangerous error");
5092 	      goto common_error;
5093 
5094 	    default:
5095 	      msg = _("internal error: unknown error");
5096 	      /* fall through */
5097 
5098 	    common_error:
5099 	      ret = FALSE;
5100 	      (*info->callbacks->warning) (info, msg, sym_name, input_bfd,
5101 					   input_section, rel->r_offset);
5102 	      break;
5103 	    }
5104 	}
5105     }
5106 
5107   if (ret
5108       && emit_these_relocs
5109       && !info->emitrelocations)
5110     {
5111       Elf_Internal_Rela *wrel;
5112       Elf_Internal_Shdr *rel_hdr;
5113 
5114       wrel = rel = relocs;
5115       relend = relocs + input_section->reloc_count;
5116       for (; rel < relend; rel++)
5117 	{
5118 	  int r_type;
5119 
5120 	  r_type = ELF32_R_TYPE (rel->r_info);
5121 	  if (r_type == R_SPU_PPU32 || r_type == R_SPU_PPU64)
5122 	    *wrel++ = *rel;
5123 	}
5124       input_section->reloc_count = wrel - relocs;
5125       /* Backflips for _bfd_elf_link_output_relocs.  */
5126       rel_hdr = _bfd_elf_single_rel_hdr (input_section);
5127       rel_hdr->sh_size = input_section->reloc_count * rel_hdr->sh_entsize;
5128       ret = 2;
5129     }
5130 
5131   return ret;
5132 }
5133 
/* elf_backend_finish_dynamic_sections hook.  A no-op for SPU: this
   backend creates no dynamic sections that need finishing, so the
   hook exists only to satisfy the backend interface.  */

static bfd_boolean
spu_elf_finish_dynamic_sections (bfd *output_bfd ATTRIBUTE_UNUSED,
				 struct bfd_link_info *info ATTRIBUTE_UNUSED)
{
  return TRUE;
}
5140 
5141 /* Adjust _SPUEAR_ syms to point at their overlay stubs.  */
5142 
5143 static int
5144 spu_elf_output_symbol_hook (struct bfd_link_info *info,
5145 			    const char *sym_name ATTRIBUTE_UNUSED,
5146 			    Elf_Internal_Sym *sym,
5147 			    asection *sym_sec ATTRIBUTE_UNUSED,
5148 			    struct elf_link_hash_entry *h)
5149 {
5150   struct spu_link_hash_table *htab = spu_hash_table (info);
5151 
5152   if (!bfd_link_relocatable (info)
5153       && htab->stub_sec != NULL
5154       && h != NULL
5155       && (h->root.type == bfd_link_hash_defined
5156 	  || h->root.type == bfd_link_hash_defweak)
5157       && h->def_regular
5158       && strncmp (h->root.root.string, "_SPUEAR_", 8) == 0)
5159     {
5160       struct got_entry *g;
5161 
5162       for (g = h->got.glist; g != NULL; g = g->next)
5163 	if (htab->params->ovly_flavour == ovly_soft_icache
5164 	    ? g->br_addr == g->stub_addr
5165 	    : g->addend == 0 && g->ovl == 0)
5166 	  {
5167 	    sym->st_shndx = (_bfd_elf_section_from_bfd_section
5168 			     (htab->stub_sec[0]->output_section->owner,
5169 			      htab->stub_sec[0]->output_section));
5170 	    sym->st_value = g->stub_addr;
5171 	    break;
5172 	  }
5173     }
5174 
5175   return 1;
5176 }
5177 
/* Non-zero when SPU "plugin" mode is in effect; consulted by
   spu_elf_post_process_headers to mark the output e_type ET_DYN.  */
static int spu_plugin = 0;

/* Record whether plugin mode is enabled.  Called from outside BFD
   (exported, non-static); VAL non-zero enables plugin handling.  */
void
spu_elf_plugin (int val)
{
  spu_plugin = val;
}
5185 
5186 /* Set ELF header e_type for plugins.  */
5187 
5188 static void
5189 spu_elf_post_process_headers (bfd *abfd, struct bfd_link_info *info)
5190 {
5191   if (spu_plugin)
5192     {
5193       Elf_Internal_Ehdr *i_ehdrp = elf_elfheader (abfd);
5194 
5195       i_ehdrp->e_type = ET_DYN;
5196     }
5197 
5198   _bfd_elf_post_process_headers (abfd, info);
5199 }
5200 
5201 /* We may add an extra PT_LOAD segment for .toe.  We also need extra
5202    segments for overlays.  */
5203 
5204 static int
5205 spu_elf_additional_program_headers (bfd *abfd, struct bfd_link_info *info)
5206 {
5207   int extra = 0;
5208   asection *sec;
5209 
5210   if (info != NULL)
5211     {
5212       struct spu_link_hash_table *htab = spu_hash_table (info);
5213       extra = htab->num_overlays;
5214     }
5215 
5216   if (extra)
5217     ++extra;
5218 
5219   sec = bfd_get_section_by_name (abfd, ".toe");
5220   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
5221     ++extra;
5222 
5223   return extra;
5224 }
5225 
5226 /* Remove .toe section from other PT_LOAD segments and put it in
5227    a segment of its own.  Put overlays in separate segments too.  */
5228 
static bfd_boolean
spu_elf_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
{
  asection *toe, *s;
  struct elf_segment_map *m, *m_overlay;
  struct elf_segment_map **p, **p_overlay, **first_load;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  /* Split the first multi-section PT_LOAD segment that contains .toe
     or an overlay section, so that section ends up in a PT_LOAD of
     its own.  Only the first such section found is handled (note the
     "break" below).  */
  toe = bfd_get_section_by_name (abfd, ".toe");
  for (m = elf_seg_map (abfd); m != NULL; m = m->next)
    if (m->p_type == PT_LOAD && m->count > 1)
      for (i = 0; i < m->count; i++)
	if ((s = m->sections[i]) == toe
	    || spu_elf_section_data (s)->u.o.ovl_index != 0)
	  {
	    struct elf_segment_map *m2;
	    bfd_vma amt;

	    /* Any sections following S move into a new PT_LOAD
	       segment inserted after M.  */
	    if (i + 1 < m->count)
	      {
		amt = sizeof (struct elf_segment_map);
		amt += (m->count - (i + 2)) * sizeof (m->sections[0]);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->count = m->count - (i + 1);
		memcpy (m2->sections, m->sections + i + 1,
			m2->count * sizeof (m->sections[0]));
		m2->p_type = PT_LOAD;
		m2->next = m->next;
		m->next = m2;
	      }
	    m->count = 1;
	    /* If S was not the first section of M, keep the leading
	       sections in M and give S a fresh single-section
	       PT_LOAD segment of its own.  Otherwise S simply stays
	       as M's only section (m->count = 1 above).  */
	    if (i != 0)
	      {
		m->count = i;
		amt = sizeof (struct elf_segment_map);
		m2 = bfd_zalloc (abfd, amt);
		if (m2 == NULL)
		  return FALSE;
		m2->p_type = PT_LOAD;
		m2->count = 1;
		m2->sections[0] = s;
		m2->next = m->next;
		m->next = m2;
	      }
	    break;
	  }


  /* Some SPU ELF loaders ignore the PF_OVERLAY flag and just load all
     PT_LOAD segments.  This can cause the .ovl.init section to be
     overwritten with the contents of some overlay segment.  To work
     around this issue, we ensure that all PF_OVERLAY segments are
     sorted first amongst the program headers; this ensures that even
     with a broken loader, the .ovl.init section (which is not marked
     as PF_OVERLAY) will be placed into SPU local store on startup.  */

  /* Move all overlay segments onto a separate list.  */
  p = &elf_seg_map (abfd);
  p_overlay = &m_overlay;
  m_overlay = NULL;
  first_load = NULL;
  while (*p != NULL)
    {
      if ((*p)->p_type == PT_LOAD)
	{
	  /* Remember the first PT_LOAD so the overlay list can be
	     re-inserted ahead of it below.  */
	  if (!first_load)
	    first_load = p;
	  /* Single-section segments holding an overlay section are
	     unlinked here and appended to the overlay list.  */
	  if ((*p)->count == 1
	      && spu_elf_section_data ((*p)->sections[0])->u.o.ovl_index != 0)
	    {
	      m = *p;
	      *p = m->next;
	      *p_overlay = m;
	      p_overlay = &m->next;
	      continue;
	    }
	}
      p = &((*p)->next);
    }

  /* Re-insert overlay segments at the head of the segment map.  */
  if (m_overlay != NULL)
    {
      p = first_load;
      if (*p != NULL && (*p)->p_type == PT_LOAD && (*p)->includes_filehdr)
	/* It doesn't really make sense for someone to include the ELF
	   file header into an spu image, but if they do the code that
	   assigns p_offset needs to see the segment containing the
	   header first.  */
	p = &(*p)->next;
      *p_overlay = *p;
      *p = m_overlay;
    }

  return TRUE;
}
5330 
5331 /* Tweak the section type of .note.spu_name.  */
5332 
5333 static bfd_boolean
5334 spu_elf_fake_sections (bfd *obfd ATTRIBUTE_UNUSED,
5335 		       Elf_Internal_Shdr *hdr,
5336 		       asection *sec)
5337 {
5338   if (strcmp (sec->name, SPU_PTNOTE_SPUNAME) == 0)
5339     hdr->sh_type = SHT_NOTE;
5340   return TRUE;
5341 }
5342 
5343 /* Tweak phdrs before writing them out.  */
5344 
static int
spu_elf_modify_program_headers (bfd *abfd, struct bfd_link_info *info)
{
  const struct elf_backend_data *bed;
  struct elf_obj_tdata *tdata;
  Elf_Internal_Phdr *phdr, *last;
  struct spu_link_hash_table *htab;
  unsigned int count;
  unsigned int i;

  if (info == NULL)
    return TRUE;

  bed = get_elf_backend_data (abfd);
  tdata = elf_tdata (abfd);
  phdr = tdata->phdr;
  count = elf_program_header_size (abfd) / bed->s->sizeof_phdr;
  htab = spu_hash_table (info);
  if (htab->num_overlays != 0)
    {
      struct elf_segment_map *m;
      unsigned int o;

      /* Walk phdrs in parallel with the segment map; I indexes the
	 phdr matching segment map entry M.  */
      for (i = 0, m = elf_seg_map (abfd); m; ++i, m = m->next)
	if (m->count != 0
	    && (o = spu_elf_section_data (m->sections[0])->u.o.ovl_index) != 0)
	  {
	    /* Mark this as an overlay header.  */
	    phdr[i].p_flags |= PF_OVERLAY;

	    if (htab->ovtab != NULL && htab->ovtab->size != 0
		&& htab->params->ovly_flavour != ovly_soft_icache)
	      {
		bfd_byte *p = htab->ovtab->contents;
		/* Overlay O's _ovly_table entry is 16 bytes; the file
		   offset field lives 8 bytes in.  */
		unsigned int off = o * 16 + 8;

		/* Write file_off into _ovly_table.  */
		bfd_put_32 (htab->ovtab->owner, phdr[i].p_offset, p + off);
	      }
	  }
      /* Soft-icache has its file offset put in .ovl.init.  */
      if (htab->init != NULL && htab->init->size != 0)
	{
	  bfd_vma val = elf_section_data (htab->ovl_sec[0])->this_hdr.sh_offset;

	  bfd_put_32 (htab->init->owner, val, htab->init->contents + 4);
	}
    }

  /* Round up p_filesz and p_memsz of PT_LOAD segments to multiples
     of 16.  This should always be possible when using the standard
     linker scripts, but don't create overlapping segments if
     someone is playing games with linker scripts.  */
  /* First pass: scan PT_LOAD phdrs from last to first, checking that
     rounding each size up would not run into the following (higher
     addressed) PT_LOAD.  Any conflict breaks out early, leaving I at
     the index of the offending phdr.  */
  last = NULL;
  for (i = count; i-- != 0; )
    if (phdr[i].p_type == PT_LOAD)
      {
	unsigned adjust;

	/* Bytes needed to round p_filesz up to a multiple of 16.  */
	adjust = -phdr[i].p_filesz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_offset + phdr[i].p_filesz > last->p_offset - adjust)
	  break;

	adjust = -phdr[i].p_memsz & 15;
	if (adjust != 0
	    && last != NULL
	    && phdr[i].p_filesz != 0
	    && phdr[i].p_vaddr + phdr[i].p_memsz > last->p_vaddr - adjust
	    && phdr[i].p_vaddr + phdr[i].p_memsz <= last->p_vaddr)
	  break;

	if (phdr[i].p_filesz != 0)
	  last = &phdr[i];
      }

  /* Second pass: only if the scan above completed (I wrapped to
     (unsigned int) -1, i.e. no conflict found) actually apply the
     rounding to every PT_LOAD phdr.  */
  if (i == (unsigned int) -1)
    for (i = count; i-- != 0; )
      if (phdr[i].p_type == PT_LOAD)
	{
	unsigned adjust;

	adjust = -phdr[i].p_filesz & 15;
	phdr[i].p_filesz += adjust;

	adjust = -phdr[i].p_memsz & 15;
	phdr[i].p_memsz += adjust;
      }

  return TRUE;
}
5437 
/* Size the .fixup section (htab->sfixup) when fixup emission is
   enabled: count the quadwords in allocated sections that contain at
   least one R_SPU_ADDR32 reloc, then allocate zeroed contents for one
   FIXUP_RECORD_SIZE record per quadword plus a terminating sentinel.
   Returns FALSE on allocation or reloc-reading failure.  */
bfd_boolean
spu_elf_size_sections (bfd * output_bfd, struct bfd_link_info *info)
{
  struct spu_link_hash_table *htab = spu_hash_table (info);
  if (htab->params->emit_fixups)
    {
      asection *sfixup = htab->sfixup;
      int fixup_count = 0;
      bfd *ibfd;
      size_t size;

      for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
	{
	  asection *isec;

	  if (bfd_get_flavour (ibfd) != bfd_target_elf_flavour)
	    continue;

	  /* Walk over each section attached to the input bfd.  */
	  for (isec = ibfd->sections; isec != NULL; isec = isec->next)
	    {
	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
	      bfd_vma base_end;

	      /* If there aren't any relocs, then there's nothing more
		 to do.  */
	      if ((isec->flags & SEC_ALLOC) == 0
		  || (isec->flags & SEC_RELOC) == 0
		  || isec->reloc_count == 0)
		continue;

	      /* Get the relocs.  */
	      /* NOTE(review): when !info->keep_memory the relocs
		 returned here are not cached on the section and are
		 never explicitly freed in this loop; presumably they
		 live on a bfd objalloc released later — confirm
		 against _bfd_elf_link_read_relocs semantics.  */
	      internal_relocs =
		_bfd_elf_link_read_relocs (ibfd, isec, NULL, NULL,
					   info->keep_memory);
	      if (internal_relocs == NULL)
		return FALSE;

	      /* 1 quadword can contain up to 4 R_SPU_ADDR32
		 relocations.  They are stored in a single word by
		 saving the upper 28 bits of the address and setting the
		 lower 4 bits to a bit mask of the words that have the
		 relocation.  BASE_END keeps track of the next quadword. */
	      irela = internal_relocs;
	      irelaend = irela + isec->reloc_count;
	      base_end = 0;
	      for (; irela < irelaend; irela++)
		if (ELF32_R_TYPE (irela->r_info) == R_SPU_ADDR32
		    && irela->r_offset >= base_end)
		  {
		    /* Count one fixup record per 16-byte quadword
		       containing any ADDR32 reloc; relies on relocs
		       being sorted by offset.  */
		    base_end = (irela->r_offset & ~(bfd_vma) 15) + 16;
		    fixup_count++;
		  }
	    }
	}

      /* We always have a NULL fixup as a sentinel */
      size = (fixup_count + 1) * FIXUP_RECORD_SIZE;
      if (!bfd_set_section_size (output_bfd, sfixup, size))
	return FALSE;
      sfixup->contents = (bfd_byte *) bfd_zalloc (info->input_bfds, size);
      if (sfixup->contents == NULL)
	return FALSE;
    }
  return TRUE;
}
5504 
/* Target vector identity.  SPU is big-endian only.  */
#define TARGET_BIG_SYM		spu_elf32_vec
#define TARGET_BIG_NAME		"elf32-spu"
#define ELF_ARCH		bfd_arch_spu
#define ELF_TARGET_ID		SPU_ELF_DATA
#define ELF_MACHINE_CODE	EM_SPU
/* This matches the alignment need for DMA.  */
#define ELF_MAXPAGESIZE		0x80
#define elf_backend_rela_normal		1
#define elf_backend_can_gc_sections	1

/* Reloc handling and per-symbol hooks.  */
#define bfd_elf32_bfd_reloc_type_lookup		spu_elf_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup		spu_elf_reloc_name_lookup
#define elf_info_to_howto			spu_elf_info_to_howto
#define elf_backend_count_relocs		spu_elf_count_relocs
#define elf_backend_relocate_section		spu_elf_relocate_section
#define elf_backend_finish_dynamic_sections	spu_elf_finish_dynamic_sections
#define elf_backend_symbol_processing		spu_elf_backend_symbol_processing
#define elf_backend_link_output_symbol_hook	spu_elf_output_symbol_hook
#define elf_backend_object_p			spu_elf_object_p
#define bfd_elf32_new_section_hook		spu_elf_new_section_hook
#define bfd_elf32_bfd_link_hash_table_create	spu_elf_link_hash_table_create

/* Program header / section layout hooks (overlay and .toe support).  */
#define elf_backend_additional_program_headers	spu_elf_additional_program_headers
#define elf_backend_modify_segment_map		spu_elf_modify_segment_map
#define elf_backend_modify_program_headers	spu_elf_modify_program_headers
#define elf_backend_post_process_headers	spu_elf_post_process_headers
#define elf_backend_fake_sections		spu_elf_fake_sections
#define elf_backend_special_sections		spu_elf_special_sections
#define bfd_elf32_bfd_final_link		spu_elf_final_link

#include "elf32-target.h"
5536