1 /* BFD back-end for Renesas Super-H COFF binaries.
2    Copyright (C) 1993-2024 Free Software Foundation, Inc.
3    Contributed by Cygnus Support.
4    Written by Steve Chamberlain, <sac@cygnus.com>.
5    Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
6 
7    This file is part of BFD, the Binary File Descriptor library.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software
21    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
22    MA 02110-1301, USA.  */
23 
24 #include "sysdep.h"
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "bfdlink.h"
29 #include "coff/sh.h"
30 #include "coff/internal.h"
31 
32 #undef  bfd_pe_print_pdata
33 
34 #ifdef COFF_WITH_PE
35 #include "coff/pe.h"
36 
37 #ifndef COFF_IMAGE_WITH_PE
38 static bool sh_align_load_span
39   (bfd *, asection *, bfd_byte *,
40    bool (*) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
41    void *, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bool *);
42 
43 #define _bfd_sh_align_load_span sh_align_load_span
44 #endif
45 
46 #define	bfd_pe_print_pdata   _bfd_pe_print_ce_compressed_pdata
47 
48 #else
49 
50 #define	bfd_pe_print_pdata   NULL
51 
52 #endif /* COFF_WITH_PE.  */
53 
54 #include "libcoff.h"
55 
56 /* Internal functions.  */
57 
58 #ifdef COFF_WITH_PE
59 /* Can't build import tables with 2**4 alignment.  */
60 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
61 #else
62 /* Default section alignment to 2**4.  */
63 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
64 #endif
65 
66 #ifdef COFF_IMAGE_WITH_PE
67 /* Align PE executables.  */
68 #define COFF_PAGE_SIZE 0x1000
69 #endif
70 
71 /* Generate long file names.  */
72 #define COFF_LONG_FILENAMES
73 
74 #ifdef COFF_WITH_PE
75 /* Return TRUE if this relocation should
76    appear in the output .reloc section.  */
77 
78 static bool
79 in_reloc_p (bfd * abfd ATTRIBUTE_UNUSED,
80 	    reloc_howto_type * howto)
81 {
82   return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
83 }
84 #endif
85 
86 static bfd_reloc_status_type
87 sh_reloc (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
88 static bool
89 sh_relocate_section (bfd *, struct bfd_link_info *, bfd *, asection *,
90 		     bfd_byte *, struct internal_reloc *,
91 		     struct internal_syment *, asection **);
92 static bool
93 sh_align_loads (bfd *, asection *, struct internal_reloc *,
94 		bfd_byte *, bool *);
95 
96 /* The supported relocations.  There are a lot of relocations defined
97    in coff/internal.h which we do not expect to ever see.  */
98 static reloc_howto_type sh_coff_howtos[] =
99 {
100   EMPTY_HOWTO (0),
101   EMPTY_HOWTO (1),
102 #ifdef COFF_WITH_PE
103   /* Windows CE */
104   HOWTO (R_SH_IMM32CE,		/* type */
105 	 0,			/* rightshift */
106 	 4,			/* size */
107 	 32,			/* bitsize */
108 	 false,			/* pc_relative */
109 	 0,			/* bitpos */
110 	 complain_overflow_bitfield, /* complain_on_overflow */
111 	 sh_reloc,		/* special_function */
112 	 "r_imm32ce",		/* name */
113 	 true,			/* partial_inplace */
114 	 0xffffffff,		/* src_mask */
115 	 0xffffffff,		/* dst_mask */
116 	 false),		/* pcrel_offset */
117 #else
118   EMPTY_HOWTO (2),
119 #endif
120   EMPTY_HOWTO (3), /* R_SH_PCREL8 */
121   EMPTY_HOWTO (4), /* R_SH_PCREL16 */
122   EMPTY_HOWTO (5), /* R_SH_HIGH8 */
123   EMPTY_HOWTO (6), /* R_SH_IMM24 */
124   EMPTY_HOWTO (7), /* R_SH_LOW16 */
125   EMPTY_HOWTO (8),
126   EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
127 
128   HOWTO (R_SH_PCDISP8BY2,	/* type */
129 	 1,			/* rightshift */
130 	 2,			/* size */
131 	 8,			/* bitsize */
132 	 true,			/* pc_relative */
133 	 0,			/* bitpos */
134 	 complain_overflow_signed, /* complain_on_overflow */
135 	 sh_reloc,		/* special_function */
136 	 "r_pcdisp8by2",	/* name */
137 	 true,			/* partial_inplace */
138 	 0xff,			/* src_mask */
139 	 0xff,			/* dst_mask */
140 	 true),			/* pcrel_offset */
141 
142   EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
143 
144   HOWTO (R_SH_PCDISP,		/* type */
145 	 1,			/* rightshift */
146 	 2,			/* size */
147 	 12,			/* bitsize */
148 	 true,			/* pc_relative */
149 	 0,			/* bitpos */
150 	 complain_overflow_signed, /* complain_on_overflow */
151 	 sh_reloc,		/* special_function */
152 	 "r_pcdisp12by2",	/* name */
153 	 true,			/* partial_inplace */
154 	 0xfff,			/* src_mask */
155 	 0xfff,			/* dst_mask */
156 	 true),			/* pcrel_offset */
157 
158   EMPTY_HOWTO (13),
159 
160   HOWTO (R_SH_IMM32,		/* type */
161 	 0,			/* rightshift */
162 	 4,			/* size */
163 	 32,			/* bitsize */
164 	 false,			/* pc_relative */
165 	 0,			/* bitpos */
166 	 complain_overflow_bitfield, /* complain_on_overflow */
167 	 sh_reloc,		/* special_function */
168 	 "r_imm32",		/* name */
169 	 true,			/* partial_inplace */
170 	 0xffffffff,		/* src_mask */
171 	 0xffffffff,		/* dst_mask */
172 	 false),		/* pcrel_offset */
173 
174   EMPTY_HOWTO (15),
175 #ifdef COFF_WITH_PE
176   HOWTO (R_SH_IMAGEBASE,	/* type */
177 	 0,			/* rightshift */
178 	 4,			/* size */
179 	 32,			/* bitsize */
180 	 false,			/* pc_relative */
181 	 0,			/* bitpos */
182 	 complain_overflow_bitfield, /* complain_on_overflow */
183 	 sh_reloc,		/* special_function */
184 	 "rva32",		/* name */
185 	 true,			/* partial_inplace */
186 	 0xffffffff,		/* src_mask */
187 	 0xffffffff,		/* dst_mask */
188 	 false),		/* pcrel_offset */
189 #else
190   EMPTY_HOWTO (16), /* R_SH_IMM8 */
191 #endif
192   EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
193   EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
194   EMPTY_HOWTO (19), /* R_SH_IMM4 */
195   EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
196   EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
197 
198   HOWTO (R_SH_PCRELIMM8BY2,	/* type */
199 	 1,			/* rightshift */
200 	 2,			/* size */
201 	 8,			/* bitsize */
202 	 true,			/* pc_relative */
203 	 0,			/* bitpos */
204 	 complain_overflow_unsigned, /* complain_on_overflow */
205 	 sh_reloc,		/* special_function */
206 	 "r_pcrelimm8by2",	/* name */
207 	 true,			/* partial_inplace */
208 	 0xff,			/* src_mask */
209 	 0xff,			/* dst_mask */
210 	 true),			/* pcrel_offset */
211 
212   HOWTO (R_SH_PCRELIMM8BY4,	/* type */
213 	 2,			/* rightshift */
214 	 2,			/* size */
215 	 8,			/* bitsize */
216 	 true,			/* pc_relative */
217 	 0,			/* bitpos */
218 	 complain_overflow_unsigned, /* complain_on_overflow */
219 	 sh_reloc,		/* special_function */
220 	 "r_pcrelimm8by4",	/* name */
221 	 true,			/* partial_inplace */
222 	 0xff,			/* src_mask */
223 	 0xff,			/* dst_mask */
224 	 true),			/* pcrel_offset */
225 
226   HOWTO (R_SH_IMM16,		/* type */
227 	 0,			/* rightshift */
228 	 2,			/* size */
229 	 16,			/* bitsize */
230 	 false,			/* pc_relative */
231 	 0,			/* bitpos */
232 	 complain_overflow_bitfield, /* complain_on_overflow */
233 	 sh_reloc,		/* special_function */
234 	 "r_imm16",		/* name */
235 	 true,			/* partial_inplace */
236 	 0xffff,		/* src_mask */
237 	 0xffff,		/* dst_mask */
238 	 false),		/* pcrel_offset */
239 
240   HOWTO (R_SH_SWITCH16,		/* type */
241 	 0,			/* rightshift */
242 	 2,			/* size */
243 	 16,			/* bitsize */
244 	 false,			/* pc_relative */
245 	 0,			/* bitpos */
246 	 complain_overflow_bitfield, /* complain_on_overflow */
247 	 sh_reloc,		/* special_function */
248 	 "r_switch16",		/* name */
249 	 true,			/* partial_inplace */
250 	 0xffff,		/* src_mask */
251 	 0xffff,		/* dst_mask */
252 	 false),		/* pcrel_offset */
253 
254   HOWTO (R_SH_SWITCH32,		/* type */
255 	 0,			/* rightshift */
256 	 4,			/* size */
257 	 32,			/* bitsize */
258 	 false,			/* pc_relative */
259 	 0,			/* bitpos */
260 	 complain_overflow_bitfield, /* complain_on_overflow */
261 	 sh_reloc,		/* special_function */
262 	 "r_switch32",		/* name */
263 	 true,			/* partial_inplace */
264 	 0xffffffff,		/* src_mask */
265 	 0xffffffff,		/* dst_mask */
266 	 false),		/* pcrel_offset */
267 
268   HOWTO (R_SH_USES,		/* type */
269 	 0,			/* rightshift */
270 	 2,			/* size */
271 	 16,			/* bitsize */
272 	 false,			/* pc_relative */
273 	 0,			/* bitpos */
274 	 complain_overflow_bitfield, /* complain_on_overflow */
275 	 sh_reloc,		/* special_function */
276 	 "r_uses",		/* name */
277 	 true,			/* partial_inplace */
278 	 0xffff,		/* src_mask */
279 	 0xffff,		/* dst_mask */
280 	 false),		/* pcrel_offset */
281 
282   HOWTO (R_SH_COUNT,		/* type */
283 	 0,			/* rightshift */
284 	 4,			/* size */
285 	 32,			/* bitsize */
286 	 false,			/* pc_relative */
287 	 0,			/* bitpos */
288 	 complain_overflow_bitfield, /* complain_on_overflow */
289 	 sh_reloc,		/* special_function */
290 	 "r_count",		/* name */
291 	 true,			/* partial_inplace */
292 	 0xffffffff,		/* src_mask */
293 	 0xffffffff,		/* dst_mask */
294 	 false),		/* pcrel_offset */
295 
296   HOWTO (R_SH_ALIGN,		/* type */
297 	 0,			/* rightshift */
298 	 4,			/* size */
299 	 32,			/* bitsize */
300 	 false,			/* pc_relative */
301 	 0,			/* bitpos */
302 	 complain_overflow_bitfield, /* complain_on_overflow */
303 	 sh_reloc,		/* special_function */
304 	 "r_align",		/* name */
305 	 true,			/* partial_inplace */
306 	 0xffffffff,		/* src_mask */
307 	 0xffffffff,		/* dst_mask */
308 	 false),		/* pcrel_offset */
309 
310   HOWTO (R_SH_CODE,		/* type */
311 	 0,			/* rightshift */
312 	 4,			/* size */
313 	 32,			/* bitsize */
314 	 false,			/* pc_relative */
315 	 0,			/* bitpos */
316 	 complain_overflow_bitfield, /* complain_on_overflow */
317 	 sh_reloc,		/* special_function */
318 	 "r_code",		/* name */
319 	 true,			/* partial_inplace */
320 	 0xffffffff,		/* src_mask */
321 	 0xffffffff,		/* dst_mask */
322 	 false),		/* pcrel_offset */
323 
324   HOWTO (R_SH_DATA,		/* type */
325 	 0,			/* rightshift */
326 	 4,			/* size */
327 	 32,			/* bitsize */
328 	 false,			/* pc_relative */
329 	 0,			/* bitpos */
330 	 complain_overflow_bitfield, /* complain_on_overflow */
331 	 sh_reloc,		/* special_function */
332 	 "r_data",		/* name */
333 	 true,			/* partial_inplace */
334 	 0xffffffff,		/* src_mask */
335 	 0xffffffff,		/* dst_mask */
336 	 false),		/* pcrel_offset */
337 
338   HOWTO (R_SH_LABEL,		/* type */
339 	 0,			/* rightshift */
340 	 4,			/* size */
341 	 32,			/* bitsize */
342 	 false,			/* pc_relative */
343 	 0,			/* bitpos */
344 	 complain_overflow_bitfield, /* complain_on_overflow */
345 	 sh_reloc,		/* special_function */
346 	 "r_label",		/* name */
347 	 true,			/* partial_inplace */
348 	 0xffffffff,		/* src_mask */
349 	 0xffffffff,		/* dst_mask */
350 	 false),		/* pcrel_offset */
351 
352   HOWTO (R_SH_SWITCH8,		/* type */
353 	 0,			/* rightshift */
354 	 1,			/* size */
355 	 8,			/* bitsize */
356 	 false,			/* pc_relative */
357 	 0,			/* bitpos */
358 	 complain_overflow_bitfield, /* complain_on_overflow */
359 	 sh_reloc,		/* special_function */
360 	 "r_switch8",		/* name */
361 	 true,			/* partial_inplace */
362 	 0xff,			/* src_mask */
363 	 0xff,			/* dst_mask */
364 	 false)			/* pcrel_offset */
365 };
366 
367 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
368 
369 /* Check for a bad magic number.  */
370 #define BADMAG(x) SHBADMAG(x)
371 
372 /* Customize coffcode.h (this is not currently used).  */
373 #define SH 1
374 
375 /* FIXME: This should not be set here.  */
376 #define __A_MAGIC_SET__
377 
378 #ifndef COFF_WITH_PE
379 /* Swap the r_offset field in and out.  */
380 #define SWAP_IN_RELOC_OFFSET  H_GET_32
381 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
382 
383 /* Swap out extra information in the reloc structure.  */
384 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst)	\
385   do						\
386     {						\
387       dst->r_stuff[0] = 'S';			\
388       dst->r_stuff[1] = 'C';			\
389     }						\
390   while (0)
391 #endif
392 
393 /* Get the value of a symbol, when performing a relocation.  */
394 
395 static long
396 get_symbol_value (asymbol *symbol)
397 {
398   bfd_vma relocation;
399 
400   if (bfd_is_com_section (symbol->section))
401     relocation = 0;
402   else
403     relocation = (symbol->value +
404 		  symbol->section->output_section->vma +
405 		  symbol->section->output_offset);
406 
407   return relocation;
408 }
409 
410 #ifdef COFF_WITH_PE
411 /* Convert an rtype to howto for the COFF backend linker.
412    Copied from coff-i386.  */
413 #define coff_rtype_to_howto coff_sh_rtype_to_howto
414 
415 
416 static reloc_howto_type *
417 coff_sh_rtype_to_howto (bfd * abfd ATTRIBUTE_UNUSED,
418 			asection * sec,
419 			struct internal_reloc * rel,
420 			struct coff_link_hash_entry * h,
421 			struct internal_syment * sym,
422 			bfd_vma * addendp)
423 {
424   reloc_howto_type * howto;
425 
426   howto = sh_coff_howtos + rel->r_type;
427 
428   *addendp = 0;
429 
430   if (howto->pc_relative)
431     *addendp += sec->vma;
432 
433   if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
434     {
435       /* This is a common symbol.  The section contents include the
436 	 size (sym->n_value) as an addend.  The relocate_section
437 	 function will be adding in the final value of the symbol.  We
438 	 need to subtract out the current size in order to get the
439 	 correct result.  */
440       BFD_ASSERT (h != NULL);
441     }
442 
443   if (howto->pc_relative)
444     {
445       *addendp -= 4;
446 
447       /* If the symbol is defined, then the generic code is going to
448 	 add back the symbol value in order to cancel out an
449 	 adjustment it made to the addend.  However, we set the addend
450 	 to 0 at the start of this function.  We need to adjust here,
451 	 to avoid the adjustment the generic code will make.  FIXME:
452 	 This is getting a bit hackish.  */
453       if (sym != NULL && sym->n_scnum != 0)
454 	*addendp -= sym->n_value;
455     }
456 
457   if (rel->r_type == R_SH_IMAGEBASE)
458     *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
459 
460   return howto;
461 }
462 
463 #endif /* COFF_WITH_PE */
464 
465 /* This structure is used to map BFD reloc codes to SH COFF relocs.  */
466 struct shcoff_reloc_map
467 {
468   bfd_reloc_code_real_type bfd_reloc_val;
469   unsigned char shcoff_reloc_val;
470 };
471 
472 #ifdef COFF_WITH_PE
473 /* An array mapping BFD reloc codes to SH PE relocs.  */
474 static const struct shcoff_reloc_map sh_reloc_map[] =
475 {
476   { BFD_RELOC_32, R_SH_IMM32CE },
477   { BFD_RELOC_RVA, R_SH_IMAGEBASE },
478   { BFD_RELOC_CTOR, R_SH_IMM32CE },
479 };
480 #else
481 /* An array mapping BFD reloc codes to SH COFF relocs.  */
482 static const struct shcoff_reloc_map sh_reloc_map[] =
483 {
484   { BFD_RELOC_32, R_SH_IMM32 },
485   { BFD_RELOC_CTOR, R_SH_IMM32 },
486 };
487 #endif
488 
489 /* Given a BFD reloc code, return the howto structure for the
490    corresponding SH COFF reloc.  */
491 #define coff_bfd_reloc_type_lookup	sh_coff_reloc_type_lookup
492 #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
493 
494 static reloc_howto_type *
495 sh_coff_reloc_type_lookup (bfd *abfd,
496 			   bfd_reloc_code_real_type code)
497 {
498   unsigned int i;
499 
500   for (i = ARRAY_SIZE (sh_reloc_map); i--;)
501     if (sh_reloc_map[i].bfd_reloc_val == code)
502       return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
503 
504   _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
505 		      abfd, (unsigned int) code);
506   return NULL;
507 }
508 
509 static reloc_howto_type *
510 sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
511 			   const char *r_name)
512 {
513   unsigned int i;
514 
515   for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
516     if (sh_coff_howtos[i].name != NULL
517 	&& strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
518       return &sh_coff_howtos[i];
519 
520   return NULL;
521 }
522 
523 /* This macro is used in coffcode.h to get the howto corresponding to
524    an internal reloc.  */
525 
526 #define RTYPE2HOWTO(relent, internal)		\
527   ((relent)->howto =				\
528    ((internal)->r_type < SH_COFF_HOWTO_COUNT	\
529     ? &sh_coff_howtos[(internal)->r_type]	\
530     : (reloc_howto_type *) NULL))
531 
532 /* This is the same as the macro in coffcode.h, except that it copies
533    r_offset into reloc_entry->addend for some relocs.  */
534 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr)		\
535   {								\
536     coff_symbol_type *coffsym = (coff_symbol_type *) NULL;	\
537     if (ptr && bfd_asymbol_bfd (ptr) != abfd)			\
538       coffsym = (obj_symbols (abfd)				\
539 		 + (cache_ptr->sym_ptr_ptr - symbols));		\
540     else if (ptr)						\
541       coffsym = coff_symbol_from (ptr);				\
542     if (coffsym != (coff_symbol_type *) NULL			\
543 	&& coffsym->native->u.syment.n_scnum == 0)		\
544       cache_ptr->addend = 0;					\
545     else if (ptr && bfd_asymbol_bfd (ptr) == abfd		\
546 	     && ptr->section != (asection *) NULL)		\
547       cache_ptr->addend = - (ptr->section->vma			\
548 			     + COFF_PE_ADDEND_BIAS (ptr));	\
549     else							\
550       cache_ptr->addend = 0;					\
551     if ((reloc).r_type == R_SH_SWITCH8				\
552 	|| (reloc).r_type == R_SH_SWITCH16			\
553 	|| (reloc).r_type == R_SH_SWITCH32			\
554 	|| (reloc).r_type == R_SH_USES				\
555 	|| (reloc).r_type == R_SH_COUNT				\
556 	|| (reloc).r_type == R_SH_ALIGN)			\
557       cache_ptr->addend = (reloc).r_offset;			\
558   }
559 
560 /* This is the howto function for the SH relocations.  */
561 
562 static bfd_reloc_status_type
563 sh_reloc (bfd *      abfd,
564 	  arelent *  reloc_entry,
565 	  asymbol *  symbol_in,
566 	  void *     data,
567 	  asection * input_section,
568 	  bfd *      output_bfd,
569 	  char **    error_message ATTRIBUTE_UNUSED)
570 {
571   bfd_vma insn;
572   bfd_vma sym_value;
573   unsigned short r_type;
574   bfd_vma addr = reloc_entry->address;
575   bfd_byte *hit_data = addr + (bfd_byte *) data;
576 
577   r_type = reloc_entry->howto->type;
578 
579   if (output_bfd != NULL)
580     {
581       /* Partial linking--do nothing.  */
582       reloc_entry->address += input_section->output_offset;
583       return bfd_reloc_ok;
584     }
585 
586   /* Almost all relocs have to do with relaxing.  If any work must be
587      done for them, it has been done in sh_relax_section.  */
588   if (r_type != R_SH_IMM32
589 #ifdef COFF_WITH_PE
590       && r_type != R_SH_IMM32CE
591       && r_type != R_SH_IMAGEBASE
592 #endif
593       && (r_type != R_SH_PCDISP
594 	  || (symbol_in->flags & BSF_LOCAL) != 0))
595     return bfd_reloc_ok;
596 
597   if (symbol_in != NULL
598       && bfd_is_und_section (symbol_in->section))
599     return bfd_reloc_undefined;
600 
601   if (!bfd_reloc_offset_in_range (reloc_entry->howto, abfd, input_section,
602 				  addr))
603     return bfd_reloc_outofrange;
604 
605   sym_value = get_symbol_value (symbol_in);
606 
607   switch (r_type)
608     {
609     case R_SH_IMM32:
610 #ifdef COFF_WITH_PE
611     case R_SH_IMM32CE:
612 #endif
613       insn = bfd_get_32 (abfd, hit_data);
614       insn += sym_value + reloc_entry->addend;
615       bfd_put_32 (abfd, insn, hit_data);
616       break;
617 #ifdef COFF_WITH_PE
618     case R_SH_IMAGEBASE:
619       insn = bfd_get_32 (abfd, hit_data);
620       insn += sym_value + reloc_entry->addend;
621       insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
622       bfd_put_32 (abfd, insn, hit_data);
623       break;
624 #endif
625     case R_SH_PCDISP:
626       insn = bfd_get_16 (abfd, hit_data);
627       sym_value += reloc_entry->addend;
628       sym_value -= (input_section->output_section->vma
629 		    + input_section->output_offset
630 		    + addr
631 		    + 4);
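      /* Sign extend the existing 12 bit displacement in the instruction
	 (the XOR/subtract below converts the field to a signed value),
	 turn it into a byte offset, add it in, and then put the
	 recomputed displacement back into the low 12 bits.  */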
632       sym_value += (((insn & 0xfff) ^ 0x800) - 0x800) << 1;
633       insn = (insn & 0xf000) | ((sym_value >> 1) & 0xfff);
634       bfd_put_16 (abfd, insn, hit_data);
635       if (sym_value + 0x1000 >= 0x2000 || (sym_value & 1) != 0)
636 	return bfd_reloc_overflow;
637       break;
638     default:
639       abort ();
640       break;
641     }
642 
643   return bfd_reloc_ok;
644 }
645 
646 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
647 
648 /* We can do relaxing.  */
649 #define coff_bfd_relax_section sh_relax_section
650 
651 /* We use the special COFF backend linker.  */
652 #define coff_relocate_section sh_relocate_section
653 
654 /* When relaxing, we need to use special code to get the relocated
655    section contents.  */
656 #define coff_bfd_get_relocated_section_contents \
657   sh_coff_get_relocated_section_contents
658 
659 #include "coffcode.h"
660 
661 static bool
662 sh_relax_delete_bytes (bfd *, asection *, bfd_vma, int);
663 
664 /* This function handles relaxing on the SH.
665 
666    Function calls on the SH look like this:
667 
668        movl  L1,r0
669        ...
670        jsr   @r0
671        ...
672      L1:
673        .long function
674 
675    The compiler and assembler will cooperate to create R_SH_USES
676    relocs on the jsr instructions.  The r_offset field of the
677    R_SH_USES reloc is the PC relative offset to the instruction which
678    loads the register (the r_offset field is computed as though it
679    were a jump instruction, so the offset value is actually from four
680    bytes past the instruction).  The linker can use this reloc to
681    determine just which function is being called, and thus decide
682    whether it is possible to replace the jsr with a bsr.
683 
684    If multiple function calls are all based on a single register load
685    (i.e., the same function is called multiple times), the compiler
686    guarantees that each function call will have an R_SH_USES reloc.
687    Therefore, if the linker is able to convert each R_SH_USES reloc
688    which refers to that address, it can safely eliminate the register
689    load.
690 
691    When the assembler creates an R_SH_USES reloc, it examines it to
692    determine which address is being loaded (L1 in the above example).
693    It then counts the number of references to that address, and
694    creates an R_SH_COUNT reloc at that address.  The r_offset field of
695    the R_SH_COUNT reloc will be the number of references.  If the
696    linker is able to eliminate a register load, it can use the
697    R_SH_COUNT reloc to see whether it can also eliminate the function
698    address.
699 
700    SH relaxing also handles another, unrelated, matter.  On the SH, if
701    a load or store instruction is not aligned on a four byte boundary,
702    the memory cycle interferes with the 32 bit instruction fetch,
703    causing a one cycle bubble in the pipeline.  Therefore, we try to
704    align load and store instructions on four byte boundaries if we
705    can, by swapping them with one of the adjacent instructions.  */
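/* For illustration only (this restates the transformation described
   above): when the called function turns out to be within bsr range,
   the sequence

       mov.l  L1,r0
       ...
       jsr    @r0
       ...
     L1:
       .long  function

   is rewritten so that the jsr becomes

       bsr    function

   and, once the R_SH_COUNT reference count on L1 reaches zero, the
   register load and the stored address at L1 are deleted as well.  */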
706 
707 static bool
708 sh_relax_section (bfd *abfd,
709 		  asection *sec,
710 		  struct bfd_link_info *link_info,
711 		  bool *again)
712 {
713   struct internal_reloc *internal_relocs;
714   bool have_code;
715   struct internal_reloc *irel, *irelend;
716   bfd_byte *contents = NULL;
717 
718   *again = false;
719 
720   if (bfd_link_relocatable (link_info)
721       || (sec->flags & SEC_HAS_CONTENTS) == 0
722       || (sec->flags & SEC_RELOC) == 0
723       || sec->reloc_count == 0)
724     return true;
725 
726   if (coff_section_data (abfd, sec) == NULL)
727     {
728       size_t amt = sizeof (struct coff_section_tdata);
729       sec->used_by_bfd = bfd_zalloc (abfd, amt);
730       if (sec->used_by_bfd == NULL)
731 	return false;
732     }
733 
734   internal_relocs = (_bfd_coff_read_internal_relocs
735 		     (abfd, sec, link_info->keep_memory,
736 		      (bfd_byte *) NULL, false,
737 		      (struct internal_reloc *) NULL));
738   if (internal_relocs == NULL)
739     goto error_return;
740 
741   have_code = false;
742 
743   irelend = internal_relocs + sec->reloc_count;
744   for (irel = internal_relocs; irel < irelend; irel++)
745     {
746       bfd_vma laddr, paddr, symval;
747       unsigned short insn;
748       struct internal_reloc *irelfn, *irelscan, *irelcount;
749       struct internal_syment sym;
750       bfd_signed_vma foff;
751 
752       if (irel->r_type == R_SH_CODE)
753 	have_code = true;
754 
755       if (irel->r_type != R_SH_USES)
756 	continue;
757 
758       /* Get the section contents.  */
759       if (contents == NULL)
760 	{
761 	  if (coff_section_data (abfd, sec)->contents != NULL)
762 	    contents = coff_section_data (abfd, sec)->contents;
763 	  else
764 	    {
765 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
766 		goto error_return;
767 	    }
768 	}
769 
770       /* The r_offset field of the R_SH_USES reloc will point us to
771 	 the register load.  The 4 is because the r_offset field is
772 	 computed as though it were a jump offset, which are based
773 	 from 4 bytes after the jump instruction.  */
774       laddr = irel->r_vaddr - sec->vma + 4;
775       /* Careful to sign extend the 32-bit offset.  */
776       laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
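      /* The register load normally precedes the call in the sequence
	 shown above, so the offset held in r_offset is usually negative.  */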
777       if (laddr >= sec->size)
778 	{
779 	  /* xgettext: c-format */
780 	  _bfd_error_handler
781 	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES offset"),
782 	     abfd, (uint64_t) irel->r_vaddr);
783 	  continue;
784 	}
785       insn = bfd_get_16 (abfd, contents + laddr);
786 
787       /* If the instruction is not mov.l NN,rN, we don't know what to do.  */
788       if ((insn & 0xf000) != 0xd000)
789 	{
790 	  _bfd_error_handler
791 	    /* xgettext: c-format */
792 	    (_("%pB: %#" PRIx64 ": warning: R_SH_USES points to unrecognized insn %#x"),
793 	     abfd, (uint64_t) irel->r_vaddr, insn);
794 	  continue;
795 	}
796 
797       /* Get the address from which the register is being loaded.  The
798 	 displacement in the mov.l instruction is quadrupled.  It is a
799 	 displacement from four bytes after the movl instruction, but,
800 	 before adding in the PC address, two least significant bits
801 	 of the PC are cleared.  We assume that the section is aligned
802 	 on a four byte boundary.  */
803       paddr = insn & 0xff;
804       paddr *= 4;
805       paddr += (laddr + 4) &~ (bfd_vma) 3;
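      /* Hypothetical example (the numbers are invented for illustration):
	 a mov.l at laddr 0x100 with a displacement field of 3 loads its
	 constant from ((0x100 + 4) & ~3) + 3 * 4 = 0x110.  */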
806       if (paddr >= sec->size)
807 	{
808 	  _bfd_error_handler
809 	    /* xgettext: c-format */
810 	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES load offset"),
811 	     abfd, (uint64_t) irel->r_vaddr);
812 	  continue;
813 	}
814 
815       /* Get the reloc for the address from which the register is
816 	 being loaded.  This reloc will tell us which function is
817 	 actually being called.  */
818       paddr += sec->vma;
819       for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
820 	if (irelfn->r_vaddr == paddr
821 #ifdef COFF_WITH_PE
822 	    && (irelfn->r_type == R_SH_IMM32
823 		|| irelfn->r_type == R_SH_IMM32CE
824 		|| irelfn->r_type == R_SH_IMAGEBASE)
825 
826 #else
827 	    && irelfn->r_type == R_SH_IMM32
828 #endif
829 	    )
830 	  break;
831       if (irelfn >= irelend)
832 	{
833 	  _bfd_error_handler
834 	    /* xgettext: c-format */
835 	    (_("%pB: %#" PRIx64 ": warning: could not find expected reloc"),
836 	     abfd, (uint64_t) paddr);
837 	  continue;
838 	}
839 
840       /* Get the value of the symbol referred to by the reloc.  */
841       if (! _bfd_coff_get_external_symbols (abfd))
842 	goto error_return;
843       bfd_coff_swap_sym_in (abfd,
844 			    ((bfd_byte *) obj_coff_external_syms (abfd)
845 			     + (irelfn->r_symndx
846 				* bfd_coff_symesz (abfd))),
847 			    &sym);
848       if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
849 	{
850 	  _bfd_error_handler
851 	    /* xgettext: c-format */
852 	    (_("%pB: %#" PRIx64 ": warning: symbol in unexpected section"),
853 	     abfd, (uint64_t) paddr);
854 	  continue;
855 	}
856 
857       if (sym.n_sclass != C_EXT)
858 	{
859 	  symval = (sym.n_value
860 		    - sec->vma
861 		    + sec->output_section->vma
862 		    + sec->output_offset);
863 	}
864       else
865 	{
866 	  struct coff_link_hash_entry *h;
867 
868 	  h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
869 	  BFD_ASSERT (h != NULL);
870 	  if (h->root.type != bfd_link_hash_defined
871 	      && h->root.type != bfd_link_hash_defweak)
872 	    {
873 	      /* This appears to be a reference to an undefined
874 		 symbol.  Just ignore it--it will be caught by the
875 		 regular reloc processing.  */
876 	      continue;
877 	    }
878 
879 	  symval = (h->root.u.def.value
880 		    + h->root.u.def.section->output_section->vma
881 		    + h->root.u.def.section->output_offset);
882 	}
883 
884       symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
885 
886       /* See if this function call can be shortened.  */
887       foff = (symval
888 	      - (irel->r_vaddr
889 		 - sec->vma
890 		 + sec->output_section->vma
891 		 + sec->output_offset
892 		 + 4));
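      /* A bsr has a signed 12 bit displacement counted in halfwords, so
	 only targets within roughly +/- 4096 bytes of the address four
	 bytes past the branch are reachable.  */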
893       if (foff < -0x1000 || foff >= 0x1000)
894 	{
895 	  /* After all that work, we can't shorten this function call.  */
896 	  continue;
897 	}
898 
899       /* Shorten the function call.  */
900 
901       /* For simplicity of coding, we are going to modify the section
902 	 contents, the section relocs, and the BFD symbol table.  We
903 	 must tell the rest of the code not to free up this
904 	 information.  It would be possible to instead create a table
905 	 of changes which have to be made, as is done in coff-mips.c;
906 	 that would be more work, but would require less memory when
907 	 the linker is run.  */
908 
909       coff_section_data (abfd, sec)->relocs = internal_relocs;
910       coff_section_data (abfd, sec)->contents = contents;
911 
912       /* Replace the jsr with a bsr.  */
913 
914       /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
915 	 replace the jsr with a bsr.  */
916       irel->r_type = R_SH_PCDISP;
917       irel->r_symndx = irelfn->r_symndx;
918       if (sym.n_sclass != C_EXT)
919 	{
920 	  /* If this needs to be changed because of future relaxing,
921 	     it will be handled here like other internal PCDISP
922 	     relocs.  */
923 	  bfd_put_16 (abfd,
924 		      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
925 		      contents + irel->r_vaddr - sec->vma);
926 	}
927       else
928 	{
929 	  /* We can't fully resolve this yet, because the external
930 	     symbol value may be changed by future relaxing.  We let
931 	     the final link phase handle it.  */
932 	  bfd_put_16 (abfd, (bfd_vma) 0xb000,
933 		      contents + irel->r_vaddr - sec->vma);
934 	}
935 
936       /* See if there is another R_SH_USES reloc referring to the same
937 	 register load.  */
938       for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
939 	if (irelscan->r_type == R_SH_USES
940 	    && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
941 	  break;
942       if (irelscan < irelend)
943 	{
944 	  /* Some other function call depends upon this register load,
945 	     and we have not yet converted that function call.
946 	     Indeed, we may never be able to convert it.  There is
947 	     nothing else we can do at this point.  */
948 	  continue;
949 	}
950 
951       /* Look for a R_SH_COUNT reloc on the location where the
952 	 function address is stored.  Do this before deleting any
953 	 bytes, to avoid confusion about the address.  */
954       for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
955 	if (irelcount->r_vaddr == paddr
956 	    && irelcount->r_type == R_SH_COUNT)
957 	  break;
958 
959       /* Delete the register load.  */
960       if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
961 	goto error_return;
962 
963       /* That will change things, so, just in case it permits some
964 	 other function call to come within range, we should relax
965 	 again.  Note that this is not required, and it may be slow.  */
966       *again = true;
967 
968       /* Now check whether we got a COUNT reloc.  */
969       if (irelcount >= irelend)
970 	{
971 	  _bfd_error_handler
972 	    /* xgettext: c-format */
973 	    (_("%pB: %#" PRIx64 ": warning: could not find expected COUNT reloc"),
974 	     abfd, (uint64_t) paddr);
975 	  continue;
976 	}
977 
978       /* The number of uses is stored in the r_offset field.  We've
979 	 just deleted one.  */
980       if (irelcount->r_offset == 0)
981 	{
982 	  /* xgettext: c-format */
983 	  _bfd_error_handler (_("%pB: %#" PRIx64 ": warning: bad count"),
984 			      abfd, (uint64_t) paddr);
985 	  continue;
986 	}
987 
988       --irelcount->r_offset;
989 
990       /* If there are no more uses, we can delete the address.  Reload
991 	 the address from irelfn, in case it was changed by the
992 	 previous call to sh_relax_delete_bytes.  */
993       if (irelcount->r_offset == 0)
994 	{
995 	  if (! sh_relax_delete_bytes (abfd, sec,
996 				       irelfn->r_vaddr - sec->vma, 4))
997 	    goto error_return;
998 	}
999 
1000       /* We've done all we can with that function call.  */
1001     }
1002 
1003   /* Look for load and store instructions that we can align on four
1004      byte boundaries.  */
1005   if (have_code)
1006     {
1007       bool swapped;
1008 
1009       /* Get the section contents.  */
1010       if (contents == NULL)
1011 	{
1012 	  if (coff_section_data (abfd, sec)->contents != NULL)
1013 	    contents = coff_section_data (abfd, sec)->contents;
1014 	  else
1015 	    {
1016 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1017 		goto error_return;
1018 	    }
1019 	}
1020 
1021       if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1022 	goto error_return;
1023 
1024       if (swapped)
1025 	{
1026 	  coff_section_data (abfd, sec)->relocs = internal_relocs;
1027 	  coff_section_data (abfd, sec)->contents = contents;
1028 	}
1029     }
1030 
1031   if (internal_relocs != NULL
1032       && internal_relocs != coff_section_data (abfd, sec)->relocs)
1033     {
1034       if (! link_info->keep_memory)
1035 	free (internal_relocs);
1036       else
1037 	coff_section_data (abfd, sec)->relocs = internal_relocs;
1038     }
1039 
1040   if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1041     {
1042       if (! link_info->keep_memory)
1043 	free (contents);
1044       else
1045 	/* Cache the section contents for coff_link_input_bfd.  */
1046 	coff_section_data (abfd, sec)->contents = contents;
1047     }
1048 
1049   return true;
1050 
1051  error_return:
1052   if (internal_relocs != coff_section_data (abfd, sec)->relocs)
1053     free (internal_relocs);
1054   if (contents != coff_section_data (abfd, sec)->contents)
1055     free (contents);
1056   return false;
1057 }
1058 
1059 /* Delete some bytes from a section while relaxing.  */
1060 
1061 static bool
1062 sh_relax_delete_bytes (bfd *abfd,
1063 		       asection *sec,
1064 		       bfd_vma addr,
1065 		       int count)
1066 {
1067   bfd_byte *contents;
1068   struct internal_reloc *irel, *irelend;
1069   struct internal_reloc *irelalign;
1070   bfd_vma toaddr;
1071   bfd_byte *esym, *esymend;
1072   bfd_size_type symesz;
1073   struct coff_link_hash_entry **sym_hash;
1074   asection *o;
1075 
1076   contents = coff_section_data (abfd, sec)->contents;
1077 
1078   /* The deletion must stop at the next ALIGN reloc for an alignment
1079      power larger than the number of bytes we are deleting.  */
1080 
1081   irelalign = NULL;
1082   toaddr = sec->size;
1083 
1084   irel = coff_section_data (abfd, sec)->relocs;
1085   irelend = irel + sec->reloc_count;
1086   for (; irel < irelend; irel++)
1087     {
1088       if (irel->r_type == R_SH_ALIGN
1089 	  && irel->r_vaddr - sec->vma > addr
1090 	  && count < (1 << irel->r_offset))
1091 	{
1092 	  irelalign = irel;
1093 	  toaddr = irel->r_vaddr - sec->vma;
1094 	  break;
1095 	}
1096     }
1097 
1098   /* Actually delete the bytes.  */
1099   memmove (contents + addr, contents + addr + count,
1100 	   (size_t) (toaddr - addr - count));
1101   if (irelalign == NULL)
1102     sec->size -= count;
1103   else
1104     {
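      /* The section cannot shrink past the ALIGN reloc, so instead pad
	 the bytes vacated in front of the alignment boundary with nops.  */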
1105       int i;
1106 
1107 #define NOP_OPCODE (0x0009)
1108 
1109       BFD_ASSERT ((count & 1) == 0);
1110       for (i = 0; i < count; i += 2)
1111 	bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1112     }
1113 
1114   /* Adjust all the relocs.  */
1115   for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1116     {
1117       bfd_vma nraddr, stop;
1118       bfd_vma start = 0;
1119       int insn = 0;
1120       struct internal_syment sym;
1121       int off, adjust, oinsn;
1122       bfd_signed_vma voff = 0;
1123       bool overflow;
1124 
1125       /* Get the new reloc address.  */
1126       nraddr = irel->r_vaddr - sec->vma;
1127       if ((irel->r_vaddr - sec->vma > addr
1128 	   && irel->r_vaddr - sec->vma < toaddr)
1129 	  || (irel->r_type == R_SH_ALIGN
1130 	      && irel->r_vaddr - sec->vma == toaddr))
1131 	nraddr -= count;
1132 
1133       /* See if this reloc was for the bytes we have deleted, in which
1134 	 case we no longer care about it.  Don't delete relocs which
1135 	 represent addresses, though.  */
1136       if (irel->r_vaddr - sec->vma >= addr
1137 	  && irel->r_vaddr - sec->vma < addr + count
1138 	  && irel->r_type != R_SH_ALIGN
1139 	  && irel->r_type != R_SH_CODE
1140 	  && irel->r_type != R_SH_DATA
1141 	  && irel->r_type != R_SH_LABEL)
1142 	irel->r_type = R_SH_UNUSED;
1143 
1144       /* If this is a PC relative reloc, see if the range it covers
1145 	 includes the bytes we have deleted.  */
1146       switch (irel->r_type)
1147 	{
1148 	default:
1149 	  break;
1150 
1151 	case R_SH_PCDISP8BY2:
1152 	case R_SH_PCDISP:
1153 	case R_SH_PCRELIMM8BY2:
1154 	case R_SH_PCRELIMM8BY4:
1155 	  start = irel->r_vaddr - sec->vma;
1156 	  insn = bfd_get_16 (abfd, contents + nraddr);
1157 	  break;
1158 	}
1159 
1160       switch (irel->r_type)
1161 	{
1162 	default:
1163 	  start = stop = addr;
1164 	  break;
1165 
1166 	case R_SH_IMM32:
1167 #ifdef COFF_WITH_PE
1168 	case R_SH_IMM32CE:
1169 	case R_SH_IMAGEBASE:
1170 #endif
1171 	  /* If this reloc is against a symbol defined in this
1172 	     section, and the symbol will not be adjusted below, we
1173 	     must check the addend to see if it will put the value in
1174 	     range to be adjusted, and hence must be changed.  */
1175 	  bfd_coff_swap_sym_in (abfd,
1176 				((bfd_byte *) obj_coff_external_syms (abfd)
1177 				 + (irel->r_symndx
1178 				    * bfd_coff_symesz (abfd))),
1179 				&sym);
1180 	  if (sym.n_sclass != C_EXT
1181 	      && sym.n_scnum == sec->target_index
1182 	      && ((bfd_vma) sym.n_value <= addr
1183 		  || (bfd_vma) sym.n_value >= toaddr))
1184 	    {
1185 	      bfd_vma val;
1186 
1187 	      val = bfd_get_32 (abfd, contents + nraddr);
1188 	      val += sym.n_value;
1189 	      if (val > addr && val < toaddr)
1190 		bfd_put_32 (abfd, val - count, contents + nraddr);
1191 	    }
1192 	  start = stop = addr;
1193 	  break;
1194 
1195 	case R_SH_PCDISP8BY2:
1196 	  off = insn & 0xff;
1197 	  if (off & 0x80)
1198 	    off -= 0x100;
1199 	  stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1200 	  break;
1201 
1202 	case R_SH_PCDISP:
1203 	  bfd_coff_swap_sym_in (abfd,
1204 				((bfd_byte *) obj_coff_external_syms (abfd)
1205 				 + (irel->r_symndx
1206 				    * bfd_coff_symesz (abfd))),
1207 				&sym);
1208 	  if (sym.n_sclass == C_EXT)
1209 	    start = stop = addr;
1210 	  else
1211 	    {
1212 	      off = insn & 0xfff;
1213 	      if (off & 0x800)
1214 		off -= 0x1000;
1215 	      stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1216 	    }
1217 	  break;
1218 
1219 	case R_SH_PCRELIMM8BY2:
1220 	  off = insn & 0xff;
1221 	  stop = start + 4 + off * 2;
1222 	  break;
1223 
1224 	case R_SH_PCRELIMM8BY4:
1225 	  off = insn & 0xff;
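	  /* As noted in the relaxation comment earlier in this file, the
	     PC used by a PC-relative longword load is first rounded down
	     to a four byte boundary.  */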
1226 	  stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1227 	  break;
1228 
1229 	case R_SH_SWITCH8:
1230 	case R_SH_SWITCH16:
1231 	case R_SH_SWITCH32:
1232 	  /* These reloc types represent
1233 	       .word L2-L1
1234 	     The r_offset field holds the difference between the reloc
1235 	     address and L1.  That is the start of the reloc, and
1236 	     adding in the contents gives us the top.  We must adjust
1237 	     both the r_offset field and the section contents.  */
1238 
1239 	  start = irel->r_vaddr - sec->vma;
1240 	  stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1241 
1242 	  if (start > addr
1243 	      && start < toaddr
1244 	      && (stop <= addr || stop >= toaddr))
1245 	    irel->r_offset += count;
1246 	  else if (stop > addr
1247 		   && stop < toaddr
1248 		   && (start <= addr || start >= toaddr))
1249 	    irel->r_offset -= count;
1250 
1251 	  start = stop;
1252 
1253 	  if (irel->r_type == R_SH_SWITCH16)
1254 	    voff = bfd_get_signed_16 (abfd, contents + nraddr);
1255 	  else if (irel->r_type == R_SH_SWITCH8)
1256 	    voff = bfd_get_8 (abfd, contents + nraddr);
1257 	  else
1258 	    voff = bfd_get_signed_32 (abfd, contents + nraddr);
1259 	  stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1260 
1261 	  break;
1262 
1263 	case R_SH_USES:
1264 	  start = irel->r_vaddr - sec->vma;
1265 	  stop = (bfd_vma) ((bfd_signed_vma) start
1266 			    + (long) irel->r_offset
1267 			    + 4);
1268 	  break;
1269 	}
1270 
1271       if (start > addr
1272 	  && start < toaddr
1273 	  && (stop <= addr || stop >= toaddr))
1274 	adjust = count;
1275       else if (stop > addr
1276 	       && stop < toaddr
1277 	       && (start <= addr || start >= toaddr))
1278 	adjust = - count;
1279       else
1280 	adjust = 0;
1281 
1282       if (adjust != 0)
1283 	{
1284 	  oinsn = insn;
1285 	  overflow = false;
1286 	  switch (irel->r_type)
1287 	    {
1288 	    default:
1289 	      abort ();
1290 	      break;
1291 
1292 	    case R_SH_PCDISP8BY2:
1293 	    case R_SH_PCRELIMM8BY2:
1294 	      insn += adjust / 2;
1295 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1296 		overflow = true;
1297 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1298 	      break;
1299 
1300 	    case R_SH_PCDISP:
1301 	      insn += adjust / 2;
1302 	      if ((oinsn & 0xf000) != (insn & 0xf000))
1303 		overflow = true;
1304 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1305 	      break;
1306 
1307 	    case R_SH_PCRELIMM8BY4:
1308 	      BFD_ASSERT (adjust == count || count >= 4);
1309 	      if (count >= 4)
1310 		insn += adjust / 4;
1311 	      else
1312 		{
1313 		  if ((irel->r_vaddr & 3) == 0)
1314 		    ++insn;
1315 		}
1316 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1317 		overflow = true;
1318 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1319 	      break;
1320 
1321 	    case R_SH_SWITCH8:
1322 	      voff += adjust;
1323 	      if (voff < 0 || voff >= 0xff)
1324 		overflow = true;
1325 	      bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1326 	      break;
1327 
1328 	    case R_SH_SWITCH16:
1329 	      voff += adjust;
1330 	      if (voff < - 0x8000 || voff >= 0x8000)
1331 		overflow = true;
1332 	      bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1333 	      break;
1334 
1335 	    case R_SH_SWITCH32:
1336 	      voff += adjust;
1337 	      bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1338 	      break;
1339 
1340 	    case R_SH_USES:
1341 	      irel->r_offset += adjust;
1342 	      break;
1343 	    }
1344 
1345 	  if (overflow)
1346 	    {
1347 	      _bfd_error_handler
1348 		/* xgettext: c-format */
1349 		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
1350 		 abfd, (uint64_t) irel->r_vaddr);
1351 	      bfd_set_error (bfd_error_bad_value);
1352 	      return false;
1353 	    }
1354 	}
1355 
1356       irel->r_vaddr = nraddr + sec->vma;
1357     }
1358 
1359   /* Look through all the other sections.  If they contain any IMM32
1360      relocs against internal symbols which we are not going to adjust
1361      below, we may need to adjust the addends.  */
1362   for (o = abfd->sections; o != NULL; o = o->next)
1363     {
1364       struct internal_reloc *internal_relocs;
1365       struct internal_reloc *irelscan, *irelscanend;
1366       bfd_byte *ocontents;
1367 
1368       if (o == sec
1369 	  || (o->flags & SEC_HAS_CONTENTS) == 0
1370 	  || (o->flags & SEC_RELOC) == 0
1371 	  || o->reloc_count == 0)
1372 	continue;
1373 
1374       /* We always cache the relocs.  Perhaps, if info->keep_memory is
1375 	 FALSE, we should free them, if we are permitted to, when we
1376 	 leave sh_relax_section.  */
1377       internal_relocs = (_bfd_coff_read_internal_relocs
1378 			 (abfd, o, true, (bfd_byte *) NULL, false,
1379 			  (struct internal_reloc *) NULL));
1380       if (internal_relocs == NULL)
1381 	return false;
1382 
1383       ocontents = NULL;
1384       irelscanend = internal_relocs + o->reloc_count;
1385       for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1386 	{
1387 	  struct internal_syment sym;
1388 
1389 #ifdef COFF_WITH_PE
1390 	  if (irelscan->r_type != R_SH_IMM32
1391 	      && irelscan->r_type != R_SH_IMAGEBASE
1392 	      && irelscan->r_type != R_SH_IMM32CE)
1393 #else
1394 	  if (irelscan->r_type != R_SH_IMM32)
1395 #endif
1396 	    continue;
1397 
1398 	  bfd_coff_swap_sym_in (abfd,
1399 				((bfd_byte *) obj_coff_external_syms (abfd)
1400 				 + (irelscan->r_symndx
1401 				    * bfd_coff_symesz (abfd))),
1402 				&sym);
1403 	  if (sym.n_sclass != C_EXT
1404 	      && sym.n_scnum == sec->target_index
1405 	      && ((bfd_vma) sym.n_value <= addr
1406 		  || (bfd_vma) sym.n_value >= toaddr))
1407 	    {
1408 	      bfd_vma val;
1409 
1410 	      if (ocontents == NULL)
1411 		{
1412 		  if (coff_section_data (abfd, o)->contents != NULL)
1413 		    ocontents = coff_section_data (abfd, o)->contents;
1414 		  else
1415 		    {
1416 		      if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1417 			return false;
1418 		      /* We always cache the section contents.
1419 			 Perhaps, if info->keep_memory is FALSE, we
1420 			 should free them, if we are permitted to,
1421 			 when we leave sh_relax_section.  */
1422 		      coff_section_data (abfd, o)->contents = ocontents;
1423 		    }
1424 		}
1425 
1426 	      val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1427 	      val += sym.n_value;
1428 	      if (val > addr && val < toaddr)
1429 		bfd_put_32 (abfd, val - count,
1430 			    ocontents + irelscan->r_vaddr - o->vma);
1431 	    }
1432 	}
1433     }
1434 
1435   /* Adjusting the internal symbols will not work if something has
1436      already retrieved the generic symbols.  It would be possible to
1437      make this work by adjusting the generic symbols at the same time.
1438      However, this case should not arise in normal usage.  */
1439   if (obj_symbols (abfd) != NULL
1440       || obj_raw_syments (abfd) != NULL)
1441     {
1442       _bfd_error_handler
1443 	(_("%pB: fatal: generic symbols retrieved before relaxing"), abfd);
1444       bfd_set_error (bfd_error_invalid_operation);
1445       return false;
1446     }
1447 
1448   /* Adjust all the symbols.  */
1449   sym_hash = obj_coff_sym_hashes (abfd);
1450   symesz = bfd_coff_symesz (abfd);
1451   esym = (bfd_byte *) obj_coff_external_syms (abfd);
1452   esymend = esym + obj_raw_syment_count (abfd) * symesz;
1453   while (esym < esymend)
1454     {
1455       struct internal_syment isym;
1456 
1457       bfd_coff_swap_sym_in (abfd, esym, &isym);
1458 
1459       if (isym.n_scnum == sec->target_index
1460 	  && (bfd_vma) isym.n_value > addr
1461 	  && (bfd_vma) isym.n_value < toaddr)
1462 	{
1463 	  isym.n_value -= count;
1464 
1465 	  bfd_coff_swap_sym_out (abfd, &isym, esym);
1466 
1467 	  if (*sym_hash != NULL)
1468 	    {
1469 	      BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1470 			  || (*sym_hash)->root.type == bfd_link_hash_defweak);
1471 	      BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1472 			  && (*sym_hash)->root.u.def.value < toaddr);
1473 	      (*sym_hash)->root.u.def.value -= count;
1474 	    }
1475 	}
1476 
1477       esym += (isym.n_numaux + 1) * symesz;
1478       sym_hash += isym.n_numaux + 1;
1479     }
1480 
1481   /* See if we can move the ALIGN reloc forward.  We have adjusted
1482      r_vaddr for it already.  */
1483   if (irelalign != NULL)
1484     {
1485       bfd_vma alignto, alignaddr;
1486 
1487       alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1488       alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1489 			     1 << irelalign->r_offset);
1490       if (alignto != alignaddr)
1491 	{
1492 	  /* Tail recursion.  */
1493 	  return sh_relax_delete_bytes (abfd, sec, alignaddr,
1494 					(int) (alignto - alignaddr));
1495 	}
1496     }
1497 
1498   return true;
1499 }
1500 
1501 /* This is yet another version of the SH opcode table, used to rapidly
1502    get information about a particular instruction.  */
1503 
1504 /* The opcode map is represented by an array of these structures.  The
1505    array is indexed by the high order four bits in the instruction.  */
1506 
1507 struct sh_major_opcode
1508 {
1509   /* A pointer to the instruction list.  This is an array which
1510      contains all the instructions with this major opcode.  */
1511   const struct sh_minor_opcode *minor_opcodes;
1512   /* The number of elements in minor_opcodes.  */
1513   unsigned short count;
1514 };
1515 
1516 /* This structure holds information for a set of SH opcodes.  The
1517    instruction code is ANDed with the mask value, and the resulting
1518    value is used to search the sorted opcode list.  */
1519 
1520 struct sh_minor_opcode
1521 {
1522   /* The sorted opcode list.  */
1523   const struct sh_opcode *opcodes;
1524   /* The number of elements in opcodes.  */
1525   unsigned short count;
1526   /* The mask value to use when searching the opcode list.  */
1527   unsigned short mask;
1528 };
1529 
1530 /* This structure holds information for an SH instruction.  An array
1531    of these structures is sorted in order by opcode.  */
1532 
1533 struct sh_opcode
1534 {
1535   /* The code for this instruction, after it has been ANDed with the
1536      mask value in the sh_minor_opcode structure.  */
1537   unsigned short opcode;
1538   /* Flags for this instruction.  */
1539   unsigned long flags;
1540 };
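
/* Illustrative sketch only: this block is not part of the original code
   and is kept out of the build.  It shows how the tables described above
   are meant to be consulted; the function name and the MAJORS parameter
   are invented for the example, and the actual lookup code in this file
   may differ.  */
#if 0
static const struct sh_opcode *
sh_lookup_insn_sketch (const struct sh_major_opcode *majors,
		       unsigned int insn)
{
  /* The major table is indexed by the high order four bits of the
     instruction.  */
  const struct sh_major_opcode *major = &majors[(insn >> 12) & 0xf];
  unsigned short i, j;

  for (i = 0; i < major->count; i++)
    {
      const struct sh_minor_opcode *minor = &major->minor_opcodes[i];
      /* AND the instruction with the minor opcode mask, then look for
	 the result in that entry's opcode list.  */
      unsigned short masked = insn & minor->mask;

      for (j = 0; j < minor->count; j++)
	if (minor->opcodes[j].opcode == masked)
	  return &minor->opcodes[j];
    }

  return NULL;
}
#endif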
1541 
1542 /* Flags which appear in the sh_opcode structure.  */
1543 
1544 /* This instruction loads a value from memory.  */
1545 #define LOAD (0x1)
1546 
1547 /* This instruction stores a value to memory.  */
1548 #define STORE (0x2)
1549 
1550 /* This instruction is a branch.  */
1551 #define BRANCH (0x4)
1552 
1553 /* This instruction has a delay slot.  */
1554 #define DELAY (0x8)
1555 
1556 /* This instruction uses the value in the register in the field at
1557    mask 0x0f00 of the instruction.  */
1558 #define USES1 (0x10)
1559 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1560 
1561 /* This instruction uses the value in the register in the field at
1562    mask 0x00f0 of the instruction.  */
1563 #define USES2 (0x20)
1564 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1565 
1566 /* This instruction uses the value in register 0.  */
1567 #define USESR0 (0x40)
1568 
1569 /* This instruction sets the value in the register in the field at
1570    mask 0x0f00 of the instruction.  */
1571 #define SETS1 (0x80)
1572 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1573 
1574 /* This instruction sets the value in the register in the field at
1575    mask 0x00f0 of the instruction.  */
1576 #define SETS2 (0x100)
1577 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1578 
1579 /* This instruction sets register 0.  */
1580 #define SETSR0 (0x200)
1581 
1582 /* This instruction sets a special register.  */
1583 #define SETSSP (0x400)
1584 
1585 /* This instruction uses a special register.  */
1586 #define USESSP (0x800)
1587 
1588 /* This instruction uses the floating point register in the field at
1589    mask 0x0f00 of the instruction.  */
1590 #define USESF1 (0x1000)
1591 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1592 
1593 /* This instruction uses the floating point register in the field at
1594    mask 0x00f0 of the instruction.  */
1595 #define USESF2 (0x2000)
1596 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1597 
1598 /* This instruction uses floating point register 0.  */
1599 #define USESF0 (0x4000)
1600 
1601 /* This instruction sets the floating point register in the field at
1602    mask 0x0f00 of the instruction.  */
1603 #define SETSF1 (0x8000)
1604 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1605 
/* This instruction uses the value in one of the registers r2 through
   r5, as selected by the two bit field at mask 0x0300 of the
   instruction.  */
1606 #define USESAS (0x10000)
1607 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)

/* This instruction uses the value in register 8.  */
1608 #define USESR8 (0x20000)

/* This instruction sets one of the registers r2 through r5, using the
   same encoding as USESAS.  */
1609 #define SETSAS (0x40000)
1610 #define SETSAS_REG(x) USESAS_REG (x)
1611 
1612 #define MAP(a) a, sizeof a / sizeof a[0]
1613 
1614 #ifndef COFF_IMAGE_WITH_PE
1615 
1616 /* The opcode maps.  */
1617 
1618 static const struct sh_opcode sh_opcode00[] =
1619 {
1620   { 0x0008, SETSSP },			/* clrt */
1621   { 0x0009, 0 },			/* nop */
1622   { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
1623   { 0x0018, SETSSP },			/* sett */
1624   { 0x0019, SETSSP },			/* div0u */
1625   { 0x001b, 0 },			/* sleep */
1626   { 0x0028, SETSSP },			/* clrmac */
1627   { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
1628   { 0x0038, USESSP | SETSSP },		/* ldtlb */
1629   { 0x0048, SETSSP },			/* clrs */
1630   { 0x0058, SETSSP }			/* sets */
1631 };
1632 
1633 static const struct sh_opcode sh_opcode01[] =
1634 {
1635   { 0x0003, BRANCH | DELAY | USES1 | SETSSP },	/* bsrf rn */
1636   { 0x000a, SETS1 | USESSP },			/* sts mach,rn */
1637   { 0x001a, SETS1 | USESSP },			/* sts macl,rn */
1638   { 0x0023, BRANCH | DELAY | USES1 },		/* braf rn */
1639   { 0x0029, SETS1 | USESSP },			/* movt rn */
1640   { 0x002a, SETS1 | USESSP },			/* sts pr,rn */
1641   { 0x005a, SETS1 | USESSP },			/* sts fpul,rn */
1642   { 0x006a, SETS1 | USESSP },			/* sts fpscr,rn / sts dsr,rn */
1643   { 0x0083, LOAD | USES1 },			/* pref @rn */
1644   { 0x007a, SETS1 | USESSP },			/* sts a0,rn */
1645   { 0x008a, SETS1 | USESSP },			/* sts x0,rn */
1646   { 0x009a, SETS1 | USESSP },			/* sts x1,rn */
1647   { 0x00aa, SETS1 | USESSP },			/* sts y0,rn */
1648   { 0x00ba, SETS1 | USESSP }			/* sts y1,rn */
1649 };
1650 
1651 static const struct sh_opcode sh_opcode02[] =
1652 {
1653   { 0x0002, SETS1 | USESSP },			/* stc <special_reg>,rn */
1654   { 0x0004, STORE | USES1 | USES2 | USESR0 },	/* mov.b rm,@(r0,rn) */
1655   { 0x0005, STORE | USES1 | USES2 | USESR0 },	/* mov.w rm,@(r0,rn) */
1656   { 0x0006, STORE | USES1 | USES2 | USESR0 },	/* mov.l rm,@(r0,rn) */
1657   { 0x0007, SETSSP | USES1 | USES2 },		/* mul.l rm,rn */
1658   { 0x000c, LOAD | SETS1 | USES2 | USESR0 },	/* mov.b @(r0,rm),rn */
1659   { 0x000d, LOAD | SETS1 | USES2 | USESR0 },	/* mov.w @(r0,rm),rn */
1660   { 0x000e, LOAD | SETS1 | USES2 | USESR0 },	/* mov.l @(r0,rm),rn */
1661   { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1662 };
1663 
1664 static const struct sh_minor_opcode sh_opcode0[] =
1665 {
1666   { MAP (sh_opcode00), 0xffff },
1667   { MAP (sh_opcode01), 0xf0ff },
1668   { MAP (sh_opcode02), 0xf00f }
1669 };
1670 
1671 static const struct sh_opcode sh_opcode10[] =
1672 {
1673   { 0x1000, STORE | USES1 | USES2 }	/* mov.l rm,@(disp,rn) */
1674 };
1675 
1676 static const struct sh_minor_opcode sh_opcode1[] =
1677 {
1678   { MAP (sh_opcode10), 0xf000 }
1679 };
1680 
1681 static const struct sh_opcode sh_opcode20[] =
1682 {
1683   { 0x2000, STORE | USES1 | USES2 },		/* mov.b rm,@rn */
1684   { 0x2001, STORE | USES1 | USES2 },		/* mov.w rm,@rn */
1685   { 0x2002, STORE | USES1 | USES2 },		/* mov.l rm,@rn */
1686   { 0x2004, STORE | SETS1 | USES1 | USES2 },	/* mov.b rm,@-rn */
1687   { 0x2005, STORE | SETS1 | USES1 | USES2 },	/* mov.w rm,@-rn */
1688   { 0x2006, STORE | SETS1 | USES1 | USES2 },	/* mov.l rm,@-rn */
1689   { 0x2007, SETSSP | USES1 | USES2 | USESSP },	/* div0s */
1690   { 0x2008, SETSSP | USES1 | USES2 },		/* tst rm,rn */
1691   { 0x2009, SETS1 | USES1 | USES2 },		/* and rm,rn */
1692   { 0x200a, SETS1 | USES1 | USES2 },		/* xor rm,rn */
1693   { 0x200b, SETS1 | USES1 | USES2 },		/* or rm,rn */
1694   { 0x200c, SETSSP | USES1 | USES2 },		/* cmp/str rm,rn */
1695   { 0x200d, SETS1 | USES1 | USES2 },		/* xtrct rm,rn */
1696   { 0x200e, SETSSP | USES1 | USES2 },		/* mulu.w rm,rn */
1697   { 0x200f, SETSSP | USES1 | USES2 }		/* muls.w rm,rn */
1698 };
1699 
1700 static const struct sh_minor_opcode sh_opcode2[] =
1701 {
1702   { MAP (sh_opcode20), 0xf00f }
1703 };
1704 
1705 static const struct sh_opcode sh_opcode30[] =
1706 {
1707   { 0x3000, SETSSP | USES1 | USES2 },		/* cmp/eq rm,rn */
1708   { 0x3002, SETSSP | USES1 | USES2 },		/* cmp/hs rm,rn */
1709   { 0x3003, SETSSP | USES1 | USES2 },		/* cmp/ge rm,rn */
1710   { 0x3004, SETSSP | USESSP | USES1 | USES2 },	/* div1 rm,rn */
1711   { 0x3005, SETSSP | USES1 | USES2 },		/* dmulu.l rm,rn */
1712   { 0x3006, SETSSP | USES1 | USES2 },		/* cmp/hi rm,rn */
1713   { 0x3007, SETSSP | USES1 | USES2 },		/* cmp/gt rm,rn */
1714   { 0x3008, SETS1 | USES1 | USES2 },		/* sub rm,rn */
1715   { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1716   { 0x300b, SETS1 | SETSSP | USES1 | USES2 },	/* subv rm,rn */
1717   { 0x300c, SETS1 | USES1 | USES2 },		/* add rm,rn */
1718   { 0x300d, SETSSP | USES1 | USES2 },		/* dmuls.l rm,rn */
1719   { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1720   { 0x300f, SETS1 | SETSSP | USES1 | USES2 }	/* addv rm,rn */
1721 };
1722 
1723 static const struct sh_minor_opcode sh_opcode3[] =
1724 {
1725   { MAP (sh_opcode30), 0xf00f }
1726 };
1727 
1728 static const struct sh_opcode sh_opcode40[] =
1729 {
1730   { 0x4000, SETS1 | SETSSP | USES1 },		/* shll rn */
1731   { 0x4001, SETS1 | SETSSP | USES1 },		/* shlr rn */
1732   { 0x4002, STORE | SETS1 | USES1 | USESSP },	/* sts.l mach,@-rn */
1733   { 0x4004, SETS1 | SETSSP | USES1 },		/* rotl rn */
1734   { 0x4005, SETS1 | SETSSP | USES1 },		/* rotr rn */
1735   { 0x4006, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,mach */
1736   { 0x4008, SETS1 | USES1 },			/* shll2 rn */
1737   { 0x4009, SETS1 | USES1 },			/* shlr2 rn */
1738   { 0x400a, SETSSP | USES1 },			/* lds rm,mach */
1739   { 0x400b, BRANCH | DELAY | USES1 },		/* jsr @rn */
1740   { 0x4010, SETS1 | SETSSP | USES1 },		/* dt rn */
1741   { 0x4011, SETSSP | USES1 },			/* cmp/pz rn */
1742   { 0x4012, STORE | SETS1 | USES1 | USESSP },	/* sts.l macl,@-rn */
1743   { 0x4014, SETSSP | USES1 },			/* setrc rm */
1744   { 0x4015, SETSSP | USES1 },			/* cmp/pl rn */
1745   { 0x4016, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,macl */
1746   { 0x4018, SETS1 | USES1 },			/* shll8 rn */
1747   { 0x4019, SETS1 | USES1 },			/* shlr8 rn */
1748   { 0x401a, SETSSP | USES1 },			/* lds rm,macl */
1749   { 0x401b, LOAD | SETSSP | USES1 },		/* tas.b @rn */
1750   { 0x4020, SETS1 | SETSSP | USES1 },		/* shal rn */
1751   { 0x4021, SETS1 | SETSSP | USES1 },		/* shar rn */
1752   { 0x4022, STORE | SETS1 | USES1 | USESSP },	/* sts.l pr,@-rn */
1753   { 0x4024, SETS1 | SETSSP | USES1 | USESSP },	/* rotcl rn */
1754   { 0x4025, SETS1 | SETSSP | USES1 | USESSP },	/* rotcr rn */
1755   { 0x4026, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,pr */
1756   { 0x4028, SETS1 | USES1 },			/* shll16 rn */
1757   { 0x4029, SETS1 | USES1 },			/* shlr16 rn */
1758   { 0x402a, SETSSP | USES1 },			/* lds rm,pr */
1759   { 0x402b, BRANCH | DELAY | USES1 },		/* jmp @rn */
1760   { 0x4052, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpul,@-rn */
1761   { 0x4056, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpul */
1762   { 0x405a, SETSSP | USES1 },			/* lds rm,fpul */
1763   { 0x4062, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpscr / dsr,@-rn */
1764   { 0x4066, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpscr / dsr */
1765   { 0x406a, SETSSP | USES1 },			/* lds rm,fpscr / lds rm,dsr */
1766   { 0x4072, STORE | SETS1 | USES1 | USESSP },	/* sts.l a0,@-rn */
1767   { 0x4076, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,a0 */
1768   { 0x407a, SETSSP | USES1 },			/* lds rm,a0 */
1769   { 0x4082, STORE | SETS1 | USES1 | USESSP },	/* sts.l x0,@-rn */
1770   { 0x4086, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x0 */
1771   { 0x408a, SETSSP | USES1 },			/* lds rm,x0 */
1772   { 0x4092, STORE | SETS1 | USES1 | USESSP },	/* sts.l x1,@-rn */
1773   { 0x4096, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x1 */
1774   { 0x409a, SETSSP | USES1 },			/* lds rm,x1 */
1775   { 0x40a2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y0,@-rn */
1776   { 0x40a6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y0 */
1777   { 0x40aa, SETSSP | USES1 },			/* lds rm,y0 */
1778   { 0x40b2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y1,@-rn */
1779   { 0x40b6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y1 */
1780   { 0x40ba, SETSSP | USES1 }			/* lds rm,y1 */
1781 };
1782 
1783 static const struct sh_opcode sh_opcode41[] =
1784 {
1785   { 0x4003, STORE | SETS1 | USES1 | USESSP },	/* stc.l <special_reg>,@-rn */
1786   { 0x4007, LOAD | SETS1 | SETSSP | USES1 },	/* ldc.l @rm+,<special_reg> */
1787   { 0x400c, SETS1 | USES1 | USES2 },		/* shad rm,rn */
1788   { 0x400d, SETS1 | USES1 | USES2 },		/* shld rm,rn */
1789   { 0x400e, SETSSP | USES1 },			/* ldc rm,<special_reg> */
1790   { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1791 };
1792 
1793 static const struct sh_minor_opcode sh_opcode4[] =
1794 {
1795   { MAP (sh_opcode40), 0xf0ff },
1796   { MAP (sh_opcode41), 0xf00f }
1797 };
1798 
1799 static const struct sh_opcode sh_opcode50[] =
1800 {
1801   { 0x5000, LOAD | SETS1 | USES2 }	/* mov.l @(disp,rm),rn */
1802 };
1803 
1804 static const struct sh_minor_opcode sh_opcode5[] =
1805 {
1806   { MAP (sh_opcode50), 0xf000 }
1807 };
1808 
1809 static const struct sh_opcode sh_opcode60[] =
1810 {
1811   { 0x6000, LOAD | SETS1 | USES2 },		/* mov.b @rm,rn */
1812   { 0x6001, LOAD | SETS1 | USES2 },		/* mov.w @rm,rn */
1813   { 0x6002, LOAD | SETS1 | USES2 },		/* mov.l @rm,rn */
1814   { 0x6003, SETS1 | USES2 },			/* mov rm,rn */
1815   { 0x6004, LOAD | SETS1 | SETS2 | USES2 },	/* mov.b @rm+,rn */
1816   { 0x6005, LOAD | SETS1 | SETS2 | USES2 },	/* mov.w @rm+,rn */
1817   { 0x6006, LOAD | SETS1 | SETS2 | USES2 },	/* mov.l @rm+,rn */
1818   { 0x6007, SETS1 | USES2 },			/* not rm,rn */
1819   { 0x6008, SETS1 | USES2 },			/* swap.b rm,rn */
1820   { 0x6009, SETS1 | USES2 },			/* swap.w rm,rn */
1821   { 0x600a, SETS1 | SETSSP | USES2 | USESSP },	/* negc rm,rn */
1822   { 0x600b, SETS1 | USES2 },			/* neg rm,rn */
1823   { 0x600c, SETS1 | USES2 },			/* extu.b rm,rn */
1824   { 0x600d, SETS1 | USES2 },			/* extu.w rm,rn */
1825   { 0x600e, SETS1 | USES2 },			/* exts.b rm,rn */
1826   { 0x600f, SETS1 | USES2 }			/* exts.w rm,rn */
1827 };
1828 
1829 static const struct sh_minor_opcode sh_opcode6[] =
1830 {
1831   { MAP (sh_opcode60), 0xf00f }
1832 };
1833 
1834 static const struct sh_opcode sh_opcode70[] =
1835 {
1836   { 0x7000, SETS1 | USES1 }		/* add #imm,rn */
1837 };
1838 
1839 static const struct sh_minor_opcode sh_opcode7[] =
1840 {
1841   { MAP (sh_opcode70), 0xf000 }
1842 };
1843 
1844 static const struct sh_opcode sh_opcode80[] =
1845 {
1846   { 0x8000, STORE | USES2 | USESR0 },	/* mov.b r0,@(disp,rn) */
1847   { 0x8100, STORE | USES2 | USESR0 },	/* mov.w r0,@(disp,rn) */
1848   { 0x8200, SETSSP },			/* setrc #imm */
1849   { 0x8400, LOAD | SETSR0 | USES2 },	/* mov.b @(disp,rm),r0 */
1850   { 0x8500, LOAD | SETSR0 | USES2 },	/* mov.w @(disp,rm),r0 */
1851   { 0x8800, SETSSP | USESR0 },		/* cmp/eq #imm,r0 */
1852   { 0x8900, BRANCH | USESSP },		/* bt label */
1853   { 0x8b00, BRANCH | USESSP },		/* bf label */
1854   { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
1855   { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
1856   { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
1857   { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */
1858 };
1859 
1860 static const struct sh_minor_opcode sh_opcode8[] =
1861 {
1862   { MAP (sh_opcode80), 0xff00 }
1863 };
1864 
1865 static const struct sh_opcode sh_opcode90[] =
1866 {
1867   { 0x9000, LOAD | SETS1 }	/* mov.w @(disp,pc),rn */
1868 };
1869 
1870 static const struct sh_minor_opcode sh_opcode9[] =
1871 {
1872   { MAP (sh_opcode90), 0xf000 }
1873 };
1874 
1875 static const struct sh_opcode sh_opcodea0[] =
1876 {
1877   { 0xa000, BRANCH | DELAY }	/* bra label */
1878 };
1879 
1880 static const struct sh_minor_opcode sh_opcodea[] =
1881 {
1882   { MAP (sh_opcodea0), 0xf000 }
1883 };
1884 
1885 static const struct sh_opcode sh_opcodeb0[] =
1886 {
1887   { 0xb000, BRANCH | DELAY }	/* bsr label */
1888 };
1889 
1890 static const struct sh_minor_opcode sh_opcodeb[] =
1891 {
1892   { MAP (sh_opcodeb0), 0xf000 }
1893 };
1894 
1895 static const struct sh_opcode sh_opcodec0[] =
1896 {
1897   { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
1898   { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
1899   { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
1900   { 0xc300, BRANCH | USESSP },			/* trapa #imm */
1901   { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
1902   { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
1903   { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
1904   { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
1905   { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
1906   { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
1907   { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
1908   { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
1909   { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
1910   { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
1911   { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
1912   { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
1913 };
1914 
1915 static const struct sh_minor_opcode sh_opcodec[] =
1916 {
1917   { MAP (sh_opcodec0), 0xff00 }
1918 };
1919 
1920 static const struct sh_opcode sh_opcoded0[] =
1921 {
1922   { 0xd000, LOAD | SETS1 }		/* mov.l @(disp,pc),rn */
1923 };
1924 
1925 static const struct sh_minor_opcode sh_opcoded[] =
1926 {
1927   { MAP (sh_opcoded0), 0xf000 }
1928 };
1929 
1930 static const struct sh_opcode sh_opcodee0[] =
1931 {
1932   { 0xe000, SETS1 }		/* mov #imm,rn */
1933 };
1934 
1935 static const struct sh_minor_opcode sh_opcodee[] =
1936 {
1937   { MAP (sh_opcodee0), 0xf000 }
1938 };
1939 
1940 static const struct sh_opcode sh_opcodef0[] =
1941 {
1942   { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
1943   { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
1944   { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
1945   { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
1946   { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
1947   { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
1948   { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
1949   { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
1950   { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
1951   { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
1952   { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
1953   { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
1954   { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
1955   { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
1956 };
1957 
1958 static const struct sh_opcode sh_opcodef1[] =
1959 {
1960   { 0xf00d, SETSF1 | USESSP },	/* fsts fpul,fn */
1961   { 0xf01d, SETSSP | USESF1 },	/* flds fn,fpul */
1962   { 0xf02d, SETSF1 | USESSP },	/* float fpul,fn */
1963   { 0xf03d, SETSSP | USESF1 },	/* ftrc fn,fpul */
1964   { 0xf04d, SETSF1 | USESF1 },	/* fneg fn */
1965   { 0xf05d, SETSF1 | USESF1 },	/* fabs fn */
1966   { 0xf06d, SETSF1 | USESF1 },	/* fsqrt fn */
1967   { 0xf07d, SETSSP | USESF1 },	/* ftst/nan fn */
1968   { 0xf08d, SETSF1 },		/* fldi0 fn */
1969   { 0xf09d, SETSF1 }		/* fldi1 fn */
1970 };
1971 
1972 static const struct sh_minor_opcode sh_opcodef[] =
1973 {
1974   { MAP (sh_opcodef0), 0xf00f },
1975   { MAP (sh_opcodef1), 0xf0ff }
1976 };
1977 
1978 static struct sh_major_opcode sh_opcodes[] =
1979 {
1980   { MAP (sh_opcode0) },
1981   { MAP (sh_opcode1) },
1982   { MAP (sh_opcode2) },
1983   { MAP (sh_opcode3) },
1984   { MAP (sh_opcode4) },
1985   { MAP (sh_opcode5) },
1986   { MAP (sh_opcode6) },
1987   { MAP (sh_opcode7) },
1988   { MAP (sh_opcode8) },
1989   { MAP (sh_opcode9) },
1990   { MAP (sh_opcodea) },
1991   { MAP (sh_opcodeb) },
1992   { MAP (sh_opcodec) },
1993   { MAP (sh_opcoded) },
1994   { MAP (sh_opcodee) },
1995   { MAP (sh_opcodef) }
1996 };
1997 
1998 /* The double data transfer / parallel processing insns are not
1999    described here.  This will cause sh_align_load_span to leave them alone.  */
2000 
2001 static const struct sh_opcode sh_dsp_opcodef0[] =
2002 {
2003   { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
2004   { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
2005   { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
2006   { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
2007   { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
2008   { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
2009   { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
2010   { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
2011 };
2012 
2013 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2014 {
2015   { MAP (sh_dsp_opcodef0), 0xfc0d }
2016 };
2017 
2018 /* Given an instruction, return a pointer to the corresponding
2019    sh_opcode structure.  Return NULL if the instruction is not
2020    recognized.  */
2021 
2022 static const struct sh_opcode *
2023 sh_insn_info (unsigned int insn)
2024 {
2025   const struct sh_major_opcode *maj;
2026   const struct sh_minor_opcode *min, *minend;
2027 
2028   maj = &sh_opcodes[(insn & 0xf000) >> 12];
2029   min = maj->minor_opcodes;
2030   minend = min + maj->count;
2031   for (; min < minend; min++)
2032     {
2033       unsigned int l;
2034       const struct sh_opcode *op, *opend;
2035 
2036       l = insn & min->mask;
2037       op = min->opcodes;
2038       opend = op + min->count;
2039 
2040       /* Since the opcodes tables are sorted, we could use a binary
2041 	 search here if the count were above some cutoff value.  */
2042       for (; op < opend; op++)
2043 	if (op->opcode == l)
2044 	  return op;
2045     }
2046 
2047   return NULL;
2048 }
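
/* As a worked example of the lookup above (illustrative only): the insn
   0x310c ("add r0,r1") selects sh_opcodes[3]; the single minor table
   sh_opcode3 has mask 0xf00f, and 0x310c & 0xf00f == 0x300c, which matches
   the sh_opcode30 entry { 0x300c, SETS1 | USES1 | USES2 }.  A caller is
   expected to fetch the insn first, as _bfd_sh_align_load_span does below
   (with ADDR standing for the instruction offset):

     unsigned int insn = bfd_get_16 (abfd, contents + addr);
     const struct sh_opcode *op = sh_insn_info (insn);

     if (op != NULL && (op->flags & (LOAD | STORE)) != 0)
       ...				/+ this insn is a load or store +/
   */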
2049 
2050 /* See whether an instruction uses a general purpose register.  */
2051 
2052 static bool
2053 sh_insn_uses_reg (unsigned int insn,
2054 		  const struct sh_opcode *op,
2055 		  unsigned int reg)
2056 {
2057   unsigned int f;
2058 
2059   f = op->flags;
2060 
2061   if ((f & USES1) != 0
2062       && USES1_REG (insn) == reg)
2063     return true;
2064   if ((f & USES2) != 0
2065       && USES2_REG (insn) == reg)
2066     return true;
2067   if ((f & USESR0) != 0
2068       && reg == 0)
2069     return true;
2070   if ((f & USESAS) && reg == USESAS_REG (insn))
2071     return true;
2072   if ((f & USESR8) && reg == 8)
2073     return true;
2074 
2075   return false;
2076 }
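
/* For example, with the "add r0,r1" insn 0x310c (flags
   SETS1 | USES1 | USES2), sh_insn_uses_reg returns TRUE for reg 1 (the rn
   field in bits 8-11) and for reg 0 (the rm field in bits 4-7), and FALSE
   for every other register.  */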
2077 
2078 /* See whether an instruction sets a general purpose register.  */
2079 
2080 static bool
2081 sh_insn_sets_reg (unsigned int insn,
2082 		  const struct sh_opcode *op,
2083 		  unsigned int reg)
2084 {
2085   unsigned int f;
2086 
2087   f = op->flags;
2088 
2089   if ((f & SETS1) != 0
2090       && SETS1_REG (insn) == reg)
2091     return true;
2092   if ((f & SETS2) != 0
2093       && SETS2_REG (insn) == reg)
2094     return true;
2095   if ((f & SETSR0) != 0
2096       && reg == 0)
2097     return true;
2098   if ((f & SETSAS) && reg == SETSAS_REG (insn))
2099     return true;
2100 
2101   return false;
2102 }
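
/* For example, "mov #5,r3" is 0xe305 and carries only SETS1 in
   sh_opcodee0, so sh_insn_sets_reg returns TRUE only for reg 3, while
   sh_insn_uses_reg returns FALSE for every register.  */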
2103 
2104 /* See whether an instruction uses or sets a general purpose register.  */
2105 
2106 static bool
2107 sh_insn_uses_or_sets_reg (unsigned int insn,
2108 			  const struct sh_opcode *op,
2109 			  unsigned int reg)
2110 {
2111   if (sh_insn_uses_reg (insn, op, reg))
2112     return true;
2113 
2114   return sh_insn_sets_reg (insn, op, reg);
2115 }
2116 
2117 /* See whether an instruction uses a floating point register.  */
2118 
2119 static bool
2120 sh_insn_uses_freg (unsigned int insn,
2121 		   const struct sh_opcode *op,
2122 		   unsigned int freg)
2123 {
2124   unsigned int f;
2125 
2126   f = op->flags;
2127 
2128   /* We can't tell if this is a double-precision insn, so just play safe
2129      and assume that it might be.  That means we must test not only FREG
2130      against itself, but also an even FREG against FREG+1 - in case the
2131      using insn uses just the low part of a double precision value - and
2132      an odd FREG against FREG-1 - in case the setting insn sets just the
2133      low part of a double precision value.
2134      So what this all boils down to is that we have to ignore the lowest
2135      bit of the register number.  */
2136 
2137   if ((f & USESF1) != 0
2138       && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2139     return true;
2140   if ((f & USESF2) != 0
2141       && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2142     return true;
2143   if ((f & USESF0) != 0
2144       && freg == 0)
2145     return true;
2146 
2147   return false;
2148 }
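
/* For example, "fneg f5" is 0xf54d with SETSF1 | USESF1, so
   sh_insn_uses_freg returns TRUE for freg 5 and, because the low bit is
   ignored, also for freg 4: f4/f5 might be the two halves of one double
   precision value.  It returns FALSE for any other freg.  */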
2149 
2150 /* See whether an instruction sets a floating point register.  */
2151 
2152 static bool
2153 sh_insn_sets_freg (unsigned int insn,
2154 		   const struct sh_opcode *op,
2155 		   unsigned int freg)
2156 {
2157   unsigned int f;
2158 
2159   f = op->flags;
2160 
2161   /* We can't tell if this is a double-precision insn, so just play safe
2162      and assume that it might be.  That means we must test not only FREG
2163      against itself, but also an even FREG against FREG+1 - in case the
2164      using insn uses just the low part of a double precision value - and
2165      an odd FREG against FREG-1 - in case the setting insn sets just the
2166      low part of a double precision value.
2167      So what this all boils down to is that we have to ignore the lowest
2168      bit of the register number.  */
2169 
2170   if ((f & SETSF1) != 0
2171       && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2172     return true;
2173 
2174   return false;
2175 }
2176 
2177 /* See whether an instruction uses or sets a floating point register.  */
2178 
2179 static bool
2180 sh_insn_uses_or_sets_freg (unsigned int insn,
2181 			   const struct sh_opcode *op,
2182 			   unsigned int reg)
2183 {
2184   if (sh_insn_uses_freg (insn, op, reg))
2185     return true;
2186 
2187   return sh_insn_sets_freg (insn, op, reg);
2188 }
2189 
2190 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2191    before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
2192    This should return TRUE if there is a conflict, or FALSE if the
2193    instructions can be swapped safely.  */
2194 
2195 static bool
2196 sh_insns_conflict (unsigned int i1,
2197 		   const struct sh_opcode *op1,
2198 		   unsigned int i2,
2199 		   const struct sh_opcode *op2)
2200 {
2201   unsigned int f1, f2;
2202 
2203   f1 = op1->flags;
2204   f2 = op2->flags;
2205 
2206   /* Load of fpscr conflicts with floating point operations.
2207      FIXME: shouldn't test raw opcodes here.  */
2208   if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2209       || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2210     return true;
2211 
2212   if ((f1 & (BRANCH | DELAY)) != 0
2213       || (f2 & (BRANCH | DELAY)) != 0)
2214     return true;
2215 
2216   if (((f1 | f2) & SETSSP)
2217       && (f1 & (SETSSP | USESSP))
2218       && (f2 & (SETSSP | USESSP)))
2219     return true;
2220 
2221   if ((f1 & SETS1) != 0
2222       && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2223     return true;
2224   if ((f1 & SETS2) != 0
2225       && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2226     return true;
2227   if ((f1 & SETSR0) != 0
2228       && sh_insn_uses_or_sets_reg (i2, op2, 0))
2229     return true;
2230   if ((f1 & SETSAS)
2231       && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2232     return true;
2233   if ((f1 & SETSF1) != 0
2234       && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2235     return true;
2236 
2237   if ((f2 & SETS1) != 0
2238       && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2239     return true;
2240   if ((f2 & SETS2) != 0
2241       && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2242     return true;
2243   if ((f2 & SETSR0) != 0
2244       && sh_insn_uses_or_sets_reg (i1, op1, 0))
2245     return true;
2246   if ((f2 & SETSAS)
2247       && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2248     return true;
2249   if ((f2 & SETSF1) != 0
2250       && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2251     return true;
2252 
2253   /* The instructions do not conflict.  */
2254   return false;
2255 }
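
/* For example, "add r0,r1" (0x310c) followed by "mov.l @r1,r2" (0x6212)
   conflict: the first sets r1 and the second uses r1 as its address
   register, so the pair must not be reordered.  Two moves with disjoint
   operands, such as "mov r1,r2" (0x6213) and "mov r3,r4" (0x6433), do not
   conflict and may be swapped.  */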
2256 
2257 /* I1 is a load instruction, and I2 is some other instruction.  Return
2258    TRUE if I1 loads a register which I2 uses.  */
2259 
2260 static bool
2261 sh_load_use (unsigned int i1,
2262 	     const struct sh_opcode *op1,
2263 	     unsigned int i2,
2264 	     const struct sh_opcode *op2)
2265 {
2266   unsigned int f1;
2267 
2268   f1 = op1->flags;
2269 
2270   if ((f1 & LOAD) == 0)
2271     return false;
2272 
2273   /* If both SETS1 and SETSSP are set, that means a load to a special
2274      register using postincrement addressing mode, which we don't care
2275      about here.  */
2276   if ((f1 & SETS1) != 0
2277       && (f1 & SETSSP) == 0
2278       && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2279     return true;
2280 
2281   if ((f1 & SETSR0) != 0
2282       && sh_insn_uses_reg (i2, op2, 0))
2283     return true;
2284 
2285   if ((f1 & SETSF1) != 0
2286       && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2287     return true;
2288 
2289   return false;
2290 }
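
/* For example, if I1 is "mov.l @r3,r1" (0x6132, a LOAD setting r1) and I2
   is "add r1,r2" (0x321c, which uses r1), sh_load_use returns TRUE:
   placing I2 immediately after I1 would stall waiting for the loaded
   value, so the caller avoids creating that arrangement.  */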
2291 
2292 /* Try to align loads and stores within a span of memory.  This is
2293    called by both the ELF and the COFF sh targets.  ABFD and SEC are
2294    the BFD and section we are examining.  CONTENTS is the contents of
2295    the section.  SWAP is the routine to call to swap two instructions.
2296    RELOCS is a pointer to the internal relocation information, to be
2297    passed to SWAP.  PLABEL is a pointer to the current label in a
2298    sorted list of labels; LABEL_END is the end of the list.  START and
2299    STOP are the range of memory to examine.  If a swap is made,
2300    *PSWAPPED is set to TRUE.  */
2301 
2302 #ifdef COFF_WITH_PE
2303 static
2304 #endif
2305 bool
2306 _bfd_sh_align_load_span (bfd *abfd,
2307 			 asection *sec,
2308 			 bfd_byte *contents,
2309 			 bool (*swap) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
2310 			 void * relocs,
2311 			 bfd_vma **plabel,
2312 			 bfd_vma *label_end,
2313 			 bfd_vma start,
2314 			 bfd_vma stop,
2315 			 bool *pswapped)
2316 {
2317   int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2318 	     || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2319   bfd_vma i;
2320 
2321   /* The SH4 has a Harvard architecture, hence aligning loads is not
2322      desirable.  In fact, it is counter-productive, since it interferes
2323      with the schedules generated by the compiler.  */
2324   if (abfd->arch_info->mach == bfd_mach_sh4)
2325     return true;
2326 
2327   /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2328      instructions.  */
2329   if (dsp)
2330     {
2331       sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2332       sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2333     }
2334 
2335   /* Instructions should be aligned on 2 byte boundaries.  */
2336   if ((start & 1) == 1)
2337     ++start;
2338 
2339   /* Now look through the unaligned addresses.  */
2340   i = start;
2341   if ((i & 2) == 0)
2342     i += 2;
2343   for (; i < stop; i += 4)
2344     {
2345       unsigned int insn;
2346       const struct sh_opcode *op;
2347       unsigned int prev_insn = 0;
2348       const struct sh_opcode *prev_op = NULL;
2349 
2350       insn = bfd_get_16 (abfd, contents + i);
2351       op = sh_insn_info (insn);
2352       if (op == NULL
2353 	  || (op->flags & (LOAD | STORE)) == 0)
2354 	continue;
2355 
2356       /* This is a load or store which is not on a four byte boundary.  */
2357 
2358       while (*plabel < label_end && **plabel < i)
2359 	++*plabel;
2360 
2361       if (i > start)
2362 	{
2363 	  prev_insn = bfd_get_16 (abfd, contents + i - 2);
2364 	  /* If INSN is the field b of a parallel processing insn, it is not
2365 	     a load / store after all.  Note that the test here might mistake
2366 	     the field_b of a pcopy insn for the starting code of a parallel
2367 	     processing insn; this might miss a swapping opportunity, but at
2368 	     least we're on the safe side.  */
2369 	  if (dsp && (prev_insn & 0xfc00) == 0xf800)
2370 	    continue;
2371 
2372 	  /* Check if prev_insn is actually the field b of a parallel
2373 	     processing insn.  Again, this can give a spurious match
2374 	     after a pcopy.  */
2375 	  if (dsp && i - 2 > start)
2376 	    {
2377 	      unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2378 
2379 	      if ((pprev_insn & 0xfc00) == 0xf800)
2380 		prev_op = NULL;
2381 	      else
2382 		prev_op = sh_insn_info (prev_insn);
2383 	    }
2384 	  else
2385 	    prev_op = sh_insn_info (prev_insn);
2386 
2387 	  /* If the load/store instruction is in a delay slot, we
2388 	     can't swap.  */
2389 	  if (prev_op == NULL
2390 	      || (prev_op->flags & DELAY) != 0)
2391 	    continue;
2392 	}
2393       if (i > start
2394 	  && (*plabel >= label_end || **plabel != i)
2395 	  && prev_op != NULL
2396 	  && (prev_op->flags & (LOAD | STORE)) == 0
2397 	  && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2398 	{
2399 	  bool ok;
2400 
2401 	  /* The load/store instruction does not have a label, and
2402 	     there is a previous instruction; PREV_INSN is not
2403 	     itself a load/store instruction, and PREV_INSN and
2404 	     INSN do not conflict.  */
2405 
2406 	  ok = true;
2407 
2408 	  if (i >= start + 4)
2409 	    {
2410 	      unsigned int prev2_insn;
2411 	      const struct sh_opcode *prev2_op;
2412 
2413 	      prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2414 	      prev2_op = sh_insn_info (prev2_insn);
2415 
2416 	      /* If the instruction before PREV_INSN has a delay
2417 		 slot--that is, PREV_INSN is in a delay slot--we
2418 		 can not swap.  */
2419 	      if (prev2_op == NULL
2420 		  || (prev2_op->flags & DELAY) != 0)
2421 		ok = false;
2422 
2423 	      /* If the instruction before PREV_INSN is a load,
2424 		 and it sets a register which INSN uses, then
2425 		 putting INSN immediately after PREV_INSN will
2426 		 cause a pipeline bubble, so there is no point to
2427 		 making the swap.  */
2428 	      if (ok
2429 		  && (prev2_op->flags & LOAD) != 0
2430 		  && sh_load_use (prev2_insn, prev2_op, insn, op))
2431 		ok = false;
2432 	    }
2433 
2434 	  if (ok)
2435 	    {
2436 	      if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2437 		return false;
2438 	      *pswapped = true;
2439 	      continue;
2440 	    }
2441 	}
2442 
2443       while (*plabel < label_end && **plabel < i + 2)
2444 	++*plabel;
2445 
2446       if (i + 2 < stop
2447 	  && (*plabel >= label_end || **plabel != i + 2))
2448 	{
2449 	  unsigned int next_insn;
2450 	  const struct sh_opcode *next_op;
2451 
2452 	  /* There is an instruction after the load/store
2453 	     instruction, and it does not have a label.  */
2454 	  next_insn = bfd_get_16 (abfd, contents + i + 2);
2455 	  next_op = sh_insn_info (next_insn);
2456 	  if (next_op != NULL
2457 	      && (next_op->flags & (LOAD | STORE)) == 0
2458 	      && ! sh_insns_conflict (insn, op, next_insn, next_op))
2459 	    {
2460 	      bool ok;
2461 
2462 	      /* NEXT_INSN is not itself a load/store instruction,
2463 		 and it does not conflict with INSN.  */
2464 
2465 	      ok = true;
2466 
2467 	      /* If PREV_INSN is a load, and it sets a register
2468 		 which NEXT_INSN uses, then putting NEXT_INSN
2469 		 immediately after PREV_INSN will cause a pipeline
2470 		 bubble, so there is no reason to make this swap.  */
2471 	      if (prev_op != NULL
2472 		  && (prev_op->flags & LOAD) != 0
2473 		  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2474 		ok = false;
2475 
2476 	      /* If INSN is a load, and it sets a register which
2477 		 the insn after NEXT_INSN uses, then doing the
2478 		 swap will cause a pipeline bubble, so there is no
2479 		 reason to make the swap.  However, if the insn
2480 		 after NEXT_INSN is itself a load or store
2481 		 instruction, then it is misaligned, so
2482 		 optimistically hope that it will be swapped
2483 		 itself, and just live with the pipeline bubble if
2484 		 it isn't.  */
2485 	      if (ok
2486 		  && i + 4 < stop
2487 		  && (op->flags & LOAD) != 0)
2488 		{
2489 		  unsigned int next2_insn;
2490 		  const struct sh_opcode *next2_op;
2491 
2492 		  next2_insn = bfd_get_16 (abfd, contents + i + 4);
2493 		  next2_op = sh_insn_info (next2_insn);
2494 		  if (next2_op == NULL
2495 		      || ((next2_op->flags & (LOAD | STORE)) == 0
2496 			  && sh_load_use (insn, op, next2_insn, next2_op)))
2497 		    ok = false;
2498 		}
2499 
2500 	      if (ok)
2501 		{
2502 		  if (! (*swap) (abfd, sec, relocs, contents, i))
2503 		    return false;
2504 		  *pswapped = true;
2505 		  continue;
2506 		}
2507 	    }
2508 	}
2509     }
2510 
2511   return true;
2512 }
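
/* A concrete illustration of the pass above: given

	add	#1,r1		! offset 0 mod 4
	mov.l	@r2,r3		! offset 2 mod 4, misaligned load

   the loop finds the misaligned mov.l, checks that the add before it is
   not itself a load or store, that neither insn sits in a delay slot,
   that the load carries no label, and that the two insns do not conflict,
   and then calls SWAP so the mov.l ends up on a four byte boundary.  */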
2513 #endif /* not COFF_IMAGE_WITH_PE */
2514 
2515 /* Swap two SH instructions.  */
2516 
2517 static bool
2518 sh_swap_insns (bfd *      abfd,
2519 	       asection * sec,
2520 	       void *     relocs,
2521 	       bfd_byte * contents,
2522 	       bfd_vma    addr)
2523 {
2524   struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2525   unsigned short i1, i2;
2526   struct internal_reloc *irel, *irelend;
2527 
2528   /* Swap the instructions themselves.  */
2529   i1 = bfd_get_16 (abfd, contents + addr);
2530   i2 = bfd_get_16 (abfd, contents + addr + 2);
2531   bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2532   bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2533 
2534   /* Adjust all reloc addresses.  */
2535   irelend = internal_relocs + sec->reloc_count;
2536   for (irel = internal_relocs; irel < irelend; irel++)
2537     {
2538       int type, add;
2539 
2540       /* There are a few special types of relocs that we don't want to
2541 	 adjust.  These relocs do not apply to the instruction itself,
2542 	 but are only associated with the address.  */
2543       type = irel->r_type;
2544       if (type == R_SH_ALIGN
2545 	  || type == R_SH_CODE
2546 	  || type == R_SH_DATA
2547 	  || type == R_SH_LABEL)
2548 	continue;
2549 
2550       /* If an R_SH_USES reloc points to one of the addresses being
2551 	 swapped, we must adjust it.  It would be incorrect to do this
2552 	 for a jump, though, since we want to execute both
2553 	 instructions after the jump.  (We have avoided swapping
2554 	 around a label, so the jump will not wind up executing an
2555 	 instruction it shouldn't).  */
2556       if (type == R_SH_USES)
2557 	{
2558 	  bfd_vma off;
2559 
2560 	  off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2561 	  if (off == addr)
2562 	    irel->r_offset += 2;
2563 	  else if (off == addr + 2)
2564 	    irel->r_offset -= 2;
2565 	}
2566 
2567       if (irel->r_vaddr - sec->vma == addr)
2568 	{
2569 	  irel->r_vaddr += 2;
2570 	  add = -2;
2571 	}
2572       else if (irel->r_vaddr - sec->vma == addr + 2)
2573 	{
2574 	  irel->r_vaddr -= 2;
2575 	  add = 2;
2576 	}
2577       else
2578 	add = 0;
2579 
2580       if (add != 0)
2581 	{
2582 	  bfd_byte *loc;
2583 	  unsigned short insn, oinsn;
2584 	  bool overflow;
2585 
2586 	  loc = contents + irel->r_vaddr - sec->vma;
2587 	  overflow = false;
2588 	  switch (type)
2589 	    {
2590 	    default:
2591 	      break;
2592 
2593 	    case R_SH_PCDISP8BY2:
2594 	    case R_SH_PCRELIMM8BY2:
2595 	      insn = bfd_get_16 (abfd, loc);
2596 	      oinsn = insn;
2597 	      insn += add / 2;
2598 	      if ((oinsn & 0xff00) != (insn & 0xff00))
2599 		overflow = true;
2600 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2601 	      break;
2602 
2603 	    case R_SH_PCDISP:
2604 	      insn = bfd_get_16 (abfd, loc);
2605 	      oinsn = insn;
2606 	      insn += add / 2;
2607 	      if ((oinsn & 0xf000) != (insn & 0xf000))
2608 		overflow = true;
2609 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2610 	      break;
2611 
2612 	    case R_SH_PCRELIMM8BY4:
2613 	      /* This reloc ignores the least significant 3 bits of
2614 		 the program counter before adding in the offset.
2615 		 This means that if ADDR is at an even address, the
2616 		 swap will not affect the offset.  If ADDR is at an
2617 		 odd address, then the instruction will be crossing a
2618 		 four byte boundary, and must be adjusted.  */
2619 	      if ((addr & 3) != 0)
2620 		{
2621 		  insn = bfd_get_16 (abfd, loc);
2622 		  oinsn = insn;
2623 		  insn += add / 2;
2624 		  if ((oinsn & 0xff00) != (insn & 0xff00))
2625 		    overflow = true;
2626 		  bfd_put_16 (abfd, (bfd_vma) insn, loc);
2627 		}
2628 
2629 	      break;
2630 	    }
2631 
2632 	  if (overflow)
2633 	    {
2634 	      _bfd_error_handler
2635 		/* xgettext: c-format */
2636 		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
2637 		 abfd, (uint64_t) irel->r_vaddr);
2638 	      bfd_set_error (bfd_error_bad_value);
2639 	      return false;
2640 	    }
2641 	}
2642     }
2643 
2644   return true;
2645 }
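
/* To illustrate the displacement adjustment above: when a conditional
   branch carrying an R_SH_PCDISP8BY2 reloc sits at the lower of the two
   swapped addresses, it moves forward by two bytes, so ADD is -2 and the
   branch's 8-bit displacement (counted in two byte units) is reduced by
   one; the overflow test catches the case where the adjustment changes
   bits outside the displacement field.  */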
2646 
2647 /* Look for loads and stores which we can align to four byte
2648    boundaries.  See the longer comment above sh_relax_section for why
2649    this is desirable.  This sets *PSWAPPED if some instruction was
2650    swapped.  */
2651 
2652 static bool
2653 sh_align_loads (bfd *abfd,
2654 		asection *sec,
2655 		struct internal_reloc *internal_relocs,
2656 		bfd_byte *contents,
2657 		bool *pswapped)
2658 {
2659   struct internal_reloc *irel, *irelend;
2660   bfd_vma *labels = NULL;
2661   bfd_vma *label, *label_end;
2662   bfd_size_type amt;
2663 
2664   *pswapped = false;
2665 
2666   irelend = internal_relocs + sec->reloc_count;
2667 
2668   /* Get all the addresses with labels on them.  */
2669   amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2670   labels = (bfd_vma *) bfd_malloc (amt);
2671   if (labels == NULL)
2672     goto error_return;
2673   label_end = labels;
2674   for (irel = internal_relocs; irel < irelend; irel++)
2675     {
2676       if (irel->r_type == R_SH_LABEL)
2677 	{
2678 	  *label_end = irel->r_vaddr - sec->vma;
2679 	  ++label_end;
2680 	}
2681     }
2682 
2683   /* Note that the assembler currently always outputs relocs in
2684      address order.  If that ever changes, this code will need to sort
2685      the label values and the relocs.  */
2686 
2687   label = labels;
2688 
2689   for (irel = internal_relocs; irel < irelend; irel++)
2690     {
2691       bfd_vma start, stop;
2692 
2693       if (irel->r_type != R_SH_CODE)
2694 	continue;
2695 
2696       start = irel->r_vaddr - sec->vma;
2697 
2698       for (irel++; irel < irelend; irel++)
2699 	if (irel->r_type == R_SH_DATA)
2700 	  break;
2701       if (irel < irelend)
2702 	stop = irel->r_vaddr - sec->vma;
2703       else
2704 	stop = sec->size;
2705 
2706       if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2707 				     internal_relocs, &label,
2708 				     label_end, start, stop, pswapped))
2709 	goto error_return;
2710     }
2711 
2712   free (labels);
2713 
2714   return true;
2715 
2716  error_return:
2717   free (labels);
2718   return false;
2719 }
2720 
2721 /* This is a modification of _bfd_coff_generic_relocate_section, which
2722    will handle SH relaxing.  */
2723 
2724 static bool
2725 sh_relocate_section (bfd *output_bfd ATTRIBUTE_UNUSED,
2726 		     struct bfd_link_info *info,
2727 		     bfd *input_bfd,
2728 		     asection *input_section,
2729 		     bfd_byte *contents,
2730 		     struct internal_reloc *relocs,
2731 		     struct internal_syment *syms,
2732 		     asection **sections)
2733 {
2734   struct internal_reloc *rel;
2735   struct internal_reloc *relend;
2736 
2737   rel = relocs;
2738   relend = rel + input_section->reloc_count;
2739   for (; rel < relend; rel++)
2740     {
2741       long symndx;
2742       struct coff_link_hash_entry *h;
2743       struct internal_syment *sym;
2744       bfd_vma addend;
2745       bfd_vma val;
2746       reloc_howto_type *howto;
2747       bfd_reloc_status_type rstat;
2748 
2749       /* Almost all relocs have to do with relaxing.  If any work must
2750 	 be done for them, it has been done in sh_relax_section.  */
2751       if (rel->r_type != R_SH_IMM32
2752 #ifdef COFF_WITH_PE
2753 	  && rel->r_type != R_SH_IMM32CE
2754 	  && rel->r_type != R_SH_IMAGEBASE
2755 #endif
2756 	  && rel->r_type != R_SH_PCDISP)
2757 	continue;
2758 
2759       symndx = rel->r_symndx;
2760 
2761       if (symndx == -1)
2762 	{
2763 	  h = NULL;
2764 	  sym = NULL;
2765 	}
2766       else
2767 	{
2768 	  if (symndx < 0
2769 	      || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2770 	    {
2771 	      _bfd_error_handler
2772 		/* xgettext: c-format */
2773 		(_("%pB: illegal symbol index %ld in relocs"),
2774 		 input_bfd, symndx);
2775 	      bfd_set_error (bfd_error_bad_value);
2776 	      return false;
2777 	    }
2778 	  h = obj_coff_sym_hashes (input_bfd)[symndx];
2779 	  sym = syms + symndx;
2780 	}
2781 
2782       if (sym != NULL && sym->n_scnum != 0)
2783 	addend = - sym->n_value;
2784       else
2785 	addend = 0;
2786 
2787       if (rel->r_type == R_SH_PCDISP)
2788 	addend -= 4;
2789 
2790       if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2791 	howto = NULL;
2792       else
2793 	howto = &sh_coff_howtos[rel->r_type];
2794 
2795       if (howto == NULL)
2796 	{
2797 	  bfd_set_error (bfd_error_bad_value);
2798 	  return false;
2799 	}
2800 
2801 #ifdef COFF_WITH_PE
2802       if (rel->r_type == R_SH_IMAGEBASE)
2803 	addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2804 #endif
2805 
2806       val = 0;
2807 
2808       if (h == NULL)
2809 	{
2810 	  asection *sec;
2811 
2812 	  /* There is nothing to do for an internal PCDISP reloc.  */
2813 	  if (rel->r_type == R_SH_PCDISP)
2814 	    continue;
2815 
2816 	  if (symndx == -1)
2817 	    {
2818 	      sec = bfd_abs_section_ptr;
2819 	      val = 0;
2820 	    }
2821 	  else
2822 	    {
2823 	      sec = sections[symndx];
2824 	      val = (sec->output_section->vma
2825 		     + sec->output_offset
2826 		     + sym->n_value
2827 		     - sec->vma);
2828 	    }
2829 	}
2830       else
2831 	{
2832 	  if (h->root.type == bfd_link_hash_defined
2833 	      || h->root.type == bfd_link_hash_defweak)
2834 	    {
2835 	      asection *sec;
2836 
2837 	      sec = h->root.u.def.section;
2838 	      val = (h->root.u.def.value
2839 		     + sec->output_section->vma
2840 		     + sec->output_offset);
2841 	    }
2842 	  else if (! bfd_link_relocatable (info))
2843 	    (*info->callbacks->undefined_symbol)
2844 	      (info, h->root.root.string, input_bfd, input_section,
2845 	       rel->r_vaddr - input_section->vma, true);
2846 	}
2847 
2848       rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2849 					contents,
2850 					rel->r_vaddr - input_section->vma,
2851 					val, addend);
2852 
2853       switch (rstat)
2854 	{
2855 	default:
2856 	  abort ();
2857 	case bfd_reloc_ok:
2858 	  break;
2859 	case bfd_reloc_overflow:
2860 	  {
2861 	    const char *name;
2862 	    char buf[SYMNMLEN + 1];
2863 
2864 	    if (symndx == -1)
2865 	      name = "*ABS*";
2866 	    else if (h != NULL)
2867 	      name = NULL;
2868 	    else if (sym->_n._n_n._n_zeroes == 0
2869 		     && sym->_n._n_n._n_offset != 0)
2870 	      {
2871 		if (sym->_n._n_n._n_offset < obj_coff_strings_len (input_bfd))
2872 		  name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2873 		else
2874 		  name = "?";
2875 	      }
2876 	    else
2877 	      {
2878 		strncpy (buf, sym->_n._n_name, SYMNMLEN);
2879 		buf[SYMNMLEN] = '\0';
2880 		name = buf;
2881 	      }
2882 
2883 	    (*info->callbacks->reloc_overflow)
2884 	      (info, (h ? &h->root : NULL), name, howto->name,
2885 	       (bfd_vma) 0, input_bfd, input_section,
2886 	       rel->r_vaddr - input_section->vma);
2887 	  }
2888 	}
2889     }
2890 
2891   return true;
2892 }
2893 
2894 /* This is a version of bfd_generic_get_relocated_section_contents
2895    which uses sh_relocate_section.  */
2896 
2897 static bfd_byte *
2898 sh_coff_get_relocated_section_contents (bfd *output_bfd,
2899 					struct bfd_link_info *link_info,
2900 					struct bfd_link_order *link_order,
2901 					bfd_byte *data,
2902 					bool relocatable,
2903 					asymbol **symbols)
2904 {
2905   asection *input_section = link_order->u.indirect.section;
2906   bfd *input_bfd = input_section->owner;
2907   asection **sections = NULL;
2908   struct internal_reloc *internal_relocs = NULL;
2909   struct internal_syment *internal_syms = NULL;
2910 
2911   /* We only need to handle the case of relaxing, or of having a
2912      particular set of section contents, specially.  */
2913   if (relocatable
2914       || coff_section_data (input_bfd, input_section) == NULL
2915       || coff_section_data (input_bfd, input_section)->contents == NULL)
2916     return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2917 						       link_order, data,
2918 						       relocatable,
2919 						       symbols);
2920 
2921   bfd_byte *orig_data = data;
2922   if (data == NULL)
2923     {
2924       data = bfd_malloc (input_section->size);
2925       if (data == NULL)
2926 	return NULL;
2927     }
2928   memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2929 	  (size_t) input_section->size);
2930 
2931   if ((input_section->flags & SEC_RELOC) != 0
2932       && input_section->reloc_count > 0)
2933     {
2934       bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2935       bfd_byte *esym, *esymend;
2936       struct internal_syment *isymp;
2937       asection **secpp;
2938       bfd_size_type amt;
2939 
2940       if (! _bfd_coff_get_external_symbols (input_bfd))
2941 	goto error_return;
2942 
2943       internal_relocs = (_bfd_coff_read_internal_relocs
2944 			 (input_bfd, input_section, false, (bfd_byte *) NULL,
2945 			  false, (struct internal_reloc *) NULL));
2946       if (internal_relocs == NULL)
2947 	goto error_return;
2948 
2949       amt = obj_raw_syment_count (input_bfd);
2950       amt *= sizeof (struct internal_syment);
2951       internal_syms = (struct internal_syment *) bfd_malloc (amt);
2952       if (internal_syms == NULL)
2953 	goto error_return;
2954 
2955       amt = obj_raw_syment_count (input_bfd);
2956       amt *= sizeof (asection *);
2957       sections = (asection **) bfd_malloc (amt);
2958       if (sections == NULL)
2959 	goto error_return;
2960 
2961       isymp = internal_syms;
2962       secpp = sections;
2963       esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2964       esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2965       while (esym < esymend)
2966 	{
2967 	  bfd_coff_swap_sym_in (input_bfd, esym, isymp);
2968 
2969 	  if (isymp->n_scnum != 0)
2970 	    *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2971 	  else
2972 	    {
2973 	      if (isymp->n_value == 0)
2974 		*secpp = bfd_und_section_ptr;
2975 	      else
2976 		*secpp = bfd_com_section_ptr;
2977 	    }
2978 
2979 	  esym += (isymp->n_numaux + 1) * symesz;
2980 	  secpp += isymp->n_numaux + 1;
2981 	  isymp += isymp->n_numaux + 1;
2982 	}
2983 
2984       if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2985 				 input_section, data, internal_relocs,
2986 				 internal_syms, sections))
2987 	goto error_return;
2988 
2989       free (sections);
2990       sections = NULL;
2991       free (internal_syms);
2992       internal_syms = NULL;
2993       free (internal_relocs);
2994       internal_relocs = NULL;
2995     }
2996 
2997   return data;
2998 
2999  error_return:
3000   free (internal_relocs);
3001   free (internal_syms);
3002   free (sections);
3003   if (orig_data == NULL)
3004     free (data);
3005   return NULL;
3006 }
3007 
3008 /* The target vectors.  */
3009 
3010 #ifndef TARGET_SHL_SYM
3011 CREATE_BIG_COFF_TARGET_VEC (sh_coff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3012 #endif
3013 
3014 #ifdef TARGET_SHL_SYM
3015 #define TARGET_SYM TARGET_SHL_SYM
3016 #else
3017 #define TARGET_SYM sh_coff_le_vec
3018 #endif
3019 
3020 #ifndef TARGET_SHL_NAME
3021 #define TARGET_SHL_NAME "coff-shl"
3022 #endif
3023 
3024 #ifdef COFF_WITH_PE
3025 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3026 			       SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3027 #else
3028 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3029 			       0, '_', NULL, COFF_SWAP_TABLE)
3030 #endif
3031 
3032 #ifndef TARGET_SHL_SYM
3033 
3034 /* Some people want versions of the SH COFF target which do not align
3035    to 16 byte boundaries.  We implement that by adding a couple of new
3036    target vectors.  These are just like the ones above, but they
3037    change the default section alignment.  To generate them in the
3038    assembler, use -small.  To use them in the linker, use -b
3039    coff-sh{l}-small and -oformat coff-sh{l}-small.
3040 
3041    Yes, this is a horrible hack.  A general solution for setting
3042    section alignment in COFF is rather complex.  ELF handles this
3043    correctly.  */
3044 
3045 /* Only recognize the small versions if the target was not defaulted.
3046    Otherwise we won't recognize the non default endianness.  */
3047 
3048 static bfd_cleanup
3049 coff_small_object_p (bfd *abfd)
3050 {
3051   if (abfd->target_defaulted)
3052     {
3053       bfd_set_error (bfd_error_wrong_format);
3054       return NULL;
3055     }
3056   return coff_object_p (abfd);
3057 }
3058 
3059 /* Set the section alignment for the small versions.  */
3060 
3061 static bool
3062 coff_small_new_section_hook (bfd *abfd, asection *section)
3063 {
3064   if (! coff_new_section_hook (abfd, section))
3065     return false;
3066 
3067   /* We must align to at least a four byte boundary, because longword
3068      accesses must be on a four byte boundary.  */
3069   if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3070     section->alignment_power = 2;
3071 
3072   return true;
3073 }
3074 
3075 /* This is copied from bfd_coff_std_swap_table so that we can change
3076    the default section alignment power.  */
3077 
3078 static const bfd_coff_backend_data bfd_coff_small_swap_table =
3079 {
3080   coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3081   coff_swap_aux_out, coff_swap_sym_out,
3082   coff_swap_lineno_out, coff_swap_reloc_out,
3083   coff_swap_filehdr_out, coff_swap_aouthdr_out,
3084   coff_swap_scnhdr_out,
3085   FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3086 #ifdef COFF_LONG_FILENAMES
3087   true,
3088 #else
3089   false,
3090 #endif
3091   COFF_DEFAULT_LONG_SECTION_NAMES,
3092   2,
3093 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3094   true,
3095 #else
3096   false,
3097 #endif
3098 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3099   4,
3100 #else
3101   2,
3102 #endif
3103   32768,
3104   coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3105   coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3106   coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3107   coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3108   coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3109   coff_classify_symbol, coff_compute_section_file_positions,
3110   coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3111   coff_adjust_symndx, coff_link_add_one_symbol,
3112   coff_link_output_has_begun, coff_final_link_postscript,
3113   bfd_pe_print_pdata
3114 };
3115 
3116 #define coff_small_close_and_cleanup \
3117   coff_close_and_cleanup
3118 #define coff_small_bfd_free_cached_info \
3119   coff_bfd_free_cached_info
3120 #define coff_small_get_section_contents \
3121   coff_get_section_contents
3122 
3123 extern const bfd_target sh_coff_small_le_vec;
3124 
3125 const bfd_target sh_coff_small_vec =
3126 {
3127   "coff-sh-small",		/* name */
3128   bfd_target_coff_flavour,
3129   BFD_ENDIAN_BIG,		/* data byte order is big */
3130   BFD_ENDIAN_BIG,		/* header byte order is big */
3131 
3132   (HAS_RELOC | EXEC_P		/* object flags */
3133    | HAS_LINENO | HAS_DEBUG
3134    | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3135 
3136   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3137   '_',				/* leading symbol underscore */
3138   '/',				/* ar_pad_char */
3139   15,				/* ar_max_namelen */
3140   0,				/* match priority.  */
3141   TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols.  */
3142   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3143   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3144   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3145   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3146   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3147   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3148 
3149   {				/* bfd_check_format */
3150     _bfd_dummy_target,
3151     coff_small_object_p,
3152     bfd_generic_archive_p,
3153     _bfd_dummy_target
3154   },
3155   {				/* bfd_set_format */
3156     _bfd_bool_bfd_false_error,
3157     coff_mkobject,
3158     _bfd_generic_mkarchive,
3159     _bfd_bool_bfd_false_error
3160   },
3161   {				/* bfd_write_contents */
3162     _bfd_bool_bfd_false_error,
3163     coff_write_object_contents,
3164     _bfd_write_archive_contents,
3165     _bfd_bool_bfd_false_error
3166   },
3167 
3168   BFD_JUMP_TABLE_GENERIC (coff_small),
3169   BFD_JUMP_TABLE_COPY (coff),
3170   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3171   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3172   BFD_JUMP_TABLE_SYMBOLS (coff),
3173   BFD_JUMP_TABLE_RELOCS (coff),
3174   BFD_JUMP_TABLE_WRITE (coff),
3175   BFD_JUMP_TABLE_LINK (coff),
3176   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3177 
3178   &sh_coff_small_le_vec,
3179 
3180   &bfd_coff_small_swap_table
3181 };
3182 
3183 const bfd_target sh_coff_small_le_vec =
3184 {
3185   "coff-shl-small",		/* name */
3186   bfd_target_coff_flavour,
3187   BFD_ENDIAN_LITTLE,		/* data byte order is little */
3188   BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */
3189 
3190   (HAS_RELOC | EXEC_P		/* object flags */
3191    | HAS_LINENO | HAS_DEBUG
3192    | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3193 
3194   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3195   '_',				/* leading symbol underscore */
3196   '/',				/* ar_pad_char */
3197   15,				/* ar_max_namelen */
3198   0,				/* match priority.  */
3199   TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols.  */
3200   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3201   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3202   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3203   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3204   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3205   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3206 
3207   {				/* bfd_check_format */
3208     _bfd_dummy_target,
3209     coff_small_object_p,
3210     bfd_generic_archive_p,
3211     _bfd_dummy_target
3212   },
3213   {				/* bfd_set_format */
3214     _bfd_bool_bfd_false_error,
3215     coff_mkobject,
3216     _bfd_generic_mkarchive,
3217     _bfd_bool_bfd_false_error
3218   },
3219   {				/* bfd_write_contents */
3220     _bfd_bool_bfd_false_error,
3221     coff_write_object_contents,
3222     _bfd_write_archive_contents,
3223     _bfd_bool_bfd_false_error
3224   },
3225 
3226   BFD_JUMP_TABLE_GENERIC (coff_small),
3227   BFD_JUMP_TABLE_COPY (coff),
3228   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3229   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3230   BFD_JUMP_TABLE_SYMBOLS (coff),
3231   BFD_JUMP_TABLE_RELOCS (coff),
3232   BFD_JUMP_TABLE_WRITE (coff),
3233   BFD_JUMP_TABLE_LINK (coff),
3234   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3235 
3236   &sh_coff_small_vec,
3237 
3238   &bfd_coff_small_swap_table
3239 };
3240 #endif
3241