1 /* BFD back-end for Renesas Super-H COFF binaries.
2    Copyright (C) 1993-2018 Free Software Foundation, Inc.
3    Contributed by Cygnus Support.
4    Written by Steve Chamberlain, <sac@cygnus.com>.
5    Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
6 
7    This file is part of BFD, the Binary File Descriptor library.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software
21    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
22    MA 02110-1301, USA.  */
23 
24 #include "sysdep.h"
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "bfdlink.h"
29 #include "coff/sh.h"
30 #include "coff/internal.h"
31 
32 #undef  bfd_pe_print_pdata
33 
34 #ifdef COFF_WITH_PE
35 #include "coff/pe.h"
36 
37 #ifndef COFF_IMAGE_WITH_PE
38 static bfd_boolean sh_align_load_span
39   (bfd *, asection *, bfd_byte *,
40    bfd_boolean (*) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
41    void *, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bfd_boolean *);
42 
43 #define _bfd_sh_align_load_span sh_align_load_span
44 #endif
45 
46 #define	bfd_pe_print_pdata   _bfd_pe_print_ce_compressed_pdata
47 
48 #else
49 
50 #define	bfd_pe_print_pdata   NULL
51 
52 #endif /* COFF_WITH_PE.  */
53 
54 #include "libcoff.h"
55 
56 /* Internal functions.  */
57 
58 #ifdef COFF_WITH_PE
59 /* Can't build import tables with 2**4 alignment.  */
60 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
61 #else
62 /* Default section alignment to 2**4.  */
63 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
64 #endif
65 
66 #ifdef COFF_IMAGE_WITH_PE
67 /* Align PE executables.  */
68 #define COFF_PAGE_SIZE 0x1000
69 #endif
70 
71 /* Generate long file names.  */
72 #define COFF_LONG_FILENAMES
73 
74 #ifdef COFF_WITH_PE
75 /* Return TRUE if this relocation should
76    appear in the output .reloc section.  */
77 
78 static bfd_boolean
79 in_reloc_p (bfd * abfd ATTRIBUTE_UNUSED,
80 	    reloc_howto_type * howto)
81 {
82   return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
83 }
84 #endif
85 
86 static bfd_reloc_status_type
87 sh_reloc (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
88 static bfd_boolean
89 sh_relocate_section (bfd *, struct bfd_link_info *, bfd *, asection *,
90 		     bfd_byte *, struct internal_reloc *,
91 		     struct internal_syment *, asection **);
92 static bfd_boolean
93 sh_align_loads (bfd *, asection *, struct internal_reloc *,
94 		bfd_byte *, bfd_boolean *);
95 
96 /* The supported relocations.  There are a lot of relocations defined
97    in coff/internal.h which we do not expect to ever see.  */
98 static reloc_howto_type sh_coff_howtos[] =
99 {
100   EMPTY_HOWTO (0),
101   EMPTY_HOWTO (1),
102 #ifdef COFF_WITH_PE
103   /* Windows CE */
104   HOWTO (R_SH_IMM32CE,		/* type */
105 	 0,			/* rightshift */
106 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
107 	 32,			/* bitsize */
108 	 FALSE,			/* pc_relative */
109 	 0,			/* bitpos */
110 	 complain_overflow_bitfield, /* complain_on_overflow */
111 	 sh_reloc,		/* special_function */
112 	 "r_imm32ce",		/* name */
113 	 TRUE,			/* partial_inplace */
114 	 0xffffffff,		/* src_mask */
115 	 0xffffffff,		/* dst_mask */
116 	 FALSE),		/* pcrel_offset */
117 #else
118   EMPTY_HOWTO (2),
119 #endif
120   EMPTY_HOWTO (3), /* R_SH_PCREL8 */
121   EMPTY_HOWTO (4), /* R_SH_PCREL16 */
122   EMPTY_HOWTO (5), /* R_SH_HIGH8 */
123   EMPTY_HOWTO (6), /* R_SH_IMM24 */
124   EMPTY_HOWTO (7), /* R_SH_LOW16 */
125   EMPTY_HOWTO (8),
126   EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
127 
128   HOWTO (R_SH_PCDISP8BY2,	/* type */
129 	 1,			/* rightshift */
130 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
131 	 8,			/* bitsize */
132 	 TRUE,			/* pc_relative */
133 	 0,			/* bitpos */
134 	 complain_overflow_signed, /* complain_on_overflow */
135 	 sh_reloc,		/* special_function */
136 	 "r_pcdisp8by2",	/* name */
137 	 TRUE,			/* partial_inplace */
138 	 0xff,			/* src_mask */
139 	 0xff,			/* dst_mask */
140 	 TRUE),			/* pcrel_offset */
141 
142   EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
143 
144   HOWTO (R_SH_PCDISP,		/* type */
145 	 1,			/* rightshift */
146 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
147 	 12,			/* bitsize */
148 	 TRUE,			/* pc_relative */
149 	 0,			/* bitpos */
150 	 complain_overflow_signed, /* complain_on_overflow */
151 	 sh_reloc,		/* special_function */
152 	 "r_pcdisp12by2",	/* name */
153 	 TRUE,			/* partial_inplace */
154 	 0xfff,			/* src_mask */
155 	 0xfff,			/* dst_mask */
156 	 TRUE),			/* pcrel_offset */
157 
158   EMPTY_HOWTO (13),
159 
160   HOWTO (R_SH_IMM32,		/* type */
161 	 0,			/* rightshift */
162 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
163 	 32,			/* bitsize */
164 	 FALSE,			/* pc_relative */
165 	 0,			/* bitpos */
166 	 complain_overflow_bitfield, /* complain_on_overflow */
167 	 sh_reloc,		/* special_function */
168 	 "r_imm32",		/* name */
169 	 TRUE,			/* partial_inplace */
170 	 0xffffffff,		/* src_mask */
171 	 0xffffffff,		/* dst_mask */
172 	 FALSE),		/* pcrel_offset */
173 
174   EMPTY_HOWTO (15),
175 #ifdef COFF_WITH_PE
176   HOWTO (R_SH_IMAGEBASE,	/* type */
177 	 0,			/* rightshift */
178 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
179 	 32,			/* bitsize */
180 	 FALSE,			/* pc_relative */
181 	 0,			/* bitpos */
182 	 complain_overflow_bitfield, /* complain_on_overflow */
183 	 sh_reloc,		/* special_function */
184 	 "rva32",		/* name */
185 	 TRUE,			/* partial_inplace */
186 	 0xffffffff,		/* src_mask */
187 	 0xffffffff,		/* dst_mask */
188 	 FALSE),		/* pcrel_offset */
189 #else
190   EMPTY_HOWTO (16), /* R_SH_IMM8 */
191 #endif
192   EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
193   EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
194   EMPTY_HOWTO (19), /* R_SH_IMM4 */
195   EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
196   EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
197 
198   HOWTO (R_SH_PCRELIMM8BY2,	/* type */
199 	 1,			/* rightshift */
200 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
201 	 8,			/* bitsize */
202 	 TRUE,			/* pc_relative */
203 	 0,			/* bitpos */
204 	 complain_overflow_unsigned, /* complain_on_overflow */
205 	 sh_reloc,		/* special_function */
206 	 "r_pcrelimm8by2",	/* name */
207 	 TRUE,			/* partial_inplace */
208 	 0xff,			/* src_mask */
209 	 0xff,			/* dst_mask */
210 	 TRUE),			/* pcrel_offset */
211 
212   HOWTO (R_SH_PCRELIMM8BY4,	/* type */
213 	 2,			/* rightshift */
214 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
215 	 8,			/* bitsize */
216 	 TRUE,			/* pc_relative */
217 	 0,			/* bitpos */
218 	 complain_overflow_unsigned, /* complain_on_overflow */
219 	 sh_reloc,		/* special_function */
220 	 "r_pcrelimm8by4",	/* name */
221 	 TRUE,			/* partial_inplace */
222 	 0xff,			/* src_mask */
223 	 0xff,			/* dst_mask */
224 	 TRUE),			/* pcrel_offset */
225 
226   HOWTO (R_SH_IMM16,		/* type */
227 	 0,			/* rightshift */
228 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
229 	 16,			/* bitsize */
230 	 FALSE,			/* pc_relative */
231 	 0,			/* bitpos */
232 	 complain_overflow_bitfield, /* complain_on_overflow */
233 	 sh_reloc,		/* special_function */
234 	 "r_imm16",		/* name */
235 	 TRUE,			/* partial_inplace */
236 	 0xffff,		/* src_mask */
237 	 0xffff,		/* dst_mask */
238 	 FALSE),		/* pcrel_offset */
239 
240   HOWTO (R_SH_SWITCH16,		/* type */
241 	 0,			/* rightshift */
242 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
243 	 16,			/* bitsize */
244 	 FALSE,			/* pc_relative */
245 	 0,			/* bitpos */
246 	 complain_overflow_bitfield, /* complain_on_overflow */
247 	 sh_reloc,		/* special_function */
248 	 "r_switch16",		/* name */
249 	 TRUE,			/* partial_inplace */
250 	 0xffff,		/* src_mask */
251 	 0xffff,		/* dst_mask */
252 	 FALSE),		/* pcrel_offset */
253 
254   HOWTO (R_SH_SWITCH32,		/* type */
255 	 0,			/* rightshift */
256 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
257 	 32,			/* bitsize */
258 	 FALSE,			/* pc_relative */
259 	 0,			/* bitpos */
260 	 complain_overflow_bitfield, /* complain_on_overflow */
261 	 sh_reloc,		/* special_function */
262 	 "r_switch32",		/* name */
263 	 TRUE,			/* partial_inplace */
264 	 0xffffffff,		/* src_mask */
265 	 0xffffffff,		/* dst_mask */
266 	 FALSE),		/* pcrel_offset */
267 
268   HOWTO (R_SH_USES,		/* type */
269 	 0,			/* rightshift */
270 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
271 	 16,			/* bitsize */
272 	 FALSE,			/* pc_relative */
273 	 0,			/* bitpos */
274 	 complain_overflow_bitfield, /* complain_on_overflow */
275 	 sh_reloc,		/* special_function */
276 	 "r_uses",		/* name */
277 	 TRUE,			/* partial_inplace */
278 	 0xffff,		/* src_mask */
279 	 0xffff,		/* dst_mask */
280 	 FALSE),		/* pcrel_offset */
281 
282   HOWTO (R_SH_COUNT,		/* type */
283 	 0,			/* rightshift */
284 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
285 	 32,			/* bitsize */
286 	 FALSE,			/* pc_relative */
287 	 0,			/* bitpos */
288 	 complain_overflow_bitfield, /* complain_on_overflow */
289 	 sh_reloc,		/* special_function */
290 	 "r_count",		/* name */
291 	 TRUE,			/* partial_inplace */
292 	 0xffffffff,		/* src_mask */
293 	 0xffffffff,		/* dst_mask */
294 	 FALSE),		/* pcrel_offset */
295 
296   HOWTO (R_SH_ALIGN,		/* type */
297 	 0,			/* rightshift */
298 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
299 	 32,			/* bitsize */
300 	 FALSE,			/* pc_relative */
301 	 0,			/* bitpos */
302 	 complain_overflow_bitfield, /* complain_on_overflow */
303 	 sh_reloc,		/* special_function */
304 	 "r_align",		/* name */
305 	 TRUE,			/* partial_inplace */
306 	 0xffffffff,		/* src_mask */
307 	 0xffffffff,		/* dst_mask */
308 	 FALSE),		/* pcrel_offset */
309 
310   HOWTO (R_SH_CODE,		/* type */
311 	 0,			/* rightshift */
312 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
313 	 32,			/* bitsize */
314 	 FALSE,			/* pc_relative */
315 	 0,			/* bitpos */
316 	 complain_overflow_bitfield, /* complain_on_overflow */
317 	 sh_reloc,		/* special_function */
318 	 "r_code",		/* name */
319 	 TRUE,			/* partial_inplace */
320 	 0xffffffff,		/* src_mask */
321 	 0xffffffff,		/* dst_mask */
322 	 FALSE),		/* pcrel_offset */
323 
324   HOWTO (R_SH_DATA,		/* type */
325 	 0,			/* rightshift */
326 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
327 	 32,			/* bitsize */
328 	 FALSE,			/* pc_relative */
329 	 0,			/* bitpos */
330 	 complain_overflow_bitfield, /* complain_on_overflow */
331 	 sh_reloc,		/* special_function */
332 	 "r_data",		/* name */
333 	 TRUE,			/* partial_inplace */
334 	 0xffffffff,		/* src_mask */
335 	 0xffffffff,		/* dst_mask */
336 	 FALSE),		/* pcrel_offset */
337 
338   HOWTO (R_SH_LABEL,		/* type */
339 	 0,			/* rightshift */
340 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
341 	 32,			/* bitsize */
342 	 FALSE,			/* pc_relative */
343 	 0,			/* bitpos */
344 	 complain_overflow_bitfield, /* complain_on_overflow */
345 	 sh_reloc,		/* special_function */
346 	 "r_label",		/* name */
347 	 TRUE,			/* partial_inplace */
348 	 0xffffffff,		/* src_mask */
349 	 0xffffffff,		/* dst_mask */
350 	 FALSE),		/* pcrel_offset */
351 
352   HOWTO (R_SH_SWITCH8,		/* type */
353 	 0,			/* rightshift */
354 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
355 	 8,			/* bitsize */
356 	 FALSE,			/* pc_relative */
357 	 0,			/* bitpos */
358 	 complain_overflow_bitfield, /* complain_on_overflow */
359 	 sh_reloc,		/* special_function */
360 	 "r_switch8",		/* name */
361 	 TRUE,			/* partial_inplace */
362 	 0xff,			/* src_mask */
363 	 0xff,			/* dst_mask */
364 	 FALSE)			/* pcrel_offset */
365 };
366 
367 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
368 
369 /* Check for a bad magic number.  */
370 #define BADMAG(x) SHBADMAG(x)
371 
372 /* Customize coffcode.h (this is not currently used).  */
373 #define SH 1
374 
375 /* FIXME: This should not be set here.  */
376 #define __A_MAGIC_SET__
377 
378 #ifndef COFF_WITH_PE
379 /* Swap the r_offset field in and out.  */
380 #define SWAP_IN_RELOC_OFFSET  H_GET_32
381 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
382 
383 /* Swap out extra information in the reloc structure.  */
384 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst)	\
385   do						\
386     {						\
387       dst->r_stuff[0] = 'S';			\
388       dst->r_stuff[1] = 'C';			\
389     }						\
390   while (0)
391 #endif
392 
393 /* Get the value of a symbol, when performing a relocation.  */
394 
395 static long
396 get_symbol_value (asymbol *symbol)
397 {
398   bfd_vma relocation;
399 
400   if (bfd_is_com_section (symbol->section))
401     relocation = 0;
402   else
403     relocation = (symbol->value +
404 		  symbol->section->output_section->vma +
405 		  symbol->section->output_offset);
406 
407   return relocation;
408 }
409 
410 #ifdef COFF_WITH_PE
411 /* Convert an rtype to howto for the COFF backend linker.
412    Copied from coff-i386.  */
413 #define coff_rtype_to_howto coff_sh_rtype_to_howto
414 
415 
416 static reloc_howto_type *
417 coff_sh_rtype_to_howto (bfd * abfd ATTRIBUTE_UNUSED,
418 			asection * sec,
419 			struct internal_reloc * rel,
420 			struct coff_link_hash_entry * h,
421 			struct internal_syment * sym,
422 			bfd_vma * addendp)
423 {
424   reloc_howto_type * howto;
425 
426   howto = sh_coff_howtos + rel->r_type;
427 
428   *addendp = 0;
429 
430   if (howto->pc_relative)
431     *addendp += sec->vma;
432 
433   if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
434     {
435       /* This is a common symbol.  The section contents include the
436 	 size (sym->n_value) as an addend.  The relocate_section
437 	 function will be adding in the final value of the symbol.  We
438 	 need to subtract out the current size in order to get the
439 	 correct result.  */
440       BFD_ASSERT (h != NULL);
441     }
442 
443   if (howto->pc_relative)
444     {
445       *addendp -= 4;
446 
447       /* If the symbol is defined, then the generic code is going to
448 	 add back the symbol value in order to cancel out an
449 	 adjustment it made to the addend.  However, we set the addend
450 	 to 0 at the start of this function.  We need to adjust here,
451 	 to avoid the adjustment the generic code will make.  FIXME:
452 	 This is getting a bit hackish.  */
453       if (sym != NULL && sym->n_scnum != 0)
454 	*addendp -= sym->n_value;
455     }
456 
457   if (rel->r_type == R_SH_IMAGEBASE)
458     *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
459 
460   return howto;
461 }
462 
463 #endif /* COFF_WITH_PE */
464 
465 /* This structure is used to map BFD reloc codes to SH PE relocs.  */
466 struct shcoff_reloc_map
467 {
468   bfd_reloc_code_real_type bfd_reloc_val;
469   unsigned char shcoff_reloc_val;
470 };
471 
472 #ifdef COFF_WITH_PE
473 /* An array mapping BFD reloc codes to SH PE relocs.  */
474 static const struct shcoff_reloc_map sh_reloc_map[] =
475 {
476   { BFD_RELOC_32, R_SH_IMM32CE },
477   { BFD_RELOC_RVA, R_SH_IMAGEBASE },
478   { BFD_RELOC_CTOR, R_SH_IMM32CE },
479 };
480 #else
481 /* An array mapping BFD reloc codes to SH relocs.  */
482 static const struct shcoff_reloc_map sh_reloc_map[] =
483 {
484   { BFD_RELOC_32, R_SH_IMM32 },
485   { BFD_RELOC_CTOR, R_SH_IMM32 },
486 };
487 #endif
488 
489 /* Given a BFD reloc code, return the howto structure for the
490    corresponding SH PE reloc.  */
491 #define coff_bfd_reloc_type_lookup	sh_coff_reloc_type_lookup
492 #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
493 
494 static reloc_howto_type *
495 sh_coff_reloc_type_lookup (bfd *abfd,
496 			   bfd_reloc_code_real_type code)
497 {
498   unsigned int i;
499 
500   for (i = ARRAY_SIZE (sh_reloc_map); i--;)
501     if (sh_reloc_map[i].bfd_reloc_val == code)
502       return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
503 
504   _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
505 		      abfd, (unsigned int) code);
506   return NULL;
507 }
508 
509 static reloc_howto_type *
510 sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
511 			   const char *r_name)
512 {
513   unsigned int i;
514 
515   for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
516     if (sh_coff_howtos[i].name != NULL
517 	&& strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
518       return &sh_coff_howtos[i];
519 
520   return NULL;
521 }
522 
523 /* This macro is used in coffcode.h to get the howto corresponding to
524    an internal reloc.  */
525 
526 #define RTYPE2HOWTO(relent, internal)		\
527   ((relent)->howto =				\
528    ((internal)->r_type < SH_COFF_HOWTO_COUNT	\
529     ? &sh_coff_howtos[(internal)->r_type]	\
530     : (reloc_howto_type *) NULL))
531 
532 /* This is the same as the macro in coffcode.h, except that it copies
533    r_offset into reloc_entry->addend for some relocs.  */
534 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr)		\
535   {								\
536     coff_symbol_type *coffsym = (coff_symbol_type *) NULL;	\
537     if (ptr && bfd_asymbol_bfd (ptr) != abfd)			\
538       coffsym = (obj_symbols (abfd)				\
539 		 + (cache_ptr->sym_ptr_ptr - symbols));		\
540     else if (ptr)						\
541       coffsym = coff_symbol_from (ptr);				\
542     if (coffsym != (coff_symbol_type *) NULL			\
543 	&& coffsym->native->u.syment.n_scnum == 0)		\
544       cache_ptr->addend = 0;					\
545     else if (ptr && bfd_asymbol_bfd (ptr) == abfd		\
546 	     && ptr->section != (asection *) NULL)		\
547       cache_ptr->addend = - (ptr->section->vma + ptr->value);	\
548     else							\
549       cache_ptr->addend = 0;					\
550     if ((reloc).r_type == R_SH_SWITCH8				\
551 	|| (reloc).r_type == R_SH_SWITCH16			\
552 	|| (reloc).r_type == R_SH_SWITCH32			\
553 	|| (reloc).r_type == R_SH_USES				\
554 	|| (reloc).r_type == R_SH_COUNT				\
555 	|| (reloc).r_type == R_SH_ALIGN)			\
556       cache_ptr->addend = (reloc).r_offset;			\
557   }
558 
559 /* This is the howto function for the SH relocations.  */
560 
561 static bfd_reloc_status_type
562 sh_reloc (bfd *      abfd,
563 	  arelent *  reloc_entry,
564 	  asymbol *  symbol_in,
565 	  void *     data,
566 	  asection * input_section,
567 	  bfd *      output_bfd,
568 	  char **    error_message ATTRIBUTE_UNUSED)
569 {
570   unsigned long insn;
571   bfd_vma sym_value;
572   unsigned short r_type;
573   bfd_vma addr = reloc_entry->address;
574   bfd_byte *hit_data = addr + (bfd_byte *) data;
575 
576   r_type = reloc_entry->howto->type;
577 
578   if (output_bfd != NULL)
579     {
580       /* Partial linking--do nothing.  */
581       reloc_entry->address += input_section->output_offset;
582       return bfd_reloc_ok;
583     }
584 
585   /* Almost all relocs have to do with relaxing.  If any work must be
586      done for them, it has been done in sh_relax_section.  */
587   if (r_type != R_SH_IMM32
588 #ifdef COFF_WITH_PE
589       && r_type != R_SH_IMM32CE
590       && r_type != R_SH_IMAGEBASE
591 #endif
592       && (r_type != R_SH_PCDISP
593 	  || (symbol_in->flags & BSF_LOCAL) != 0))
594     return bfd_reloc_ok;
595 
596   if (symbol_in != NULL
597       && bfd_is_und_section (symbol_in->section))
598     return bfd_reloc_undefined;
599 
600   if (addr > input_section->size)
601     return bfd_reloc_outofrange;
602 
603   sym_value = get_symbol_value (symbol_in);
604 
605   switch (r_type)
606     {
607     case R_SH_IMM32:
608 #ifdef COFF_WITH_PE
609     case R_SH_IMM32CE:
610 #endif
611       insn = bfd_get_32 (abfd, hit_data);
612       insn += sym_value + reloc_entry->addend;
613       bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
614       break;
615 #ifdef COFF_WITH_PE
616     case R_SH_IMAGEBASE:
617       insn = bfd_get_32 (abfd, hit_data);
618       insn += sym_value + reloc_entry->addend;
619       insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
620       bfd_put_32 (abfd, (bfd_vma) insn, hit_data);
621       break;
622 #endif
623     case R_SH_PCDISP:
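      /* Branch with a 12-bit PC-relative displacement: the target is
	 measured from four bytes past the instruction, the existing
	 field is folded into the value, the low 12 bits of the opcode
	 are rewritten, and the result is range checked.  */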
624       insn = bfd_get_16 (abfd, hit_data);
625       sym_value += reloc_entry->addend;
626       sym_value -= (input_section->output_section->vma
627 		    + input_section->output_offset
628 		    + addr
629 		    + 4);
630       sym_value += (insn & 0xfff) << 1;
631       if (insn & 0x800)
632 	sym_value -= 0x1000;
633       insn = (insn & 0xf000) | (sym_value & 0xfff);
634       bfd_put_16 (abfd, (bfd_vma) insn, hit_data);
635       if (sym_value < (bfd_vma) -0x1000 || sym_value >= 0x1000)
636 	return bfd_reloc_overflow;
637       break;
638     default:
639       abort ();
640       break;
641     }
642 
643   return bfd_reloc_ok;
644 }
645 
646 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
647 
648 /* We can do relaxing.  */
649 #define coff_bfd_relax_section sh_relax_section
650 
651 /* We use the special COFF backend linker.  */
652 #define coff_relocate_section sh_relocate_section
653 
654 /* When relaxing, we need to use special code to get the relocated
655    section contents.  */
656 #define coff_bfd_get_relocated_section_contents \
657   sh_coff_get_relocated_section_contents
658 
659 #include "coffcode.h"
660 
661 static bfd_boolean
662 sh_relax_delete_bytes (bfd *, asection *, bfd_vma, int);
663 
664 /* This function handles relaxing on the SH.
665 
666    Function calls on the SH look like this:
667 
668        movl  L1,r0
669        ...
670        jsr   @r0
671        ...
672      L1:
673        .long function
674 
675    The compiler and assembler will cooperate to create R_SH_USES
676    relocs on the jsr instructions.  The r_offset field of the
677    R_SH_USES reloc is the PC relative offset to the instruction which
678    loads the register (the r_offset field is computed as though it
679    were a jump instruction, so the offset value is actually from four
680    bytes past the instruction).  The linker can use this reloc to
681    determine just which function is being called, and thus decide
682    whether it is possible to replace the jsr with a bsr.
683 
684    If multiple function calls are all based on a single register load
685    (i.e., the same function is called multiple times), the compiler
686    guarantees that each function call will have an R_SH_USES reloc.
687    Therefore, if the linker is able to convert each R_SH_USES reloc
688    which refers to that address, it can safely eliminate the register
689    load.
690 
691    When the assembler creates an R_SH_USES reloc, it examines it to
692    determine which address is being loaded (L1 in the above example).
693    It then counts the number of references to that address, and
694    creates an R_SH_COUNT reloc at that address.  The r_offset field of
695    the R_SH_COUNT reloc will be the number of references.  If the
696    linker is able to eliminate a register load, it can use the
697    R_SH_COUNT reloc to see whether it can also eliminate the function
698    address.
699 
700    SH relaxing also handles another, unrelated, matter.  On the SH, if
701    a load or store instruction is not aligned on a four byte boundary,
702    the memory cycle interferes with the 32 bit instruction fetch,
703    causing a one cycle bubble in the pipeline.  Therefore, we try to
704    align load and store instructions on four byte boundaries if we
705    can, by swapping them with one of the adjacent instructions.  */
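/* As a sketch of the transformation described above (assuming the called
   function ends up within range of a bsr):

       mov.l  L1,r0        ! register load; deleted (2 bytes)
       ...
       jsr    @r0          ! carries the R_SH_USES reloc
       ...
     L1:
       .long  function     ! deleted (4 bytes) once its R_SH_COUNT reaches 0

   becomes a direct

       bsr    function     ! now carries an R_SH_PCDISP reloc

   which shrinks the section and may bring further calls into range, so the
   pass asks to be run again.  */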
706 
707 static bfd_boolean
708 sh_relax_section (bfd *abfd,
709 		  asection *sec,
710 		  struct bfd_link_info *link_info,
711 		  bfd_boolean *again)
712 {
713   struct internal_reloc *internal_relocs;
714   bfd_boolean have_code;
715   struct internal_reloc *irel, *irelend;
716   bfd_byte *contents = NULL;
717 
718   *again = FALSE;
719 
720   if (bfd_link_relocatable (link_info)
721       || (sec->flags & SEC_RELOC) == 0
722       || sec->reloc_count == 0)
723     return TRUE;
724 
725   if (coff_section_data (abfd, sec) == NULL)
726     {
727       bfd_size_type amt = sizeof (struct coff_section_tdata);
728       sec->used_by_bfd = bfd_zalloc (abfd, amt);
729       if (sec->used_by_bfd == NULL)
730 	return FALSE;
731     }
732 
733   internal_relocs = (_bfd_coff_read_internal_relocs
734 		     (abfd, sec, link_info->keep_memory,
735 		      (bfd_byte *) NULL, FALSE,
736 		      (struct internal_reloc *) NULL));
737   if (internal_relocs == NULL)
738     goto error_return;
739 
740   have_code = FALSE;
741 
742   irelend = internal_relocs + sec->reloc_count;
743   for (irel = internal_relocs; irel < irelend; irel++)
744     {
745       bfd_vma laddr, paddr, symval;
746       unsigned short insn;
747       struct internal_reloc *irelfn, *irelscan, *irelcount;
748       struct internal_syment sym;
749       bfd_signed_vma foff;
750 
751       if (irel->r_type == R_SH_CODE)
752 	have_code = TRUE;
753 
754       if (irel->r_type != R_SH_USES)
755 	continue;
756 
757       /* Get the section contents.  */
758       if (contents == NULL)
759 	{
760 	  if (coff_section_data (abfd, sec)->contents != NULL)
761 	    contents = coff_section_data (abfd, sec)->contents;
762 	  else
763 	    {
764 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
765 		goto error_return;
766 	    }
767 	}
768 
769       /* The r_offset field of the R_SH_USES reloc will point us to
770 	 the register load.  The 4 is because the r_offset field is
771 	 computed as though it were a jump offset, which is measured
772 	 from 4 bytes after the jump instruction.  */
773       laddr = irel->r_vaddr - sec->vma + 4;
774       /* Careful to sign extend the 32-bit offset.  */
775       laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
776       if (laddr >= sec->size)
777 	{
778 	  /* xgettext: c-format */
779 	  _bfd_error_handler
780 	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES offset"),
781 	     abfd, (uint64_t) irel->r_vaddr);
782 	  continue;
783 	}
784       insn = bfd_get_16 (abfd, contents + laddr);
785 
786       /* If the instruction is not mov.l NN,rN, we don't know what to do.  */
787       if ((insn & 0xf000) != 0xd000)
788 	{
789 	  _bfd_error_handler
790 	    /* xgettext: c-format */
791 	    (_("%pB: %#" PRIx64 ": warning: R_SH_USES points to unrecognized insn %#x"),
792 	     abfd, (uint64_t) irel->r_vaddr, insn);
793 	  continue;
794 	}
795 
796       /* Get the address from which the register is being loaded.  The
797 	 displacement in the mov.l instruction is quadrupled.  It is a
798 	 displacement from four bytes after the mov.l instruction, but,
799 	 before adding in the PC address, the two least significant bits
800 	 of the PC are cleared.  We assume that the section is aligned
801 	 on a four byte boundary.  */
802       paddr = insn & 0xff;
803       paddr *= 4;
804       paddr += (laddr + 4) &~ (bfd_vma) 3;
805       if (paddr >= sec->size)
806 	{
807 	  _bfd_error_handler
808 	    /* xgettext: c-format */
809 	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES load offset"),
810 	     abfd, (uint64_t) irel->r_vaddr);
811 	  continue;
812 	}
813 
814       /* Get the reloc for the address from which the register is
815 	 being loaded.  This reloc will tell us which function is
816 	 actually being called.  */
817       paddr += sec->vma;
818       for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
819 	if (irelfn->r_vaddr == paddr
820 #ifdef COFF_WITH_PE
821 	    && (irelfn->r_type == R_SH_IMM32
822 		|| irelfn->r_type == R_SH_IMM32CE
823 		|| irelfn->r_type == R_SH_IMAGEBASE)
824 
825 #else
826 	    && irelfn->r_type == R_SH_IMM32
827 #endif
828 	    )
829 	  break;
830       if (irelfn >= irelend)
831 	{
832 	  _bfd_error_handler
833 	    /* xgettext: c-format */
834 	    (_("%pB: %#" PRIx64 ": warning: could not find expected reloc"),
835 	     abfd, (uint64_t) paddr);
836 	  continue;
837 	}
838 
839       /* Get the value of the symbol referred to by the reloc.  */
840       if (! _bfd_coff_get_external_symbols (abfd))
841 	goto error_return;
842       bfd_coff_swap_sym_in (abfd,
843 			    ((bfd_byte *) obj_coff_external_syms (abfd)
844 			     + (irelfn->r_symndx
845 				* bfd_coff_symesz (abfd))),
846 			    &sym);
847       if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
848 	{
849 	  _bfd_error_handler
850 	    /* xgettext: c-format */
851 	    (_("%pB: %#" PRIx64 ": warning: symbol in unexpected section"),
852 	     abfd, (uint64_t) paddr);
853 	  continue;
854 	}
855 
856       if (sym.n_sclass != C_EXT)
857 	{
858 	  symval = (sym.n_value
859 		    - sec->vma
860 		    + sec->output_section->vma
861 		    + sec->output_offset);
862 	}
863       else
864 	{
865 	  struct coff_link_hash_entry *h;
866 
867 	  h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
868 	  BFD_ASSERT (h != NULL);
869 	  if (h->root.type != bfd_link_hash_defined
870 	      && h->root.type != bfd_link_hash_defweak)
871 	    {
872 	      /* This appears to be a reference to an undefined
873 		 symbol.  Just ignore it--it will be caught by the
874 		 regular reloc processing.  */
875 	      continue;
876 	    }
877 
878 	  symval = (h->root.u.def.value
879 		    + h->root.u.def.section->output_section->vma
880 		    + h->root.u.def.section->output_offset);
881 	}
882 
883       symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
884 
885       /* See if this function call can be shortened.  */
886       foff = (symval
887 	      - (irel->r_vaddr
888 		 - sec->vma
889 		 + sec->output_section->vma
890 		 + sec->output_offset
891 		 + 4));
892       if (foff < -0x1000 || foff >= 0x1000)
893 	{
894 	  /* After all that work, we can't shorten this function call.  */
895 	  continue;
896 	}
897 
898       /* Shorten the function call.  */
899 
900       /* For simplicity of coding, we are going to modify the section
901 	 contents, the section relocs, and the BFD symbol table.  We
902 	 must tell the rest of the code not to free up this
903 	 information.  It would be possible to instead create a table
904 	 of changes which have to be made, as is done in coff-mips.c;
905 	 that would be more work, but would require less memory when
906 	 the linker is run.  */
907 
908       coff_section_data (abfd, sec)->relocs = internal_relocs;
909       coff_section_data (abfd, sec)->keep_relocs = TRUE;
910 
911       coff_section_data (abfd, sec)->contents = contents;
912       coff_section_data (abfd, sec)->keep_contents = TRUE;
913 
914       obj_coff_keep_syms (abfd) = TRUE;
915 
916       /* Replace the jsr with a bsr.  */
917 
918       /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
919 	 replace the jsr with a bsr.  */
920       irel->r_type = R_SH_PCDISP;
921       irel->r_symndx = irelfn->r_symndx;
922       if (sym.n_sclass != C_EXT)
923 	{
924 	  /* If this needs to be changed because of future relaxing,
925 	     it will be handled here like other internal PCDISP
926 	     relocs.  */
927 	  bfd_put_16 (abfd,
928 		      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
929 		      contents + irel->r_vaddr - sec->vma);
930 	}
931       else
932 	{
933 	  /* We can't fully resolve this yet, because the external
934 	     symbol value may be changed by future relaxing.  We let
935 	     the final link phase handle it.  */
936 	  bfd_put_16 (abfd, (bfd_vma) 0xb000,
937 		      contents + irel->r_vaddr - sec->vma);
938 	}
939 
940       /* See if there is another R_SH_USES reloc referring to the same
941 	 register load.  */
942       for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
943 	if (irelscan->r_type == R_SH_USES
944 	    && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
945 	  break;
946       if (irelscan < irelend)
947 	{
948 	  /* Some other function call depends upon this register load,
949 	     and we have not yet converted that function call.
950 	     Indeed, we may never be able to convert it.  There is
951 	     nothing else we can do at this point.  */
952 	  continue;
953 	}
954 
955       /* Look for a R_SH_COUNT reloc on the location where the
956 	 function address is stored.  Do this before deleting any
957 	 bytes, to avoid confusion about the address.  */
958       for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
959 	if (irelcount->r_vaddr == paddr
960 	    && irelcount->r_type == R_SH_COUNT)
961 	  break;
962 
963       /* Delete the register load.  */
964       if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
965 	goto error_return;
966 
967       /* That will change things, so, just in case it permits some
968 	 other function call to come within range, we should relax
969 	 again.  Note that this is not required, and it may be slow.  */
970       *again = TRUE;
971 
972       /* Now check whether we got a COUNT reloc.  */
973       if (irelcount >= irelend)
974 	{
975 	  _bfd_error_handler
976 	    /* xgettext: c-format */
977 	    (_("%pB: %#" PRIx64 ": warning: could not find expected COUNT reloc"),
978 	     abfd, (uint64_t) paddr);
979 	  continue;
980 	}
981 
982       /* The number of uses is stored in the r_offset field.  We've
983 	 just deleted one.  */
984       if (irelcount->r_offset == 0)
985 	{
986 	  /* xgettext: c-format */
987 	  _bfd_error_handler (_("%pB: %#" PRIx64 ": warning: bad count"),
988 			      abfd, (uint64_t) paddr);
989 	  continue;
990 	}
991 
992       --irelcount->r_offset;
993 
994       /* If there are no more uses, we can delete the address.  Reload
995 	 the address from irelfn, in case it was changed by the
996 	 previous call to sh_relax_delete_bytes.  */
997       if (irelcount->r_offset == 0)
998 	{
999 	  if (! sh_relax_delete_bytes (abfd, sec,
1000 				       irelfn->r_vaddr - sec->vma, 4))
1001 	    goto error_return;
1002 	}
1003 
1004       /* We've done all we can with that function call.  */
1005     }
1006 
1007   /* Look for load and store instructions that we can align on four
1008      byte boundaries.  */
1009   if (have_code)
1010     {
1011       bfd_boolean swapped;
1012 
1013       /* Get the section contents.  */
1014       if (contents == NULL)
1015 	{
1016 	  if (coff_section_data (abfd, sec)->contents != NULL)
1017 	    contents = coff_section_data (abfd, sec)->contents;
1018 	  else
1019 	    {
1020 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1021 		goto error_return;
1022 	    }
1023 	}
1024 
1025       if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1026 	goto error_return;
1027 
1028       if (swapped)
1029 	{
1030 	  coff_section_data (abfd, sec)->relocs = internal_relocs;
1031 	  coff_section_data (abfd, sec)->keep_relocs = TRUE;
1032 
1033 	  coff_section_data (abfd, sec)->contents = contents;
1034 	  coff_section_data (abfd, sec)->keep_contents = TRUE;
1035 
1036 	  obj_coff_keep_syms (abfd) = TRUE;
1037 	}
1038     }
1039 
1040   if (internal_relocs != NULL
1041       && internal_relocs != coff_section_data (abfd, sec)->relocs)
1042     {
1043       if (! link_info->keep_memory)
1044 	free (internal_relocs);
1045       else
1046 	coff_section_data (abfd, sec)->relocs = internal_relocs;
1047     }
1048 
1049   if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1050     {
1051       if (! link_info->keep_memory)
1052 	free (contents);
1053       else
1054 	/* Cache the section contents for coff_link_input_bfd.  */
1055 	coff_section_data (abfd, sec)->contents = contents;
1056     }
1057 
1058   return TRUE;
1059 
1060  error_return:
1061   if (internal_relocs != NULL
1062       && internal_relocs != coff_section_data (abfd, sec)->relocs)
1063     free (internal_relocs);
1064   if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1065     free (contents);
1066   return FALSE;
1067 }
1068 
1069 /* Delete some bytes from a section while relaxing.  */
1070 
1071 static bfd_boolean
1072 sh_relax_delete_bytes (bfd *abfd,
1073 		       asection *sec,
1074 		       bfd_vma addr,
1075 		       int count)
1076 {
1077   bfd_byte *contents;
1078   struct internal_reloc *irel, *irelend;
1079   struct internal_reloc *irelalign;
1080   bfd_vma toaddr;
1081   bfd_byte *esym, *esymend;
1082   bfd_size_type symesz;
1083   struct coff_link_hash_entry **sym_hash;
1084   asection *o;
1085 
1086   contents = coff_section_data (abfd, sec)->contents;
1087 
1088   /* The deletion must stop at the next ALIGN reloc for an alignment
1089      power larger than the number of bytes we are deleting.  */
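  /* For instance (a sketch): deleting 2 bytes ahead of an R_SH_ALIGN
     reloc whose r_offset is 2 (align to 1 << 2 == 4 bytes) stops the
     shuffle at that boundary; the freed space just before it is then
     filled with NOPs below instead of shrinking the section.  */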
1090 
1091   irelalign = NULL;
1092   toaddr = sec->size;
1093 
1094   irel = coff_section_data (abfd, sec)->relocs;
1095   irelend = irel + sec->reloc_count;
1096   for (; irel < irelend; irel++)
1097     {
1098       if (irel->r_type == R_SH_ALIGN
1099 	  && irel->r_vaddr - sec->vma > addr
1100 	  && count < (1 << irel->r_offset))
1101 	{
1102 	  irelalign = irel;
1103 	  toaddr = irel->r_vaddr - sec->vma;
1104 	  break;
1105 	}
1106     }
1107 
1108   /* Actually delete the bytes.  */
1109   memmove (contents + addr, contents + addr + count,
1110 	   (size_t) (toaddr - addr - count));
1111   if (irelalign == NULL)
1112     sec->size -= count;
1113   else
1114     {
1115       int i;
1116 
1117 #define NOP_OPCODE (0x0009)
1118 
1119       BFD_ASSERT ((count & 1) == 0);
1120       for (i = 0; i < count; i += 2)
1121 	bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1122     }
1123 
1124   /* Adjust all the relocs.  */
1125   for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1126     {
1127       bfd_vma nraddr, stop;
1128       bfd_vma start = 0;
1129       int insn = 0;
1130       struct internal_syment sym;
1131       int off, adjust, oinsn;
1132       bfd_signed_vma voff = 0;
1133       bfd_boolean overflow;
1134 
1135       /* Get the new reloc address.  */
1136       nraddr = irel->r_vaddr - sec->vma;
1137       if ((irel->r_vaddr - sec->vma > addr
1138 	   && irel->r_vaddr - sec->vma < toaddr)
1139 	  || (irel->r_type == R_SH_ALIGN
1140 	      && irel->r_vaddr - sec->vma == toaddr))
1141 	nraddr -= count;
1142 
1143       /* See if this reloc was for the bytes we have deleted, in which
1144 	 case we no longer care about it.  Don't delete relocs which
1145 	 represent addresses, though.  */
1146       if (irel->r_vaddr - sec->vma >= addr
1147 	  && irel->r_vaddr - sec->vma < addr + count
1148 	  && irel->r_type != R_SH_ALIGN
1149 	  && irel->r_type != R_SH_CODE
1150 	  && irel->r_type != R_SH_DATA
1151 	  && irel->r_type != R_SH_LABEL)
1152 	irel->r_type = R_SH_UNUSED;
1153 
1154       /* If this is a PC relative reloc, see if the range it covers
1155 	 includes the bytes we have deleted.  */
1156       switch (irel->r_type)
1157 	{
1158 	default:
1159 	  break;
1160 
1161 	case R_SH_PCDISP8BY2:
1162 	case R_SH_PCDISP:
1163 	case R_SH_PCRELIMM8BY2:
1164 	case R_SH_PCRELIMM8BY4:
1165 	  start = irel->r_vaddr - sec->vma;
1166 	  insn = bfd_get_16 (abfd, contents + nraddr);
1167 	  break;
1168 	}
1169 
1170       switch (irel->r_type)
1171 	{
1172 	default:
1173 	  start = stop = addr;
1174 	  break;
1175 
1176 	case R_SH_IMM32:
1177 #ifdef COFF_WITH_PE
1178 	case R_SH_IMM32CE:
1179 	case R_SH_IMAGEBASE:
1180 #endif
1181 	  /* If this reloc is against a symbol defined in this
1182 	     section, and the symbol will not be adjusted below, we
1183 	     must check the addend to see whether it will put the value in
1184 	     the range being adjusted, and hence must be changed.  */
1185 	  bfd_coff_swap_sym_in (abfd,
1186 				((bfd_byte *) obj_coff_external_syms (abfd)
1187 				 + (irel->r_symndx
1188 				    * bfd_coff_symesz (abfd))),
1189 				&sym);
1190 	  if (sym.n_sclass != C_EXT
1191 	      && sym.n_scnum == sec->target_index
1192 	      && ((bfd_vma) sym.n_value <= addr
1193 		  || (bfd_vma) sym.n_value >= toaddr))
1194 	    {
1195 	      bfd_vma val;
1196 
1197 	      val = bfd_get_32 (abfd, contents + nraddr);
1198 	      val += sym.n_value;
1199 	      if (val > addr && val < toaddr)
1200 		bfd_put_32 (abfd, val - count, contents + nraddr);
1201 	    }
1202 	  start = stop = addr;
1203 	  break;
1204 
1205 	case R_SH_PCDISP8BY2:
1206 	  off = insn & 0xff;
1207 	  if (off & 0x80)
1208 	    off -= 0x100;
1209 	  stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1210 	  break;
1211 
1212 	case R_SH_PCDISP:
1213 	  bfd_coff_swap_sym_in (abfd,
1214 				((bfd_byte *) obj_coff_external_syms (abfd)
1215 				 + (irel->r_symndx
1216 				    * bfd_coff_symesz (abfd))),
1217 				&sym);
1218 	  if (sym.n_sclass == C_EXT)
1219 	    start = stop = addr;
1220 	  else
1221 	    {
1222 	      off = insn & 0xfff;
1223 	      if (off & 0x800)
1224 		off -= 0x1000;
1225 	      stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1226 	    }
1227 	  break;
1228 
1229 	case R_SH_PCRELIMM8BY2:
1230 	  off = insn & 0xff;
1231 	  stop = start + 4 + off * 2;
1232 	  break;
1233 
1234 	case R_SH_PCRELIMM8BY4:
1235 	  off = insn & 0xff;
1236 	  stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1237 	  break;
1238 
1239 	case R_SH_SWITCH8:
1240 	case R_SH_SWITCH16:
1241 	case R_SH_SWITCH32:
1242 	  /* These reloc types represent
1243 	       .word L2-L1
1244 	     The r_offset field holds the difference between the reloc
1245 	     address and L1.  That is the start of the reloc, and
1246 	     adding in the contents gives us the top.  We must adjust
1247 	     both the r_offset field and the section contents.  */
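	  /* In terms of the code below (a sketch): with the reloc at
	     address A and r_offset == A - L1, "stop" is first computed
	     as L1; the stored contents hold L2 - L1, so after
	     "start = stop" the final "stop" is L2, and [L1, L2] is the
	     range whose endpoints are tested against the deletion.  */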
1248 
1249 	  start = irel->r_vaddr - sec->vma;
1250 	  stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1251 
1252 	  if (start > addr
1253 	      && start < toaddr
1254 	      && (stop <= addr || stop >= toaddr))
1255 	    irel->r_offset += count;
1256 	  else if (stop > addr
1257 		   && stop < toaddr
1258 		   && (start <= addr || start >= toaddr))
1259 	    irel->r_offset -= count;
1260 
1261 	  start = stop;
1262 
1263 	  if (irel->r_type == R_SH_SWITCH16)
1264 	    voff = bfd_get_signed_16 (abfd, contents + nraddr);
1265 	  else if (irel->r_type == R_SH_SWITCH8)
1266 	    voff = bfd_get_8 (abfd, contents + nraddr);
1267 	  else
1268 	    voff = bfd_get_signed_32 (abfd, contents + nraddr);
1269 	  stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1270 
1271 	  break;
1272 
1273 	case R_SH_USES:
1274 	  start = irel->r_vaddr - sec->vma;
1275 	  stop = (bfd_vma) ((bfd_signed_vma) start
1276 			    + (long) irel->r_offset
1277 			    + 4);
1278 	  break;
1279 	}
1280 
1281       if (start > addr
1282 	  && start < toaddr
1283 	  && (stop <= addr || stop >= toaddr))
1284 	adjust = count;
1285       else if (stop > addr
1286 	       && stop < toaddr
1287 	       && (start <= addr || start >= toaddr))
1288 	adjust = - count;
1289       else
1290 	adjust = 0;
1291 
1292       if (adjust != 0)
1293 	{
1294 	  oinsn = insn;
1295 	  overflow = FALSE;
1296 	  switch (irel->r_type)
1297 	    {
1298 	    default:
1299 	      abort ();
1300 	      break;
1301 
1302 	    case R_SH_PCDISP8BY2:
1303 	    case R_SH_PCRELIMM8BY2:
1304 	      insn += adjust / 2;
1305 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1306 		overflow = TRUE;
1307 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1308 	      break;
1309 
1310 	    case R_SH_PCDISP:
1311 	      insn += adjust / 2;
1312 	      if ((oinsn & 0xf000) != (insn & 0xf000))
1313 		overflow = TRUE;
1314 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1315 	      break;
1316 
1317 	    case R_SH_PCRELIMM8BY4:
1318 	      BFD_ASSERT (adjust == count || count >= 4);
1319 	      if (count >= 4)
1320 		insn += adjust / 4;
1321 	      else
1322 		{
1323 		  if ((irel->r_vaddr & 3) == 0)
1324 		    ++insn;
1325 		}
1326 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1327 		overflow = TRUE;
1328 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1329 	      break;
1330 
1331 	    case R_SH_SWITCH8:
1332 	      voff += adjust;
1333 	      if (voff < 0 || voff >= 0xff)
1334 		overflow = TRUE;
1335 	      bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1336 	      break;
1337 
1338 	    case R_SH_SWITCH16:
1339 	      voff += adjust;
1340 	      if (voff < - 0x8000 || voff >= 0x8000)
1341 		overflow = TRUE;
1342 	      bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1343 	      break;
1344 
1345 	    case R_SH_SWITCH32:
1346 	      voff += adjust;
1347 	      bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1348 	      break;
1349 
1350 	    case R_SH_USES:
1351 	      irel->r_offset += adjust;
1352 	      break;
1353 	    }
1354 
1355 	  if (overflow)
1356 	    {
1357 	      _bfd_error_handler
1358 		/* xgettext: c-format */
1359 		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
1360 		 abfd, (uint64_t) irel->r_vaddr);
1361 	      bfd_set_error (bfd_error_bad_value);
1362 	      return FALSE;
1363 	    }
1364 	}
1365 
1366       irel->r_vaddr = nraddr + sec->vma;
1367     }
1368 
1369   /* Look through all the other sections.  If they contain any IMM32
1370      relocs against internal symbols which we are not going to adjust
1371      below, we may need to adjust the addends.  */
1372   for (o = abfd->sections; o != NULL; o = o->next)
1373     {
1374       struct internal_reloc *internal_relocs;
1375       struct internal_reloc *irelscan, *irelscanend;
1376       bfd_byte *ocontents;
1377 
1378       if (o == sec
1379 	  || (o->flags & SEC_RELOC) == 0
1380 	  || o->reloc_count == 0)
1381 	continue;
1382 
1383       /* We always cache the relocs.  Perhaps, if info->keep_memory is
1384 	 FALSE, we should free them, if we are permitted to, when we
1385 	 leave sh_coff_relax_section.  */
1386       internal_relocs = (_bfd_coff_read_internal_relocs
1387 			 (abfd, o, TRUE, (bfd_byte *) NULL, FALSE,
1388 			  (struct internal_reloc *) NULL));
1389       if (internal_relocs == NULL)
1390 	return FALSE;
1391 
1392       ocontents = NULL;
1393       irelscanend = internal_relocs + o->reloc_count;
1394       for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1395 	{
1396 	  struct internal_syment sym;
1397 
1398 #ifdef COFF_WITH_PE
1399 	  if (irelscan->r_type != R_SH_IMM32
1400 	      && irelscan->r_type != R_SH_IMAGEBASE
1401 	      && irelscan->r_type != R_SH_IMM32CE)
1402 #else
1403 	  if (irelscan->r_type != R_SH_IMM32)
1404 #endif
1405 	    continue;
1406 
1407 	  bfd_coff_swap_sym_in (abfd,
1408 				((bfd_byte *) obj_coff_external_syms (abfd)
1409 				 + (irelscan->r_symndx
1410 				    * bfd_coff_symesz (abfd))),
1411 				&sym);
1412 	  if (sym.n_sclass != C_EXT
1413 	      && sym.n_scnum == sec->target_index
1414 	      && ((bfd_vma) sym.n_value <= addr
1415 		  || (bfd_vma) sym.n_value >= toaddr))
1416 	    {
1417 	      bfd_vma val;
1418 
1419 	      if (ocontents == NULL)
1420 		{
1421 		  if (coff_section_data (abfd, o)->contents != NULL)
1422 		    ocontents = coff_section_data (abfd, o)->contents;
1423 		  else
1424 		    {
1425 		      if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1426 			return FALSE;
1427 		      /* We always cache the section contents.
1428 			 Perhaps, if info->keep_memory is FALSE, we
1429 			 should free them, if we are permitted to,
1430 			 when we leave sh_coff_relax_section.  */
1431 		      coff_section_data (abfd, o)->contents = ocontents;
1432 		    }
1433 		}
1434 
1435 	      val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1436 	      val += sym.n_value;
1437 	      if (val > addr && val < toaddr)
1438 		bfd_put_32 (abfd, val - count,
1439 			    ocontents + irelscan->r_vaddr - o->vma);
1440 
1441 	      coff_section_data (abfd, o)->keep_contents = TRUE;
1442 	    }
1443 	}
1444     }
1445 
1446   /* Adjusting the internal symbols will not work if something has
1447      already retrieved the generic symbols.  It would be possible to
1448      make this work by adjusting the generic symbols at the same time.
1449      However, this case should not arise in normal usage.  */
1450   if (obj_symbols (abfd) != NULL
1451       || obj_raw_syments (abfd) != NULL)
1452     {
1453       _bfd_error_handler
1454 	(_("%pB: fatal: generic symbols retrieved before relaxing"), abfd);
1455       bfd_set_error (bfd_error_invalid_operation);
1456       return FALSE;
1457     }
1458 
1459   /* Adjust all the symbols.  */
1460   sym_hash = obj_coff_sym_hashes (abfd);
1461   symesz = bfd_coff_symesz (abfd);
1462   esym = (bfd_byte *) obj_coff_external_syms (abfd);
1463   esymend = esym + obj_raw_syment_count (abfd) * symesz;
1464   while (esym < esymend)
1465     {
1466       struct internal_syment isym;
1467 
1468       bfd_coff_swap_sym_in (abfd, esym, &isym);
1469 
1470       if (isym.n_scnum == sec->target_index
1471 	  && (bfd_vma) isym.n_value > addr
1472 	  && (bfd_vma) isym.n_value < toaddr)
1473 	{
1474 	  isym.n_value -= count;
1475 
1476 	  bfd_coff_swap_sym_out (abfd, &isym, esym);
1477 
1478 	  if (*sym_hash != NULL)
1479 	    {
1480 	      BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1481 			  || (*sym_hash)->root.type == bfd_link_hash_defweak);
1482 	      BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1483 			  && (*sym_hash)->root.u.def.value < toaddr);
1484 	      (*sym_hash)->root.u.def.value -= count;
1485 	    }
1486 	}
1487 
1488       esym += (isym.n_numaux + 1) * symesz;
1489       sym_hash += isym.n_numaux + 1;
1490     }
1491 
1492   /* See if we can move the ALIGN reloc forward.  We have adjusted
1493      r_vaddr for it already.  */
1494   if (irelalign != NULL)
1495     {
1496       bfd_vma alignto, alignaddr;
1497 
1498       alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1499       alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1500 			     1 << irelalign->r_offset);
1501       if (alignto != alignaddr)
1502 	{
1503 	  /* Tail recursion.  */
1504 	  return sh_relax_delete_bytes (abfd, sec, alignaddr,
1505 					(int) (alignto - alignaddr));
1506 	}
1507     }
1508 
1509   return TRUE;
1510 }
1511 
1512 /* This is yet another version of the SH opcode table, used to rapidly
1513    get information about a particular instruction.  */
1514 
1515 /* The opcode map is represented by an array of these structures.  The
1516    array is indexed by the high order four bits in the instruction.  */
1517 
1518 struct sh_major_opcode
1519 {
1520   /* A pointer to the instruction list.  This is an array which
1521      contains all the instructions with this major opcode.  */
1522   const struct sh_minor_opcode *minor_opcodes;
1523   /* The number of elements in minor_opcodes.  */
1524   unsigned short count;
1525 };
1526 
1527 /* This structure holds information for a set of SH opcodes.  The
1528    instruction code is anded with the mask value, and the resulting
1529    value is used to search the sorted opcode list.  */
1530 
1531 struct sh_minor_opcode
1532 {
1533   /* The sorted opcode list.  */
1534   const struct sh_opcode *opcodes;
1535   /* The number of elements in opcodes.  */
1536   unsigned short count;
1537   /* The mask value to use when searching the opcode list.  */
1538   unsigned short mask;
1539 };
1540 
1541 /* This structure holds information for an SH instruction.  An array
1542    of these structures is sorted in order by opcode.  */
1543 
1544 struct sh_opcode
1545 {
1546   /* The code for this instruction, after it has been anded with the
1547      mask value in the sh_major_opcode structure.  */
1548   unsigned short opcode;
1549   /* Flags for this instruction.  */
1550   unsigned long flags;
1551 };
1552 
1553 /* Flags that appear in the sh_opcode structure.  */
1554 
1555 /* This instruction loads a value from memory.  */
1556 #define LOAD (0x1)
1557 
1558 /* This instruction stores a value to memory.  */
1559 #define STORE (0x2)
1560 
1561 /* This instruction is a branch.  */
1562 #define BRANCH (0x4)
1563 
1564 /* This instruction has a delay slot.  */
1565 #define DELAY (0x8)
1566 
1567 /* This instruction uses the value in the register in the field at
1568    mask 0x0f00 of the instruction.  */
1569 #define USES1 (0x10)
1570 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1571 
1572 /* This instruction uses the value in the register in the field at
1573    mask 0x00f0 of the instruction.  */
1574 #define USES2 (0x20)
1575 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1576 
1577 /* This instruction uses the value in register 0.  */
1578 #define USESR0 (0x40)
1579 
1580 /* This instruction sets the value in the register in the field at
1581    mask 0x0f00 of the instruction.  */
1582 #define SETS1 (0x80)
1583 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1584 
1585 /* This instruction sets the value in the register in the field at
1586    mask 0x00f0 of the instruction.  */
1587 #define SETS2 (0x100)
1588 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1589 
1590 /* This instruction sets register 0.  */
1591 #define SETSR0 (0x200)
1592 
1593 /* This instruction sets a special register.  */
1594 #define SETSSP (0x400)
1595 
1596 /* This instruction uses a special register.  */
1597 #define USESSP (0x800)
1598 
1599 /* This instruction uses the floating point register in the field at
1600    mask 0x0f00 of the instruction.  */
1601 #define USESF1 (0x1000)
1602 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1603 
1604 /* This instruction uses the floating point register in the field at
1605    mask 0x00f0 of the instruction.  */
1606 #define USESF2 (0x2000)
1607 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1608 
1609 /* This instruction uses floating point register 0.  */
1610 #define USESF0 (0x4000)
1611 
1612 /* This instruction sets the floating point register in the field at
1613    mask 0x0f00 of the instruction.  */
1614 #define SETSF1 (0x8000)
1615 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1616 
1617 #define USESAS (0x10000)
1618 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1619 #define USESR8 (0x20000)
1620 #define SETSAS (0x40000)
1621 #define SETSAS_REG(x) USESAS_REG (x)
1622 
1623 #define MAP(a) a, sizeof a / sizeof a[0]
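/* A worked example of the lookup scheme described above, using the tables
   that follow (a sketch): for the instruction 0x2208 ("tst r0,r2") the
   high-order four bits are 0x2, selecting sh_opcode2; its single minor
   entry has mask 0xf00f, and 0x2208 & 0xf00f == 0x2008, which matches the
   { 0x2008, SETSSP | USES1 | USES2 } entry.  USES1_REG yields 2 and
   USES2_REG yields 0, i.e. the instruction reads r2 and r0 and sets a
   special register (the T bit).  */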
1624 
1625 #ifndef COFF_IMAGE_WITH_PE
1626 
1627 /* The opcode maps.  */
1628 
1629 static const struct sh_opcode sh_opcode00[] =
1630 {
1631   { 0x0008, SETSSP },			/* clrt */
1632   { 0x0009, 0 },			/* nop */
1633   { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
1634   { 0x0018, SETSSP },			/* sett */
1635   { 0x0019, SETSSP },			/* div0u */
1636   { 0x001b, 0 },			/* sleep */
1637   { 0x0028, SETSSP },			/* clrmac */
1638   { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
1639   { 0x0038, USESSP | SETSSP },		/* ldtlb */
1640   { 0x0048, SETSSP },			/* clrs */
1641   { 0x0058, SETSSP }			/* sets */
1642 };
1643 
1644 static const struct sh_opcode sh_opcode01[] =
1645 {
1646   { 0x0003, BRANCH | DELAY | USES1 | SETSSP },	/* bsrf rn */
1647   { 0x000a, SETS1 | USESSP },			/* sts mach,rn */
1648   { 0x001a, SETS1 | USESSP },			/* sts macl,rn */
1649   { 0x0023, BRANCH | DELAY | USES1 },		/* braf rn */
1650   { 0x0029, SETS1 | USESSP },			/* movt rn */
1651   { 0x002a, SETS1 | USESSP },			/* sts pr,rn */
1652   { 0x005a, SETS1 | USESSP },			/* sts fpul,rn */
1653   { 0x006a, SETS1 | USESSP },			/* sts fpscr,rn / sts dsr,rn */
1654   { 0x0083, LOAD | USES1 },			/* pref @rn */
1655   { 0x007a, SETS1 | USESSP },			/* sts a0,rn */
1656   { 0x008a, SETS1 | USESSP },			/* sts x0,rn */
1657   { 0x009a, SETS1 | USESSP },			/* sts x1,rn */
1658   { 0x00aa, SETS1 | USESSP },			/* sts y0,rn */
1659   { 0x00ba, SETS1 | USESSP }			/* sts y1,rn */
1660 };
1661 
1662 static const struct sh_opcode sh_opcode02[] =
1663 {
1664   { 0x0002, SETS1 | USESSP },			/* stc <special_reg>,rn */
1665   { 0x0004, STORE | USES1 | USES2 | USESR0 },	/* mov.b rm,@(r0,rn) */
1666   { 0x0005, STORE | USES1 | USES2 | USESR0 },	/* mov.w rm,@(r0,rn) */
1667   { 0x0006, STORE | USES1 | USES2 | USESR0 },	/* mov.l rm,@(r0,rn) */
1668   { 0x0007, SETSSP | USES1 | USES2 },		/* mul.l rm,rn */
1669   { 0x000c, LOAD | SETS1 | USES2 | USESR0 },	/* mov.b @(r0,rm),rn */
1670   { 0x000d, LOAD | SETS1 | USES2 | USESR0 },	/* mov.w @(r0,rm),rn */
1671   { 0x000e, LOAD | SETS1 | USES2 | USESR0 },	/* mov.l @(r0,rm),rn */
1672   { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1673 };
1674 
1675 static const struct sh_minor_opcode sh_opcode0[] =
1676 {
1677   { MAP (sh_opcode00), 0xffff },
1678   { MAP (sh_opcode01), 0xf0ff },
1679   { MAP (sh_opcode02), 0xf00f }
1680 };
1681 
1682 static const struct sh_opcode sh_opcode10[] =
1683 {
1684   { 0x1000, STORE | USES1 | USES2 }	/* mov.l rm,@(disp,rn) */
1685 };
1686 
1687 static const struct sh_minor_opcode sh_opcode1[] =
1688 {
1689   { MAP (sh_opcode10), 0xf000 }
1690 };
1691 
1692 static const struct sh_opcode sh_opcode20[] =
1693 {
1694   { 0x2000, STORE | USES1 | USES2 },		/* mov.b rm,@rn */
1695   { 0x2001, STORE | USES1 | USES2 },		/* mov.w rm,@rn */
1696   { 0x2002, STORE | USES1 | USES2 },		/* mov.l rm,@rn */
1697   { 0x2004, STORE | SETS1 | USES1 | USES2 },	/* mov.b rm,@-rn */
1698   { 0x2005, STORE | SETS1 | USES1 | USES2 },	/* mov.w rm,@-rn */
1699   { 0x2006, STORE | SETS1 | USES1 | USES2 },	/* mov.l rm,@-rn */
1700   { 0x2007, SETSSP | USES1 | USES2 | USESSP },	/* div0s */
1701   { 0x2008, SETSSP | USES1 | USES2 },		/* tst rm,rn */
1702   { 0x2009, SETS1 | USES1 | USES2 },		/* and rm,rn */
1703   { 0x200a, SETS1 | USES1 | USES2 },		/* xor rm,rn */
1704   { 0x200b, SETS1 | USES1 | USES2 },		/* or rm,rn */
1705   { 0x200c, SETSSP | USES1 | USES2 },		/* cmp/str rm,rn */
1706   { 0x200d, SETS1 | USES1 | USES2 },		/* xtrct rm,rn */
1707   { 0x200e, SETSSP | USES1 | USES2 },		/* mulu.w rm,rn */
1708   { 0x200f, SETSSP | USES1 | USES2 }		/* muls.w rm,rn */
1709 };
1710 
1711 static const struct sh_minor_opcode sh_opcode2[] =
1712 {
1713   { MAP (sh_opcode20), 0xf00f }
1714 };
1715 
1716 static const struct sh_opcode sh_opcode30[] =
1717 {
1718   { 0x3000, SETSSP | USES1 | USES2 },		/* cmp/eq rm,rn */
1719   { 0x3002, SETSSP | USES1 | USES2 },		/* cmp/hs rm,rn */
1720   { 0x3003, SETSSP | USES1 | USES2 },		/* cmp/ge rm,rn */
1721   { 0x3004, SETSSP | USESSP | USES1 | USES2 },	/* div1 rm,rn */
1722   { 0x3005, SETSSP | USES1 | USES2 },		/* dmulu.l rm,rn */
1723   { 0x3006, SETSSP | USES1 | USES2 },		/* cmp/hi rm,rn */
1724   { 0x3007, SETSSP | USES1 | USES2 },		/* cmp/gt rm,rn */
1725   { 0x3008, SETS1 | USES1 | USES2 },		/* sub rm,rn */
1726   { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1727   { 0x300b, SETS1 | SETSSP | USES1 | USES2 },	/* subv rm,rn */
1728   { 0x300c, SETS1 | USES1 | USES2 },		/* add rm,rn */
1729   { 0x300d, SETSSP | USES1 | USES2 },		/* dmuls.l rm,rn */
1730   { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1731   { 0x300f, SETS1 | SETSSP | USES1 | USES2 }	/* addv rm,rn */
1732 };
1733 
1734 static const struct sh_minor_opcode sh_opcode3[] =
1735 {
1736   { MAP (sh_opcode30), 0xf00f }
1737 };
1738 
1739 static const struct sh_opcode sh_opcode40[] =
1740 {
1741   { 0x4000, SETS1 | SETSSP | USES1 },		/* shll rn */
1742   { 0x4001, SETS1 | SETSSP | USES1 },		/* shlr rn */
1743   { 0x4002, STORE | SETS1 | USES1 | USESSP },	/* sts.l mach,@-rn */
1744   { 0x4004, SETS1 | SETSSP | USES1 },		/* rotl rn */
1745   { 0x4005, SETS1 | SETSSP | USES1 },		/* rotr rn */
1746   { 0x4006, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,mach */
1747   { 0x4008, SETS1 | USES1 },			/* shll2 rn */
1748   { 0x4009, SETS1 | USES1 },			/* shlr2 rn */
1749   { 0x400a, SETSSP | USES1 },			/* lds rm,mach */
1750   { 0x400b, BRANCH | DELAY | USES1 },		/* jsr @rn */
1751   { 0x4010, SETS1 | SETSSP | USES1 },		/* dt rn */
1752   { 0x4011, SETSSP | USES1 },			/* cmp/pz rn */
1753   { 0x4012, STORE | SETS1 | USES1 | USESSP },	/* sts.l macl,@-rn */
1754   { 0x4014, SETSSP | USES1 },			/* setrc rm */
1755   { 0x4015, SETSSP | USES1 },			/* cmp/pl rn */
1756   { 0x4016, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,macl */
1757   { 0x4018, SETS1 | USES1 },			/* shll8 rn */
1758   { 0x4019, SETS1 | USES1 },			/* shlr8 rn */
1759   { 0x401a, SETSSP | USES1 },			/* lds rm,macl */
1760   { 0x401b, LOAD | SETSSP | USES1 },		/* tas.b @rn */
1761   { 0x4020, SETS1 | SETSSP | USES1 },		/* shal rn */
1762   { 0x4021, SETS1 | SETSSP | USES1 },		/* shar rn */
1763   { 0x4022, STORE | SETS1 | USES1 | USESSP },	/* sts.l pr,@-rn */
1764   { 0x4024, SETS1 | SETSSP | USES1 | USESSP },	/* rotcl rn */
1765   { 0x4025, SETS1 | SETSSP | USES1 | USESSP },	/* rotcr rn */
1766   { 0x4026, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,pr */
1767   { 0x4028, SETS1 | USES1 },			/* shll16 rn */
1768   { 0x4029, SETS1 | USES1 },			/* shlr16 rn */
1769   { 0x402a, SETSSP | USES1 },			/* lds rm,pr */
1770   { 0x402b, BRANCH | DELAY | USES1 },		/* jmp @rn */
1771   { 0x4052, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpul,@-rn */
1772   { 0x4056, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpul */
1773   { 0x405a, SETSSP | USES1 },			/* lds rm,fpul */
1774   { 0x4062, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpscr / dsr,@-rn */
1775   { 0x4066, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpscr / dsr */
1776   { 0x406a, SETSSP | USES1 },			/* lds rm,fpscr / lds rm,dsr */
1777   { 0x4072, STORE | SETS1 | USES1 | USESSP },	/* sts.l a0,@-rn */
1778   { 0x4076, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,a0 */
1779   { 0x407a, SETSSP | USES1 },			/* lds rm,a0 */
1780   { 0x4082, STORE | SETS1 | USES1 | USESSP },	/* sts.l x0,@-rn */
1781   { 0x4086, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x0 */
1782   { 0x408a, SETSSP | USES1 },			/* lds rm,x0 */
1783   { 0x4092, STORE | SETS1 | USES1 | USESSP },	/* sts.l x1,@-rn */
1784   { 0x4096, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x1 */
1785   { 0x409a, SETSSP | USES1 },			/* lds rm,x1 */
1786   { 0x40a2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y0,@-rn */
1787   { 0x40a6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y0 */
1788   { 0x40aa, SETSSP | USES1 },			/* lds rm,y0 */
1789   { 0x40b2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y1,@-rn */
1790   { 0x40b6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y1 */
1791   { 0x40ba, SETSSP | USES1 }			/* lds rm,y1 */
1792 };
1793 
1794 static const struct sh_opcode sh_opcode41[] =
1795 {
1796   { 0x4003, STORE | SETS1 | USES1 | USESSP },	/* stc.l <special_reg>,@-rn */
1797   { 0x4007, LOAD | SETS1 | SETSSP | USES1 },	/* ldc.l @rm+,<special_reg> */
1798   { 0x400c, SETS1 | USES1 | USES2 },		/* shad rm,rn */
1799   { 0x400d, SETS1 | USES1 | USES2 },		/* shld rm,rn */
1800   { 0x400e, SETSSP | USES1 },			/* ldc rm,<special_reg> */
1801   { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1802 };
1803 
1804 static const struct sh_minor_opcode sh_opcode4[] =
1805 {
1806   { MAP (sh_opcode40), 0xf0ff },
1807   { MAP (sh_opcode41), 0xf00f }
1808 };
1809 
1810 static const struct sh_opcode sh_opcode50[] =
1811 {
1812   { 0x5000, LOAD | SETS1 | USES2 }	/* mov.l @(disp,rm),rn */
1813 };
1814 
1815 static const struct sh_minor_opcode sh_opcode5[] =
1816 {
1817   { MAP (sh_opcode50), 0xf000 }
1818 };
1819 
1820 static const struct sh_opcode sh_opcode60[] =
1821 {
1822   { 0x6000, LOAD | SETS1 | USES2 },		/* mov.b @rm,rn */
1823   { 0x6001, LOAD | SETS1 | USES2 },		/* mov.w @rm,rn */
1824   { 0x6002, LOAD | SETS1 | USES2 },		/* mov.l @rm,rn */
1825   { 0x6003, SETS1 | USES2 },			/* mov rm,rn */
1826   { 0x6004, LOAD | SETS1 | SETS2 | USES2 },	/* mov.b @rm+,rn */
1827   { 0x6005, LOAD | SETS1 | SETS2 | USES2 },	/* mov.w @rm+,rn */
1828   { 0x6006, LOAD | SETS1 | SETS2 | USES2 },	/* mov.l @rm+,rn */
1829   { 0x6007, SETS1 | USES2 },			/* not rm,rn */
1830   { 0x6008, SETS1 | USES2 },			/* swap.b rm,rn */
1831   { 0x6009, SETS1 | USES2 },			/* swap.w rm,rn */
1832   { 0x600a, SETS1 | SETSSP | USES2 | USESSP },	/* negc rm,rn */
1833   { 0x600b, SETS1 | USES2 },			/* neg rm,rn */
1834   { 0x600c, SETS1 | USES2 },			/* extu.b rm,rn */
1835   { 0x600d, SETS1 | USES2 },			/* extu.w rm,rn */
1836   { 0x600e, SETS1 | USES2 },			/* exts.b rm,rn */
1837   { 0x600f, SETS1 | USES2 }			/* exts.w rm,rn */
1838 };
1839 
1840 static const struct sh_minor_opcode sh_opcode6[] =
1841 {
1842   { MAP (sh_opcode60), 0xf00f }
1843 };
1844 
1845 static const struct sh_opcode sh_opcode70[] =
1846 {
1847   { 0x7000, SETS1 | USES1 }		/* add #imm,rn */
1848 };
1849 
1850 static const struct sh_minor_opcode sh_opcode7[] =
1851 {
1852   { MAP (sh_opcode70), 0xf000 }
1853 };
1854 
1855 static const struct sh_opcode sh_opcode80[] =
1856 {
1857   { 0x8000, STORE | USES2 | USESR0 },	/* mov.b r0,@(disp,rn) */
1858   { 0x8100, STORE | USES2 | USESR0 },	/* mov.w r0,@(disp,rn) */
1859   { 0x8200, SETSSP },			/* setrc #imm */
1860   { 0x8400, LOAD | SETSR0 | USES2 },	/* mov.b @(disp,rm),r0 */
1861   { 0x8500, LOAD | SETSR0 | USES2 },	/* mov.w @(disp,rm),r0 */
1862   { 0x8800, SETSSP | USESR0 },		/* cmp/eq #imm,r0 */
1863   { 0x8900, BRANCH | USESSP },		/* bt label */
1864   { 0x8b00, BRANCH | USESSP },		/* bf label */
1865   { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
1866   { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
1867   { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
1868   { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */
1869 };
1870 
1871 static const struct sh_minor_opcode sh_opcode8[] =
1872 {
1873   { MAP (sh_opcode80), 0xff00 }
1874 };
1875 
1876 static const struct sh_opcode sh_opcode90[] =
1877 {
1878   { 0x9000, LOAD | SETS1 }	/* mov.w @(disp,pc),rn */
1879 };
1880 
1881 static const struct sh_minor_opcode sh_opcode9[] =
1882 {
1883   { MAP (sh_opcode90), 0xf000 }
1884 };
1885 
1886 static const struct sh_opcode sh_opcodea0[] =
1887 {
1888   { 0xa000, BRANCH | DELAY }	/* bra label */
1889 };
1890 
1891 static const struct sh_minor_opcode sh_opcodea[] =
1892 {
1893   { MAP (sh_opcodea0), 0xf000 }
1894 };
1895 
1896 static const struct sh_opcode sh_opcodeb0[] =
1897 {
1898   { 0xb000, BRANCH | DELAY }	/* bsr label */
1899 };
1900 
1901 static const struct sh_minor_opcode sh_opcodeb[] =
1902 {
1903   { MAP (sh_opcodeb0), 0xf000 }
1904 };
1905 
1906 static const struct sh_opcode sh_opcodec0[] =
1907 {
1908   { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
1909   { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
1910   { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
1911   { 0xc300, BRANCH | USESSP },			/* trapa #imm */
1912   { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
1913   { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
1914   { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
1915   { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
1916   { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
1917   { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
1918   { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
1919   { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
1920   { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
1921   { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
1922   { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
1923   { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
1924 };
1925 
1926 static const struct sh_minor_opcode sh_opcodec[] =
1927 {
1928   { MAP (sh_opcodec0), 0xff00 }
1929 };
1930 
1931 static const struct sh_opcode sh_opcoded0[] =
1932 {
1933   { 0xd000, LOAD | SETS1 }		/* mov.l @(disp,pc),rn */
1934 };
1935 
1936 static const struct sh_minor_opcode sh_opcoded[] =
1937 {
1938   { MAP (sh_opcoded0), 0xf000 }
1939 };
1940 
1941 static const struct sh_opcode sh_opcodee0[] =
1942 {
1943   { 0xe000, SETS1 }		/* mov #imm,rn */
1944 };
1945 
1946 static const struct sh_minor_opcode sh_opcodee[] =
1947 {
1948   { MAP (sh_opcodee0), 0xf000 }
1949 };
1950 
1951 static const struct sh_opcode sh_opcodef0[] =
1952 {
1953   { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
1954   { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
1955   { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
1956   { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
1957   { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
1958   { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
1959   { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
1960   { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
1961   { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
1962   { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
1963   { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
1964   { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
1965   { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
1966   { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
1967 };
1968 
1969 static const struct sh_opcode sh_opcodef1[] =
1970 {
1971   { 0xf00d, SETSF1 | USESSP },	/* fsts fpul,fn */
1972   { 0xf01d, SETSSP | USESF1 },	/* flds fn,fpul */
1973   { 0xf02d, SETSF1 | USESSP },	/* float fpul,fn */
1974   { 0xf03d, SETSSP | USESF1 },	/* ftrc fn,fpul */
1975   { 0xf04d, SETSF1 | USESF1 },	/* fneg fn */
1976   { 0xf05d, SETSF1 | USESF1 },	/* fabs fn */
1977   { 0xf06d, SETSF1 | USESF1 },	/* fsqrt fn */
1978   { 0xf07d, SETSSP | USESF1 },	/* ftst/nan fn */
1979   { 0xf08d, SETSF1 },		/* fldi0 fn */
1980   { 0xf09d, SETSF1 }		/* fldi1 fn */
1981 };
1982 
1983 static const struct sh_minor_opcode sh_opcodef[] =
1984 {
1985   { MAP (sh_opcodef0), 0xf00f },
1986   { MAP (sh_opcodef1), 0xf0ff }
1987 };
1988 
1989 static struct sh_major_opcode sh_opcodes[] =
1990 {
1991   { MAP (sh_opcode0) },
1992   { MAP (sh_opcode1) },
1993   { MAP (sh_opcode2) },
1994   { MAP (sh_opcode3) },
1995   { MAP (sh_opcode4) },
1996   { MAP (sh_opcode5) },
1997   { MAP (sh_opcode6) },
1998   { MAP (sh_opcode7) },
1999   { MAP (sh_opcode8) },
2000   { MAP (sh_opcode9) },
2001   { MAP (sh_opcodea) },
2002   { MAP (sh_opcodeb) },
2003   { MAP (sh_opcodec) },
2004   { MAP (sh_opcoded) },
2005   { MAP (sh_opcodee) },
2006   { MAP (sh_opcodef) }
2007 };
2008 
2009 /* The double data transfer / parallel processing insns are not
2010    described here.  This will cause sh_align_load_span to leave them alone.  */
2011 
2012 static const struct sh_opcode sh_dsp_opcodef0[] =
2013 {
2014   { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
2015   { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
2016   { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
2017   { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
2018   { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
2019   { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
2020   { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
2021   { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
2022 };
2023 
2024 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2025 {
2026   { MAP (sh_dsp_opcodef0), 0xfc0d }
2027 };
2028 
2029 /* Given an instruction, return a pointer to the corresponding
2030    sh_opcode structure.  Return NULL if the instruction is not
2031    recognized.  */
2032 
2033 static const struct sh_opcode *
2034 sh_insn_info (unsigned int insn)
2035 {
2036   const struct sh_major_opcode *maj;
2037   const struct sh_minor_opcode *min, *minend;
2038 
2039   maj = &sh_opcodes[(insn & 0xf000) >> 12];
2040   min = maj->minor_opcodes;
2041   minend = min + maj->count;
2042   for (; min < minend; min++)
2043     {
2044       unsigned int l;
2045       const struct sh_opcode *op, *opend;
2046 
2047       l = insn & min->mask;
2048       op = min->opcodes;
2049       opend = op + min->count;
2050 
2051       /* Since the opcode tables are sorted, we could use a binary
2052 	 search here if the count were above some cutoff value.  */
2053       for (; op < opend; op++)
2054 	if (op->opcode == l)
2055 	  return op;
2056     }
2057 
2058   return NULL;
2059 }
2060 
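/* A worked example of the lookup above: for the instruction word 0x431b
   ("tas.b @r3") the major index is (0x431b & 0xf000) >> 12 == 4, so the
   two minor tables of sh_opcode4 are searched.  The first uses the mask
   0xf0ff, and 0x431b & 0xf0ff == 0x401b, which matches the sh_opcode40
   entry flagged LOAD | SETSSP | USES1; USES1_REG (0x431b) == 3, so the
   insn is treated as a load that uses r3 and sets a special register
   (the T bit).  */
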
2061 /* See whether an instruction uses a general purpose register.  */
2062 
2063 static bfd_boolean
2064 sh_insn_uses_reg (unsigned int insn,
2065 		  const struct sh_opcode *op,
2066 		  unsigned int reg)
2067 {
2068   unsigned int f;
2069 
2070   f = op->flags;
2071 
2072   if ((f & USES1) != 0
2073       && USES1_REG (insn) == reg)
2074     return TRUE;
2075   if ((f & USES2) != 0
2076       && USES2_REG (insn) == reg)
2077     return TRUE;
2078   if ((f & USESR0) != 0
2079       && reg == 0)
2080     return TRUE;
2081   if ((f & USESAS) && reg == USESAS_REG (insn))
2082     return TRUE;
2083   if ((f & USESR8) && reg == 8)
2084     return TRUE;
2085 
2086   return FALSE;
2087 }
2088 
2089 /* See whether an instruction sets a general purpose register.  */
2090 
2091 static bfd_boolean
2092 sh_insn_sets_reg (unsigned int insn,
2093 		  const struct sh_opcode *op,
2094 		  unsigned int reg)
2095 {
2096   unsigned int f;
2097 
2098   f = op->flags;
2099 
2100   if ((f & SETS1) != 0
2101       && SETS1_REG (insn) == reg)
2102     return TRUE;
2103   if ((f & SETS2) != 0
2104       && SETS2_REG (insn) == reg)
2105     return TRUE;
2106   if ((f & SETSR0) != 0
2107       && reg == 0)
2108     return TRUE;
2109   if ((f & SETSAS) && reg == SETSAS_REG (insn))
2110     return TRUE;
2111 
2112   return FALSE;
2113 }
2114 
2115 /* See whether an instruction uses or sets a general purpose register.  */
2116 
2117 static bfd_boolean
2118 sh_insn_uses_or_sets_reg (unsigned int insn,
2119 			  const struct sh_opcode *op,
2120 			  unsigned int reg)
2121 {
2122   if (sh_insn_uses_reg (insn, op, reg))
2123     return TRUE;
2124 
2125   return sh_insn_sets_reg (insn, op, reg);
2126 }
2127 
2128 /* See whether an instruction uses a floating point register.  */
2129 
2130 static bfd_boolean
2131 sh_insn_uses_freg (unsigned int insn,
2132 		   const struct sh_opcode *op,
2133 		   unsigned int freg)
2134 {
2135   unsigned int f;
2136 
2137   f = op->flags;
2138 
2139   /* We can't tell whether this is a double-precision insn, so play it
2140      safe and assume that it might be.  That means we must not only test
2141      FREG against itself, but also an even FREG against FREG+1 (in case
2142      the using insn uses just the low part of a double-precision value)
2143      and an odd FREG against FREG-1 (in case the setting insn sets just
2144      the low part of a double-precision value).
2145      What this all boils down to is that we have to ignore the lowest
2146      bit of the register number.  */
2147 
2148   if ((f & USESF1) != 0
2149       && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2150     return TRUE;
2151   if ((f & USESF2) != 0
2152       && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2153     return TRUE;
2154   if ((f & USESF0) != 0
2155       && freg == 0)
2156     return TRUE;
2157 
2158   return FALSE;
2159 }
2160 
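/* For example, "fadd f2,f7" assembles to 0xf720, so USESF1_REG gives 7
   and USESF2_REG gives 2.  Because the low bit is ignored, a query
   about f6 also returns TRUE, which errs on the safe side if the
   operation actually involves a double-precision register pair.  */
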
2161 /* See whether an instruction sets a floating point register.  */
2162 
2163 static bfd_boolean
2164 sh_insn_sets_freg (unsigned int insn,
2165 		   const struct sh_opcode *op,
2166 		   unsigned int freg)
2167 {
2168   unsigned int f;
2169 
2170   f = op->flags;
2171 
2172   /* We can't tell whether this is a double-precision insn, so play it
2173      safe and assume that it might be.  That means we must not only test
2174      FREG against itself, but also an even FREG against FREG+1 (in case
2175      the using insn uses just the low part of a double-precision value)
2176      and an odd FREG against FREG-1 (in case the setting insn sets just
2177      the low part of a double-precision value).
2178      What this all boils down to is that we have to ignore the lowest
2179      bit of the register number.  */
2180 
2181   if ((f & SETSF1) != 0
2182       && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2183     return TRUE;
2184 
2185   return FALSE;
2186 }
2187 
2188 /* See whether an instruction uses or sets a floating point register.  */
2189 
2190 static bfd_boolean
2191 sh_insn_uses_or_sets_freg (unsigned int insn,
2192 			   const struct sh_opcode *op,
2193 			   unsigned int reg)
2194 {
2195   if (sh_insn_uses_freg (insn, op, reg))
2196     return TRUE;
2197 
2198   return sh_insn_sets_freg (insn, op, reg);
2199 }
2200 
2201 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2202    before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
2203    This should return TRUE if there is a conflict, or FALSE if the
2204    instructions can be swapped safely.  */
2205 
2206 static bfd_boolean
2207 sh_insns_conflict (unsigned int i1,
2208 		   const struct sh_opcode *op1,
2209 		   unsigned int i2,
2210 		   const struct sh_opcode *op2)
2211 {
2212   unsigned int f1, f2;
2213 
2214   f1 = op1->flags;
2215   f2 = op2->flags;
2216 
2217   /* Load of fpscr conflicts with floating point operations.
2218      FIXME: shouldn't test raw opcodes here.  */
2219   if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2220       || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2221     return TRUE;
2222 
2223   if ((f1 & (BRANCH | DELAY)) != 0
2224       || (f2 & (BRANCH | DELAY)) != 0)
2225     return TRUE;
2226 
2227   if (((f1 | f2) & SETSSP)
2228       && (f1 & (SETSSP | USESSP))
2229       && (f2 & (SETSSP | USESSP)))
2230     return TRUE;
2231 
2232   if ((f1 & SETS1) != 0
2233       && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2234     return TRUE;
2235   if ((f1 & SETS2) != 0
2236       && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2237     return TRUE;
2238   if ((f1 & SETSR0) != 0
2239       && sh_insn_uses_or_sets_reg (i2, op2, 0))
2240     return TRUE;
2241   if ((f1 & SETSAS)
2242       && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2243     return TRUE;
2244   if ((f1 & SETSF1) != 0
2245       && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2246     return TRUE;
2247 
2248   if ((f2 & SETS1) != 0
2249       && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2250     return TRUE;
2251   if ((f2 & SETS2) != 0
2252       && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2253     return TRUE;
2254   if ((f2 & SETSR0) != 0
2255       && sh_insn_uses_or_sets_reg (i1, op1, 0))
2256     return TRUE;
2257   if ((f2 & SETSAS)
2258       && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2259     return TRUE;
2260   if ((f2 & SETSF1) != 0
2261       && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2262     return TRUE;
2263 
2264   /* The instructions do not conflict.  */
2265   return FALSE;
2266 }
2267 
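/* A small example of the conflict test: with I1 == 0x6312 ("mov.l
   @r1,r3", flagged LOAD | SETS1 | USES2) and I2 == 0x330c ("add r0,r3",
   flagged SETS1 | USES1 | USES2), I1 sets r3 and I2 uses r3, so
   sh_insns_conflict returns TRUE and the pair is never swapped.  */
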
2268 /* I1 is a load instruction, and I2 is some other instruction.  Return
2269    TRUE if I1 loads a register which I2 uses.  */
2270 
2271 static bfd_boolean
2272 sh_load_use (unsigned int i1,
2273 	     const struct sh_opcode *op1,
2274 	     unsigned int i2,
2275 	     const struct sh_opcode *op2)
2276 {
2277   unsigned int f1;
2278 
2279   f1 = op1->flags;
2280 
2281   if ((f1 & LOAD) == 0)
2282     return FALSE;
2283 
2284   /* If both SETS1 and SETSSP are set, that means a load to a special
2285      register using postincrement addressing mode, which we don't care
2286      about here.  */
2287   if ((f1 & SETS1) != 0
2288       && (f1 & SETSSP) == 0
2289       && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2290     return TRUE;
2291 
2292   if ((f1 & SETSR0) != 0
2293       && sh_insn_uses_reg (i2, op2, 0))
2294     return TRUE;
2295 
2296   if ((f1 & SETSF1) != 0
2297       && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2298     return TRUE;
2299 
2300   return FALSE;
2301 }
2302 
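/* Continuing the example above: I1 == 0x6312 ("mov.l @r1,r3") is a load
   that sets r3 and I2 == 0x330c ("add r0,r3") uses r3, so sh_load_use
   returns TRUE.  The callers use this to avoid producing a schedule in
   which an insn consumes the result of the load immediately before it,
   which would stall the pipeline.  */
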
2303 /* Try to align loads and stores within a span of memory.  This is
2304    called by both the ELF and the COFF sh targets.  ABFD and SEC are
2305    the BFD and section we are examining.  CONTENTS is the contents of
2306    the section.  SWAP is the routine to call to swap two instructions.
2307    RELOCS is a pointer to the internal relocation information, to be
2308    passed to SWAP.  PLABEL is a pointer to the current label in a
2309    sorted list of labels; LABEL_END is the end of the list.  START and
2310    STOP are the range of memory to examine.  If a swap is made,
2311    *PSWAPPED is set to TRUE.  */
2312 
2313 #ifdef COFF_WITH_PE
2314 static
2315 #endif
2316 bfd_boolean
2317 _bfd_sh_align_load_span (bfd *abfd,
2318 			 asection *sec,
2319 			 bfd_byte *contents,
2320 			 bfd_boolean (*swap) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
2321 			 void * relocs,
2322 			 bfd_vma **plabel,
2323 			 bfd_vma *label_end,
2324 			 bfd_vma start,
2325 			 bfd_vma stop,
2326 			 bfd_boolean *pswapped)
2327 {
2328   int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2329 	     || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2330   bfd_vma i;
2331 
2332   /* The SH4 has a Harvard architecture, hence aligning loads is not
2333      desirable.  In fact, it is counter-productive, since it interferes
2334      with the schedules generated by the compiler.  */
2335   if (abfd->arch_info->mach == bfd_mach_sh4)
2336     return TRUE;
2337 
2338   /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2339      instructions.  */
2340   if (dsp)
2341     {
2342       sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2343       sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2344     }
2345 
2346   /* Instructions should be aligned on 2 byte boundaries.  */
2347   if ((start & 1) == 1)
2348     ++start;
2349 
2350   /* Now look through the unaligned addresses.  */
2351   i = start;
2352   if ((i & 2) == 0)
2353     i += 2;
2354   for (; i < stop; i += 4)
2355     {
2356       unsigned int insn;
2357       const struct sh_opcode *op;
2358       unsigned int prev_insn = 0;
2359       const struct sh_opcode *prev_op = NULL;
2360 
2361       insn = bfd_get_16 (abfd, contents + i);
2362       op = sh_insn_info (insn);
2363       if (op == NULL
2364 	  || (op->flags & (LOAD | STORE)) == 0)
2365 	continue;
2366 
2367       /* This is a load or store which is not on a four byte boundary.  */
2368 
2369       while (*plabel < label_end && **plabel < i)
2370 	++*plabel;
2371 
2372       if (i > start)
2373 	{
2374 	  prev_insn = bfd_get_16 (abfd, contents + i - 2);
2375 	  /* If INSN is the field b of a parallel processing insn, it is not
2376 	     a load / store after all.  Note that the test here might mistake
2377 	     the field_b of a pcopy insn for the starting code of a parallel
2378 	     processing insn; this might miss a swapping opportunity, but at
2379 	     least we're on the safe side.  */
2380 	  if (dsp && (prev_insn & 0xfc00) == 0xf800)
2381 	    continue;
2382 
2383 	  /* Check if prev_insn is actually the field b of a parallel
2384 	     processing insn.  Again, this can give a spurious match
2385 	     after a pcopy.  */
2386 	  if (dsp && i - 2 > start)
2387 	    {
2388 	      unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2389 
2390 	      if ((pprev_insn & 0xfc00) == 0xf800)
2391 		prev_op = NULL;
2392 	      else
2393 		prev_op = sh_insn_info (prev_insn);
2394 	    }
2395 	  else
2396 	    prev_op = sh_insn_info (prev_insn);
2397 
2398 	  /* If the load/store instruction is in a delay slot, we
2399 	     can't swap.  */
2400 	  if (prev_op == NULL
2401 	      || (prev_op->flags & DELAY) != 0)
2402 	    continue;
2403 	}
2404       if (i > start
2405 	  && (*plabel >= label_end || **plabel != i)
2406 	  && prev_op != NULL
2407 	  && (prev_op->flags & (LOAD | STORE)) == 0
2408 	  && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2409 	{
2410 	  bfd_boolean ok;
2411 
2412 	  /* The load/store instruction does not have a label, and
2413 	     there is a previous instruction; PREV_INSN is not
2414 	     itself a load/store instruction, and PREV_INSN and
2415 	     INSN do not conflict.  */
2416 
2417 	  ok = TRUE;
2418 
2419 	  if (i >= start + 4)
2420 	    {
2421 	      unsigned int prev2_insn;
2422 	      const struct sh_opcode *prev2_op;
2423 
2424 	      prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2425 	      prev2_op = sh_insn_info (prev2_insn);
2426 
2427 	      /* If the instruction before PREV_INSN has a delay
2428 		 slot--that is, PREV_INSN is in a delay slot--we
2429 		 can not swap.  */
2430 	      if (prev2_op == NULL
2431 		  || (prev2_op->flags & DELAY) != 0)
2432 		ok = FALSE;
2433 
2434 	      /* If the instruction before PREV_INSN is a load,
2435 		 and it sets a register which INSN uses, then
2436 		 putting INSN immediately after PREV_INSN will
2437 		 cause a pipeline bubble, so there is no point to
2438 		 making the swap.  */
2439 	      if (ok
2440 		  && (prev2_op->flags & LOAD) != 0
2441 		  && sh_load_use (prev2_insn, prev2_op, insn, op))
2442 		ok = FALSE;
2443 	    }
2444 
2445 	  if (ok)
2446 	    {
2447 	      if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2448 		return FALSE;
2449 	      *pswapped = TRUE;
2450 	      continue;
2451 	    }
2452 	}
2453 
2454       while (*plabel < label_end && **plabel < i + 2)
2455 	++*plabel;
2456 
2457       if (i + 2 < stop
2458 	  && (*plabel >= label_end || **plabel != i + 2))
2459 	{
2460 	  unsigned int next_insn;
2461 	  const struct sh_opcode *next_op;
2462 
2463 	  /* There is an instruction after the load/store
2464 	     instruction, and it does not have a label.  */
2465 	  next_insn = bfd_get_16 (abfd, contents + i + 2);
2466 	  next_op = sh_insn_info (next_insn);
2467 	  if (next_op != NULL
2468 	      && (next_op->flags & (LOAD | STORE)) == 0
2469 	      && ! sh_insns_conflict (insn, op, next_insn, next_op))
2470 	    {
2471 	      bfd_boolean ok;
2472 
2473 	      /* NEXT_INSN is not itself a load/store instruction,
2474 		 and it does not conflict with INSN.  */
2475 
2476 	      ok = TRUE;
2477 
2478 	      /* If PREV_INSN is a load, and it sets a register
2479 		 which NEXT_INSN uses, then putting NEXT_INSN
2480 		 immediately after PREV_INSN will cause a pipeline
2481 		 bubble, so there is no reason to make this swap.  */
2482 	      if (prev_op != NULL
2483 		  && (prev_op->flags & LOAD) != 0
2484 		  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2485 		ok = FALSE;
2486 
2487 	      /* If INSN is a load, and it sets a register which
2488 		 the insn after NEXT_INSN uses, then doing the
2489 		 swap will cause a pipeline bubble, so there is no
2490 		 reason to make the swap.  However, if the insn
2491 		 after NEXT_INSN is itself a load or store
2492 		 instruction, then it is misaligned, so
2493 		 optimistically hope that it will be swapped
2494 		 itself, and just live with the pipeline bubble if
2495 		 it isn't.  */
2496 	      if (ok
2497 		  && i + 4 < stop
2498 		  && (op->flags & LOAD) != 0)
2499 		{
2500 		  unsigned int next2_insn;
2501 		  const struct sh_opcode *next2_op;
2502 
2503 		  next2_insn = bfd_get_16 (abfd, contents + i + 4);
2504 		  next2_op = sh_insn_info (next2_insn);
2505 		  if (next2_op == NULL
2506 		      || ((next2_op->flags & (LOAD | STORE)) == 0
2507 			  && sh_load_use (insn, op, next2_insn, next2_op)))
2508 		    ok = FALSE;
2509 		}
2510 
2511 	      if (ok)
2512 		{
2513 		  if (! (*swap) (abfd, sec, relocs, contents, i))
2514 		    return FALSE;
2515 		  *pswapped = TRUE;
2516 		  continue;
2517 		}
2518 	    }
2519 	}
2520     }
2521 
2522   return TRUE;
2523 }
2524 #endif /* not COFF_IMAGE_WITH_PE */
2525 
2526 /* Swap two SH instructions.  */
2527 
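/* Swapping exchanges the insns at ADDR and ADDR + 2, so a PC-relative
   branch which is itself one of the two moves by two bytes and its
   displacement field must be corrected.  For an R_SH_PCDISP8BY2 reloc
   (e.g. a "bt label") the displacement is counted in two-byte units:
   an insn moving from ADDR to ADDR + 2 has its field decremented by
   one, while an insn moving the other way has it incremented by one.
   That is what the "insn += add / 2" adjustments below implement.  */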
2528 static bfd_boolean
2529 sh_swap_insns (bfd *      abfd,
2530 	       asection * sec,
2531 	       void *     relocs,
2532 	       bfd_byte * contents,
2533 	       bfd_vma    addr)
2534 {
2535   struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2536   unsigned short i1, i2;
2537   struct internal_reloc *irel, *irelend;
2538 
2539   /* Swap the instructions themselves.  */
2540   i1 = bfd_get_16 (abfd, contents + addr);
2541   i2 = bfd_get_16 (abfd, contents + addr + 2);
2542   bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2543   bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2544 
2545   /* Adjust all reloc addresses.  */
2546   irelend = internal_relocs + sec->reloc_count;
2547   for (irel = internal_relocs; irel < irelend; irel++)
2548     {
2549       int type, add;
2550 
2551       /* There are a few special types of relocs that we don't want to
2552 	 adjust.  These relocs do not apply to the instruction itself,
2553 	 but are only associated with the address.  */
2554       type = irel->r_type;
2555       if (type == R_SH_ALIGN
2556 	  || type == R_SH_CODE
2557 	  || type == R_SH_DATA
2558 	  || type == R_SH_LABEL)
2559 	continue;
2560 
2561       /* If an R_SH_USES reloc points to one of the addresses being
2562 	 swapped, we must adjust it.  It would be incorrect to do this
2563 	 for a jump, though, since we want to execute both
2564 	 instructions after the jump.  (We have avoided swapping
2565 	 around a label, so the jump will not wind up executing an
2566 	 instruction it shouldn't).  */
2567       if (type == R_SH_USES)
2568 	{
2569 	  bfd_vma off;
2570 
2571 	  off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2572 	  if (off == addr)
2573 	    irel->r_offset += 2;
2574 	  else if (off == addr + 2)
2575 	    irel->r_offset -= 2;
2576 	}
2577 
2578       if (irel->r_vaddr - sec->vma == addr)
2579 	{
2580 	  irel->r_vaddr += 2;
2581 	  add = -2;
2582 	}
2583       else if (irel->r_vaddr - sec->vma == addr + 2)
2584 	{
2585 	  irel->r_vaddr -= 2;
2586 	  add = 2;
2587 	}
2588       else
2589 	add = 0;
2590 
2591       if (add != 0)
2592 	{
2593 	  bfd_byte *loc;
2594 	  unsigned short insn, oinsn;
2595 	  bfd_boolean overflow;
2596 
2597 	  loc = contents + irel->r_vaddr - sec->vma;
2598 	  overflow = FALSE;
2599 	  switch (type)
2600 	    {
2601 	    default:
2602 	      break;
2603 
2604 	    case R_SH_PCDISP8BY2:
2605 	    case R_SH_PCRELIMM8BY2:
2606 	      insn = bfd_get_16 (abfd, loc);
2607 	      oinsn = insn;
2608 	      insn += add / 2;
2609 	      if ((oinsn & 0xff00) != (insn & 0xff00))
2610 		overflow = TRUE;
2611 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2612 	      break;
2613 
2614 	    case R_SH_PCDISP:
2615 	      insn = bfd_get_16 (abfd, loc);
2616 	      oinsn = insn;
2617 	      insn += add / 2;
2618 	      if ((oinsn & 0xf000) != (insn & 0xf000))
2619 		overflow = TRUE;
2620 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2621 	      break;
2622 
2623 	    case R_SH_PCRELIMM8BY4:
2624 	      /* This reloc ignores the least significant 3 bits of
2625 		 the program counter before adding in the offset.
2626 		 This means that if ADDR is at an even address, the
2627 		 swap will not affect the offset.  If ADDR is at an
2628 		 odd address, then the instruction will be crossing a
2629 		 four byte boundary, and must be adjusted.  */
2630 	      if ((addr & 3) != 0)
2631 		{
2632 		  insn = bfd_get_16 (abfd, loc);
2633 		  oinsn = insn;
2634 		  insn += add / 2;
2635 		  if ((oinsn & 0xff00) != (insn & 0xff00))
2636 		    overflow = TRUE;
2637 		  bfd_put_16 (abfd, (bfd_vma) insn, loc);
2638 		}
2639 
2640 	      break;
2641 	    }
2642 
2643 	  if (overflow)
2644 	    {
2645 	      _bfd_error_handler
2646 		/* xgettext: c-format */
2647 		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
2648 		 abfd, (uint64_t) irel->r_vaddr);
2649 	      bfd_set_error (bfd_error_bad_value);
2650 	      return FALSE;
2651 	    }
2652 	}
2653     }
2654 
2655   return TRUE;
2656 }
2657 
2658 /* Look for loads and stores which we can align to four byte
2659    boundaries.  See the longer comment above sh_relax_section for why
2660    this is desirable.  This sets *PSWAPPED if some instruction was
2661    swapped.  */
2662 
2663 static bfd_boolean
2664 sh_align_loads (bfd *abfd,
2665 		asection *sec,
2666 		struct internal_reloc *internal_relocs,
2667 		bfd_byte *contents,
2668 		bfd_boolean *pswapped)
2669 {
2670   struct internal_reloc *irel, *irelend;
2671   bfd_vma *labels = NULL;
2672   bfd_vma *label, *label_end;
2673   bfd_size_type amt;
2674 
2675   *pswapped = FALSE;
2676 
2677   irelend = internal_relocs + sec->reloc_count;
2678 
2679   /* Get all the addresses with labels on them.  */
2680   amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2681   labels = (bfd_vma *) bfd_malloc (amt);
2682   if (labels == NULL)
2683     goto error_return;
2684   label_end = labels;
2685   for (irel = internal_relocs; irel < irelend; irel++)
2686     {
2687       if (irel->r_type == R_SH_LABEL)
2688 	{
2689 	  *label_end = irel->r_vaddr - sec->vma;
2690 	  ++label_end;
2691 	}
2692     }
2693 
2694   /* Note that the assembler currently always outputs relocs in
2695      address order.  If that ever changes, this code will need to sort
2696      the label values and the relocs.  */
2697 
2698   label = labels;
2699 
2700   for (irel = internal_relocs; irel < irelend; irel++)
2701     {
2702       bfd_vma start, stop;
2703 
2704       if (irel->r_type != R_SH_CODE)
2705 	continue;
2706 
2707       start = irel->r_vaddr - sec->vma;
2708 
2709       for (irel++; irel < irelend; irel++)
2710 	if (irel->r_type == R_SH_DATA)
2711 	  break;
2712       if (irel < irelend)
2713 	stop = irel->r_vaddr - sec->vma;
2714       else
2715 	stop = sec->size;
2716 
2717       if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2718 				     internal_relocs, &label,
2719 				     label_end, start, stop, pswapped))
2720 	goto error_return;
2721     }
2722 
2723   free (labels);
2724 
2725   return TRUE;
2726 
2727  error_return:
2728   if (labels != NULL)
2729     free (labels);
2730   return FALSE;
2731 }
2732 
2733 /* This is a modification of _bfd_coff_generic_relocate_section, which
2734    will handle SH relaxing.  */
2735 
2736 static bfd_boolean
2737 sh_relocate_section (bfd *output_bfd ATTRIBUTE_UNUSED,
2738 		     struct bfd_link_info *info,
2739 		     bfd *input_bfd,
2740 		     asection *input_section,
2741 		     bfd_byte *contents,
2742 		     struct internal_reloc *relocs,
2743 		     struct internal_syment *syms,
2744 		     asection **sections)
2745 {
2746   struct internal_reloc *rel;
2747   struct internal_reloc *relend;
2748 
2749   rel = relocs;
2750   relend = rel + input_section->reloc_count;
2751   for (; rel < relend; rel++)
2752     {
2753       long symndx;
2754       struct coff_link_hash_entry *h;
2755       struct internal_syment *sym;
2756       bfd_vma addend;
2757       bfd_vma val;
2758       reloc_howto_type *howto;
2759       bfd_reloc_status_type rstat;
2760 
2761       /* Almost all relocs have to do with relaxing.  If any work must
2762 	 be done for them, it has been done in sh_relax_section.  */
2763       if (rel->r_type != R_SH_IMM32
2764 #ifdef COFF_WITH_PE
2765 	  && rel->r_type != R_SH_IMM32CE
2766 	  && rel->r_type != R_SH_IMAGEBASE
2767 #endif
2768 	  && rel->r_type != R_SH_PCDISP)
2769 	continue;
2770 
2771       symndx = rel->r_symndx;
2772 
2773       if (symndx == -1)
2774 	{
2775 	  h = NULL;
2776 	  sym = NULL;
2777 	}
2778       else
2779 	{
2780 	  if (symndx < 0
2781 	      || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2782 	    {
2783 	      _bfd_error_handler
2784 		/* xgettext: c-format */
2785 		(_("%pB: illegal symbol index %ld in relocs"),
2786 		 input_bfd, symndx);
2787 	      bfd_set_error (bfd_error_bad_value);
2788 	      return FALSE;
2789 	    }
2790 	  h = obj_coff_sym_hashes (input_bfd)[symndx];
2791 	  sym = syms + symndx;
2792 	}
2793 
2794       if (sym != NULL && sym->n_scnum != 0)
2795 	addend = - sym->n_value;
2796       else
2797 	addend = 0;
2798 
2799       if (rel->r_type == R_SH_PCDISP)
2800 	addend -= 4;
2801 
2802       if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2803 	howto = NULL;
2804       else
2805 	howto = &sh_coff_howtos[rel->r_type];
2806 
2807       if (howto == NULL)
2808 	{
2809 	  bfd_set_error (bfd_error_bad_value);
2810 	  return FALSE;
2811 	}
2812 
2813 #ifdef COFF_WITH_PE
2814       if (rel->r_type == R_SH_IMAGEBASE)
2815 	addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2816 #endif
2817 
2818       val = 0;
2819 
2820       if (h == NULL)
2821 	{
2822 	  asection *sec;
2823 
2824 	  /* There is nothing to do for an internal PCDISP reloc.  */
2825 	  if (rel->r_type == R_SH_PCDISP)
2826 	    continue;
2827 
2828 	  if (symndx == -1)
2829 	    {
2830 	      sec = bfd_abs_section_ptr;
2831 	      val = 0;
2832 	    }
2833 	  else
2834 	    {
2835 	      sec = sections[symndx];
2836 	      val = (sec->output_section->vma
2837 		     + sec->output_offset
2838 		     + sym->n_value
2839 		     - sec->vma);
2840 	    }
2841 	}
2842       else
2843 	{
2844 	  if (h->root.type == bfd_link_hash_defined
2845 	      || h->root.type == bfd_link_hash_defweak)
2846 	    {
2847 	      asection *sec;
2848 
2849 	      sec = h->root.u.def.section;
2850 	      val = (h->root.u.def.value
2851 		     + sec->output_section->vma
2852 		     + sec->output_offset);
2853 	    }
2854 	  else if (! bfd_link_relocatable (info))
2855 	    (*info->callbacks->undefined_symbol)
2856 	      (info, h->root.root.string, input_bfd, input_section,
2857 	       rel->r_vaddr - input_section->vma, TRUE);
2858 	}
2859 
2860       rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2861 					contents,
2862 					rel->r_vaddr - input_section->vma,
2863 					val, addend);
2864 
2865       switch (rstat)
2866 	{
2867 	default:
2868 	  abort ();
2869 	case bfd_reloc_ok:
2870 	  break;
2871 	case bfd_reloc_overflow:
2872 	  {
2873 	    const char *name;
2874 	    char buf[SYMNMLEN + 1];
2875 
2876 	    if (symndx == -1)
2877 	      name = "*ABS*";
2878 	    else if (h != NULL)
2879 	      name = NULL;
2880 	    else if (sym->_n._n_n._n_zeroes == 0
2881 		     && sym->_n._n_n._n_offset != 0)
2882 	      name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2883 	    else
2884 	      {
2885 		strncpy (buf, sym->_n._n_name, SYMNMLEN);
2886 		buf[SYMNMLEN] = '\0';
2887 		name = buf;
2888 	      }
2889 
2890 	    (*info->callbacks->reloc_overflow)
2891 	      (info, (h ? &h->root : NULL), name, howto->name,
2892 	       (bfd_vma) 0, input_bfd, input_section,
2893 	       rel->r_vaddr - input_section->vma);
2894 	  }
2895 	}
2896     }
2897 
2898   return TRUE;
2899 }
2900 
2901 /* This is a version of bfd_generic_get_relocated_section_contents
2902    which uses sh_relocate_section.  */
2903 
2904 static bfd_byte *
2905 sh_coff_get_relocated_section_contents (bfd *output_bfd,
2906 					struct bfd_link_info *link_info,
2907 					struct bfd_link_order *link_order,
2908 					bfd_byte *data,
2909 					bfd_boolean relocatable,
2910 					asymbol **symbols)
2911 {
2912   asection *input_section = link_order->u.indirect.section;
2913   bfd *input_bfd = input_section->owner;
2914   asection **sections = NULL;
2915   struct internal_reloc *internal_relocs = NULL;
2916   struct internal_syment *internal_syms = NULL;
2917 
2918   /* We only need to handle the case of relaxing, or of having a
2919      particular set of section contents, specially.  */
2920   if (relocatable
2921       || coff_section_data (input_bfd, input_section) == NULL
2922       || coff_section_data (input_bfd, input_section)->contents == NULL)
2923     return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2924 						       link_order, data,
2925 						       relocatable,
2926 						       symbols);
2927 
2928   memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2929 	  (size_t) input_section->size);
2930 
2931   if ((input_section->flags & SEC_RELOC) != 0
2932       && input_section->reloc_count > 0)
2933     {
2934       bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2935       bfd_byte *esym, *esymend;
2936       struct internal_syment *isymp;
2937       asection **secpp;
2938       bfd_size_type amt;
2939 
2940       if (! _bfd_coff_get_external_symbols (input_bfd))
2941 	goto error_return;
2942 
2943       internal_relocs = (_bfd_coff_read_internal_relocs
2944 			 (input_bfd, input_section, FALSE, (bfd_byte *) NULL,
2945 			  FALSE, (struct internal_reloc *) NULL));
2946       if (internal_relocs == NULL)
2947 	goto error_return;
2948 
2949       amt = obj_raw_syment_count (input_bfd);
2950       amt *= sizeof (struct internal_syment);
2951       internal_syms = (struct internal_syment *) bfd_malloc (amt);
2952       if (internal_syms == NULL)
2953 	goto error_return;
2954 
2955       amt = obj_raw_syment_count (input_bfd);
2956       amt *= sizeof (asection *);
2957       sections = (asection **) bfd_malloc (amt);
2958       if (sections == NULL)
2959 	goto error_return;
2960 
2961       isymp = internal_syms;
2962       secpp = sections;
2963       esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2964       esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2965       while (esym < esymend)
2966 	{
2967 	  bfd_coff_swap_sym_in (input_bfd, esym, isymp);
2968 
2969 	  if (isymp->n_scnum != 0)
2970 	    *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2971 	  else
2972 	    {
2973 	      if (isymp->n_value == 0)
2974 		*secpp = bfd_und_section_ptr;
2975 	      else
2976 		*secpp = bfd_com_section_ptr;
2977 	    }
2978 
2979 	  esym += (isymp->n_numaux + 1) * symesz;
2980 	  secpp += isymp->n_numaux + 1;
2981 	  isymp += isymp->n_numaux + 1;
2982 	}
2983 
2984       if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2985 				 input_section, data, internal_relocs,
2986 				 internal_syms, sections))
2987 	goto error_return;
2988 
2989       free (sections);
2990       sections = NULL;
2991       free (internal_syms);
2992       internal_syms = NULL;
2993       free (internal_relocs);
2994       internal_relocs = NULL;
2995     }
2996 
2997   return data;
2998 
2999  error_return:
3000   if (internal_relocs != NULL)
3001     free (internal_relocs);
3002   if (internal_syms != NULL)
3003     free (internal_syms);
3004   if (sections != NULL)
3005     free (sections);
3006   return NULL;
3007 }
3008 
3009 /* The target vectors.  */
3010 
3011 #ifndef TARGET_SHL_SYM
3012 CREATE_BIG_COFF_TARGET_VEC (sh_coff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3013 #endif
3014 
3015 #ifdef TARGET_SHL_SYM
3016 #define TARGET_SYM TARGET_SHL_SYM
3017 #else
3018 #define TARGET_SYM sh_coff_le_vec
3019 #endif
3020 
3021 #ifndef TARGET_SHL_NAME
3022 #define TARGET_SHL_NAME "coff-shl"
3023 #endif
3024 
3025 #ifdef COFF_WITH_PE
3026 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3027 			       SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3028 #else
3029 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3030 			       0, '_', NULL, COFF_SWAP_TABLE)
3031 #endif
3032 
3033 #ifndef TARGET_SHL_SYM
3034 
3035 /* Some people want versions of the SH COFF target which do not align
3036    to 16 byte boundaries.  We implement that by adding a couple of new
3037    target vectors.  These are just like the ones above, but they
3038    change the default section alignment.  To generate them in the
3039    assembler, use -small.  To use them in the linker, use -b
3040    coff-sh{l}-small and -oformat coff-sh{l}-small.
3041 
3042    Yes, this is a horrible hack.  A general solution for setting
3043    section alignment in COFF is rather complex.  ELF handles this
3044    correctly.  */
3045 
3046 /* Only recognize the small versions if the target was not defaulted.
3047    Otherwise we won't recognize the non-default endianness.  */
3048 
3049 static const bfd_target *
3050 coff_small_object_p (bfd *abfd)
3051 {
3052   if (abfd->target_defaulted)
3053     {
3054       bfd_set_error (bfd_error_wrong_format);
3055       return NULL;
3056     }
3057   return coff_object_p (abfd);
3058 }
3059 
3060 /* Set the section alignment for the small versions.  */
3061 
3062 static bfd_boolean
3063 coff_small_new_section_hook (bfd *abfd, asection *section)
3064 {
3065   if (! coff_new_section_hook (abfd, section))
3066     return FALSE;
3067 
3068   /* We must align to at least a four byte boundary, because longword
3069      accesses must be on a four byte boundary.  */
3070   if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3071     section->alignment_power = 2;
3072 
3073   return TRUE;
3074 }
3075 
3076 /* This is copied from bfd_coff_std_swap_table so that we can change
3077    the default section alignment power.  */
3078 
3079 static bfd_coff_backend_data bfd_coff_small_swap_table =
3080 {
3081   coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3082   coff_swap_aux_out, coff_swap_sym_out,
3083   coff_swap_lineno_out, coff_swap_reloc_out,
3084   coff_swap_filehdr_out, coff_swap_aouthdr_out,
3085   coff_swap_scnhdr_out,
3086   FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3087 #ifdef COFF_LONG_FILENAMES
3088   TRUE,
3089 #else
3090   FALSE,
3091 #endif
3092   COFF_DEFAULT_LONG_SECTION_NAMES,
3093   2,
3094 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3095   TRUE,
3096 #else
3097   FALSE,
3098 #endif
3099 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3100   4,
3101 #else
3102   2,
3103 #endif
3104   32768,
3105   coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3106   coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3107   coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3108   coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3109   coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3110   coff_classify_symbol, coff_compute_section_file_positions,
3111   coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3112   coff_adjust_symndx, coff_link_add_one_symbol,
3113   coff_link_output_has_begun, coff_final_link_postscript,
3114   bfd_pe_print_pdata
3115 };
3116 
3117 #define coff_small_close_and_cleanup \
3118   coff_close_and_cleanup
3119 #define coff_small_bfd_free_cached_info \
3120   coff_bfd_free_cached_info
3121 #define coff_small_get_section_contents \
3122   coff_get_section_contents
3123 #define coff_small_get_section_contents_in_window \
3124   coff_get_section_contents_in_window
3125 
3126 extern const bfd_target sh_coff_small_le_vec;
3127 
3128 const bfd_target sh_coff_small_vec =
3129 {
3130   "coff-sh-small",		/* name */
3131   bfd_target_coff_flavour,
3132   BFD_ENDIAN_BIG,		/* data byte order is big */
3133   BFD_ENDIAN_BIG,		/* header byte order is big */
3134 
3135   (HAS_RELOC | EXEC_P		/* object flags */
3136    | HAS_LINENO | HAS_DEBUG
3137    | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3138 
3139   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3140   '_',				/* leading symbol underscore */
3141   '/',				/* ar_pad_char */
3142   15,				/* ar_max_namelen */
3143   0,				/* match priority.  */
3144   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3145   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3146   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3147   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3148   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3149   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3150 
3151   {				/* bfd_check_format */
3152     _bfd_dummy_target,
3153     coff_small_object_p,
3154     bfd_generic_archive_p,
3155     _bfd_dummy_target
3156   },
3157   {				/* bfd_set_format */
3158     _bfd_bool_bfd_false_error,
3159     coff_mkobject,
3160     _bfd_generic_mkarchive,
3161     _bfd_bool_bfd_false_error
3162   },
3163   {				/* bfd_write_contents */
3164     _bfd_bool_bfd_false_error,
3165     coff_write_object_contents,
3166     _bfd_write_archive_contents,
3167     _bfd_bool_bfd_false_error
3168   },
3169 
3170   BFD_JUMP_TABLE_GENERIC (coff_small),
3171   BFD_JUMP_TABLE_COPY (coff),
3172   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3173   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3174   BFD_JUMP_TABLE_SYMBOLS (coff),
3175   BFD_JUMP_TABLE_RELOCS (coff),
3176   BFD_JUMP_TABLE_WRITE (coff),
3177   BFD_JUMP_TABLE_LINK (coff),
3178   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3179 
3180   &sh_coff_small_le_vec,
3181 
3182   &bfd_coff_small_swap_table
3183 };
3184 
3185 const bfd_target sh_coff_small_le_vec =
3186 {
3187   "coff-shl-small",		/* name */
3188   bfd_target_coff_flavour,
3189   BFD_ENDIAN_LITTLE,		/* data byte order is little */
3190   BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */
3191 
3192   (HAS_RELOC | EXEC_P		/* object flags */
3193    | HAS_LINENO | HAS_DEBUG
3194    | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3195 
3196   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3197   '_',				/* leading symbol underscore */
3198   '/',				/* ar_pad_char */
3199   15,				/* ar_max_namelen */
3200   0,				/* match priority.  */
3201   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3202   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3203   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3204   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3205   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3206   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3207 
3208   {				/* bfd_check_format */
3209     _bfd_dummy_target,
3210     coff_small_object_p,
3211     bfd_generic_archive_p,
3212     _bfd_dummy_target
3213   },
3214   {				/* bfd_set_format */
3215     _bfd_bool_bfd_false_error,
3216     coff_mkobject,
3217     _bfd_generic_mkarchive,
3218     _bfd_bool_bfd_false_error
3219   },
3220   {				/* bfd_write_contents */
3221     _bfd_bool_bfd_false_error,
3222     coff_write_object_contents,
3223     _bfd_write_archive_contents,
3224     _bfd_bool_bfd_false_error
3225   },
3226 
3227   BFD_JUMP_TABLE_GENERIC (coff_small),
3228   BFD_JUMP_TABLE_COPY (coff),
3229   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3230   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3231   BFD_JUMP_TABLE_SYMBOLS (coff),
3232   BFD_JUMP_TABLE_RELOCS (coff),
3233   BFD_JUMP_TABLE_WRITE (coff),
3234   BFD_JUMP_TABLE_LINK (coff),
3235   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3236 
3237   &sh_coff_small_vec,
3238 
3239   &bfd_coff_small_swap_table
3240 };
3241 #endif
3242