1 /* BFD back-end for Renesas Super-H COFF binaries.
2    Copyright (C) 1993-2022 Free Software Foundation, Inc.
3    Contributed by Cygnus Support.
4    Written by Steve Chamberlain, <sac@cygnus.com>.
5    Relaxing code written by Ian Lance Taylor, <ian@cygnus.com>.
6 
7    This file is part of BFD, the Binary File Descriptor library.
8 
9    This program is free software; you can redistribute it and/or modify
10    it under the terms of the GNU General Public License as published by
11    the Free Software Foundation; either version 3 of the License, or
12    (at your option) any later version.
13 
14    This program is distributed in the hope that it will be useful,
15    but WITHOUT ANY WARRANTY; without even the implied warranty of
16    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17    GNU General Public License for more details.
18 
19    You should have received a copy of the GNU General Public License
20    along with this program; if not, write to the Free Software
21    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
22    MA 02110-1301, USA.  */
23 
24 #include "sysdep.h"
25 #include "bfd.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "bfdlink.h"
29 #include "coff/sh.h"
30 #include "coff/internal.h"
31 
32 #undef  bfd_pe_print_pdata
33 
34 #ifdef COFF_WITH_PE
35 #include "coff/pe.h"
36 
37 #ifndef COFF_IMAGE_WITH_PE
38 static bool sh_align_load_span
39   (bfd *, asection *, bfd_byte *,
40    bool (*) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
41    void *, bfd_vma **, bfd_vma *, bfd_vma, bfd_vma, bool *);
42 
43 #define _bfd_sh_align_load_span sh_align_load_span
44 #endif
45 
46 #define	bfd_pe_print_pdata   _bfd_pe_print_ce_compressed_pdata
47 
48 #else
49 
50 #define	bfd_pe_print_pdata   NULL
51 
52 #endif /* COFF_WITH_PE.  */
53 
54 #include "libcoff.h"
55 
56 /* Internal functions.  */
57 
58 #ifdef COFF_WITH_PE
59 /* Can't build import tables with 2**4 alignment.  */
60 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	2
61 #else
62 /* Default section alignment to 2**4.  */
63 #define COFF_DEFAULT_SECTION_ALIGNMENT_POWER	4
64 #endif
65 
66 #ifdef COFF_IMAGE_WITH_PE
67 /* Align PE executables.  */
68 #define COFF_PAGE_SIZE 0x1000
69 #endif
70 
71 /* Generate long file names.  */
72 #define COFF_LONG_FILENAMES
73 
74 #ifdef COFF_WITH_PE
75 /* Return TRUE if this relocation should
76    appear in the output .reloc section.  */
77 
78 static bool
79 in_reloc_p (bfd * abfd ATTRIBUTE_UNUSED,
80 	    reloc_howto_type * howto)
81 {
82   return ! howto->pc_relative && howto->type != R_SH_IMAGEBASE;
83 }
84 #endif
85 
86 static bfd_reloc_status_type
87 sh_reloc (bfd *, arelent *, asymbol *, void *, asection *, bfd *, char **);
88 static bool
89 sh_relocate_section (bfd *, struct bfd_link_info *, bfd *, asection *,
90 		     bfd_byte *, struct internal_reloc *,
91 		     struct internal_syment *, asection **);
92 static bool
93 sh_align_loads (bfd *, asection *, struct internal_reloc *,
94 		bfd_byte *, bool *);
95 
96 /* The supported relocations.  There are a lot of relocations defined
97    in coff/internal.h which we do not expect to ever see.  */
98 static reloc_howto_type sh_coff_howtos[] =
99 {
100   EMPTY_HOWTO (0),
101   EMPTY_HOWTO (1),
102 #ifdef COFF_WITH_PE
103   /* Windows CE */
104   HOWTO (R_SH_IMM32CE,		/* type */
105 	 0,			/* rightshift */
106 	 4,			/* size */
107 	 32,			/* bitsize */
108 	 false,			/* pc_relative */
109 	 0,			/* bitpos */
110 	 complain_overflow_bitfield, /* complain_on_overflow */
111 	 sh_reloc,		/* special_function */
112 	 "r_imm32ce",		/* name */
113 	 true,			/* partial_inplace */
114 	 0xffffffff,		/* src_mask */
115 	 0xffffffff,		/* dst_mask */
116 	 false),		/* pcrel_offset */
117 #else
118   EMPTY_HOWTO (2),
119 #endif
120   EMPTY_HOWTO (3), /* R_SH_PCREL8 */
121   EMPTY_HOWTO (4), /* R_SH_PCREL16 */
122   EMPTY_HOWTO (5), /* R_SH_HIGH8 */
123   EMPTY_HOWTO (6), /* R_SH_IMM24 */
124   EMPTY_HOWTO (7), /* R_SH_LOW16 */
125   EMPTY_HOWTO (8),
126   EMPTY_HOWTO (9), /* R_SH_PCDISP8BY4 */
127 
128   HOWTO (R_SH_PCDISP8BY2,	/* type */
129 	 1,			/* rightshift */
130 	 2,			/* size */
131 	 8,			/* bitsize */
132 	 true,			/* pc_relative */
133 	 0,			/* bitpos */
134 	 complain_overflow_signed, /* complain_on_overflow */
135 	 sh_reloc,		/* special_function */
136 	 "r_pcdisp8by2",	/* name */
137 	 true,			/* partial_inplace */
138 	 0xff,			/* src_mask */
139 	 0xff,			/* dst_mask */
140 	 true),			/* pcrel_offset */
141 
142   EMPTY_HOWTO (11), /* R_SH_PCDISP8 */
143 
144   HOWTO (R_SH_PCDISP,		/* type */
145 	 1,			/* rightshift */
146 	 2,			/* size */
147 	 12,			/* bitsize */
148 	 true,			/* pc_relative */
149 	 0,			/* bitpos */
150 	 complain_overflow_signed, /* complain_on_overflow */
151 	 sh_reloc,		/* special_function */
152 	 "r_pcdisp12by2",	/* name */
153 	 true,			/* partial_inplace */
154 	 0xfff,			/* src_mask */
155 	 0xfff,			/* dst_mask */
156 	 true),			/* pcrel_offset */
157 
158   EMPTY_HOWTO (13),
159 
160   HOWTO (R_SH_IMM32,		/* type */
161 	 0,			/* rightshift */
162 	 4,			/* size */
163 	 32,			/* bitsize */
164 	 false,			/* pc_relative */
165 	 0,			/* bitpos */
166 	 complain_overflow_bitfield, /* complain_on_overflow */
167 	 sh_reloc,		/* special_function */
168 	 "r_imm32",		/* name */
169 	 true,			/* partial_inplace */
170 	 0xffffffff,		/* src_mask */
171 	 0xffffffff,		/* dst_mask */
172 	 false),		/* pcrel_offset */
173 
174   EMPTY_HOWTO (15),
175 #ifdef COFF_WITH_PE
176   HOWTO (R_SH_IMAGEBASE,	/* type */
177 	 0,			/* rightshift */
178 	 4,			/* size */
179 	 32,			/* bitsize */
180 	 false,			/* pc_relative */
181 	 0,			/* bitpos */
182 	 complain_overflow_bitfield, /* complain_on_overflow */
183 	 sh_reloc,		/* special_function */
184 	 "rva32",		/* name */
185 	 true,			/* partial_inplace */
186 	 0xffffffff,		/* src_mask */
187 	 0xffffffff,		/* dst_mask */
188 	 false),		/* pcrel_offset */
189 #else
190   EMPTY_HOWTO (16), /* R_SH_IMM8 */
191 #endif
192   EMPTY_HOWTO (17), /* R_SH_IMM8BY2 */
193   EMPTY_HOWTO (18), /* R_SH_IMM8BY4 */
194   EMPTY_HOWTO (19), /* R_SH_IMM4 */
195   EMPTY_HOWTO (20), /* R_SH_IMM4BY2 */
196   EMPTY_HOWTO (21), /* R_SH_IMM4BY4 */
197 
198   HOWTO (R_SH_PCRELIMM8BY2,	/* type */
199 	 1,			/* rightshift */
200 	 2,			/* size */
201 	 8,			/* bitsize */
202 	 true,			/* pc_relative */
203 	 0,			/* bitpos */
204 	 complain_overflow_unsigned, /* complain_on_overflow */
205 	 sh_reloc,		/* special_function */
206 	 "r_pcrelimm8by2",	/* name */
207 	 true,			/* partial_inplace */
208 	 0xff,			/* src_mask */
209 	 0xff,			/* dst_mask */
210 	 true),			/* pcrel_offset */
211 
212   HOWTO (R_SH_PCRELIMM8BY4,	/* type */
213 	 2,			/* rightshift */
214 	 2,			/* size */
215 	 8,			/* bitsize */
216 	 true,			/* pc_relative */
217 	 0,			/* bitpos */
218 	 complain_overflow_unsigned, /* complain_on_overflow */
219 	 sh_reloc,		/* special_function */
220 	 "r_pcrelimm8by4",	/* name */
221 	 true,			/* partial_inplace */
222 	 0xff,			/* src_mask */
223 	 0xff,			/* dst_mask */
224 	 true),			/* pcrel_offset */
225 
226   HOWTO (R_SH_IMM16,		/* type */
227 	 0,			/* rightshift */
228 	 2,			/* size */
229 	 16,			/* bitsize */
230 	 false,			/* pc_relative */
231 	 0,			/* bitpos */
232 	 complain_overflow_bitfield, /* complain_on_overflow */
233 	 sh_reloc,		/* special_function */
234 	 "r_imm16",		/* name */
235 	 true,			/* partial_inplace */
236 	 0xffff,		/* src_mask */
237 	 0xffff,		/* dst_mask */
238 	 false),		/* pcrel_offset */
239 
240   HOWTO (R_SH_SWITCH16,		/* type */
241 	 0,			/* rightshift */
242 	 2,			/* size */
243 	 16,			/* bitsize */
244 	 false,			/* pc_relative */
245 	 0,			/* bitpos */
246 	 complain_overflow_bitfield, /* complain_on_overflow */
247 	 sh_reloc,		/* special_function */
248 	 "r_switch16",		/* name */
249 	 true,			/* partial_inplace */
250 	 0xffff,		/* src_mask */
251 	 0xffff,		/* dst_mask */
252 	 false),		/* pcrel_offset */
253 
254   HOWTO (R_SH_SWITCH32,		/* type */
255 	 0,			/* rightshift */
256 	 4,			/* size */
257 	 32,			/* bitsize */
258 	 false,			/* pc_relative */
259 	 0,			/* bitpos */
260 	 complain_overflow_bitfield, /* complain_on_overflow */
261 	 sh_reloc,		/* special_function */
262 	 "r_switch32",		/* name */
263 	 true,			/* partial_inplace */
264 	 0xffffffff,		/* src_mask */
265 	 0xffffffff,		/* dst_mask */
266 	 false),		/* pcrel_offset */
267 
268   HOWTO (R_SH_USES,		/* type */
269 	 0,			/* rightshift */
270 	 2,			/* size */
271 	 16,			/* bitsize */
272 	 false,			/* pc_relative */
273 	 0,			/* bitpos */
274 	 complain_overflow_bitfield, /* complain_on_overflow */
275 	 sh_reloc,		/* special_function */
276 	 "r_uses",		/* name */
277 	 true,			/* partial_inplace */
278 	 0xffff,		/* src_mask */
279 	 0xffff,		/* dst_mask */
280 	 false),		/* pcrel_offset */
281 
282   HOWTO (R_SH_COUNT,		/* type */
283 	 0,			/* rightshift */
284 	 4,			/* size */
285 	 32,			/* bitsize */
286 	 false,			/* pc_relative */
287 	 0,			/* bitpos */
288 	 complain_overflow_bitfield, /* complain_on_overflow */
289 	 sh_reloc,		/* special_function */
290 	 "r_count",		/* name */
291 	 true,			/* partial_inplace */
292 	 0xffffffff,		/* src_mask */
293 	 0xffffffff,		/* dst_mask */
294 	 false),		/* pcrel_offset */
295 
296   HOWTO (R_SH_ALIGN,		/* type */
297 	 0,			/* rightshift */
298 	 4,			/* size */
299 	 32,			/* bitsize */
300 	 false,			/* pc_relative */
301 	 0,			/* bitpos */
302 	 complain_overflow_bitfield, /* complain_on_overflow */
303 	 sh_reloc,		/* special_function */
304 	 "r_align",		/* name */
305 	 true,			/* partial_inplace */
306 	 0xffffffff,		/* src_mask */
307 	 0xffffffff,		/* dst_mask */
308 	 false),		/* pcrel_offset */
309 
310   HOWTO (R_SH_CODE,		/* type */
311 	 0,			/* rightshift */
312 	 4,			/* size */
313 	 32,			/* bitsize */
314 	 false,			/* pc_relative */
315 	 0,			/* bitpos */
316 	 complain_overflow_bitfield, /* complain_on_overflow */
317 	 sh_reloc,		/* special_function */
318 	 "r_code",		/* name */
319 	 true,			/* partial_inplace */
320 	 0xffffffff,		/* src_mask */
321 	 0xffffffff,		/* dst_mask */
322 	 false),		/* pcrel_offset */
323 
324   HOWTO (R_SH_DATA,		/* type */
325 	 0,			/* rightshift */
326 	 4,			/* size */
327 	 32,			/* bitsize */
328 	 false,			/* pc_relative */
329 	 0,			/* bitpos */
330 	 complain_overflow_bitfield, /* complain_on_overflow */
331 	 sh_reloc,		/* special_function */
332 	 "r_data",		/* name */
333 	 true,			/* partial_inplace */
334 	 0xffffffff,		/* src_mask */
335 	 0xffffffff,		/* dst_mask */
336 	 false),		/* pcrel_offset */
337 
338   HOWTO (R_SH_LABEL,		/* type */
339 	 0,			/* rightshift */
340 	 4,			/* size */
341 	 32,			/* bitsize */
342 	 false,			/* pc_relative */
343 	 0,			/* bitpos */
344 	 complain_overflow_bitfield, /* complain_on_overflow */
345 	 sh_reloc,		/* special_function */
346 	 "r_label",		/* name */
347 	 true,			/* partial_inplace */
348 	 0xffffffff,		/* src_mask */
349 	 0xffffffff,		/* dst_mask */
350 	 false),		/* pcrel_offset */
351 
352   HOWTO (R_SH_SWITCH8,		/* type */
353 	 0,			/* rightshift */
354 	 1,			/* size */
355 	 8,			/* bitsize */
356 	 false,			/* pc_relative */
357 	 0,			/* bitpos */
358 	 complain_overflow_bitfield, /* complain_on_overflow */
359 	 sh_reloc,		/* special_function */
360 	 "r_switch8",		/* name */
361 	 true,			/* partial_inplace */
362 	 0xff,			/* src_mask */
363 	 0xff,			/* dst_mask */
364 	 false)			/* pcrel_offset */
365 };
366 
367 #define SH_COFF_HOWTO_COUNT (sizeof sh_coff_howtos / sizeof sh_coff_howtos[0])
368 
369 /* Check for a bad magic number.  */
370 #define BADMAG(x) SHBADMAG(x)
371 
372 /* Customize coffcode.h (this is not currently used).  */
373 #define SH 1
374 
375 /* FIXME: This should not be set here.  */
376 #define __A_MAGIC_SET__
377 
378 #ifndef COFF_WITH_PE
379 /* Swap the r_offset field in and out.  */
380 #define SWAP_IN_RELOC_OFFSET  H_GET_32
381 #define SWAP_OUT_RELOC_OFFSET H_PUT_32
382 
383 /* Swap out extra information in the reloc structure.  */
384 #define SWAP_OUT_RELOC_EXTRA(abfd, src, dst)	\
385   do						\
386     {						\
387       dst->r_stuff[0] = 'S';			\
388       dst->r_stuff[1] = 'C';			\
389     }						\
390   while (0)
391 #endif
392 
393 /* Get the value of a symbol, when performing a relocation.  */
394 
395 static long
396 get_symbol_value (asymbol *symbol)
397 {
398   bfd_vma relocation;
399 
400   if (bfd_is_com_section (symbol->section))
401     relocation = 0;
402   else
403     relocation = (symbol->value +
404 		  symbol->section->output_section->vma +
405 		  symbol->section->output_offset);
406 
407   return relocation;
408 }
409 
410 #ifdef COFF_WITH_PE
411 /* Convert an rtype to howto for the COFF backend linker.
412    Copied from coff-i386.  */
413 #define coff_rtype_to_howto coff_sh_rtype_to_howto
414 
415 
416 static reloc_howto_type *
417 coff_sh_rtype_to_howto (bfd * abfd ATTRIBUTE_UNUSED,
418 			asection * sec,
419 			struct internal_reloc * rel,
420 			struct coff_link_hash_entry * h,
421 			struct internal_syment * sym,
422 			bfd_vma * addendp)
423 {
424   reloc_howto_type * howto;
425 
426   howto = sh_coff_howtos + rel->r_type;
427 
428   *addendp = 0;
429 
430   if (howto->pc_relative)
431     *addendp += sec->vma;
432 
433   if (sym != NULL && sym->n_scnum == 0 && sym->n_value != 0)
434     {
435       /* This is a common symbol.  The section contents include the
436 	 size (sym->n_value) as an addend.  The relocate_section
437 	 function will be adding in the final value of the symbol.  We
438 	 need to subtract out the current size in order to get the
439 	 correct result.  */
440       BFD_ASSERT (h != NULL);
441     }
442 
443   if (howto->pc_relative)
444     {
445       *addendp -= 4;
446 
447       /* If the symbol is defined, then the generic code is going to
448 	 add back the symbol value in order to cancel out an
449 	 adjustment it made to the addend.  However, we set the addend
450 	 to 0 at the start of this function.  We need to adjust here,
451 	 to avoid the adjustment the generic code will make.  FIXME:
452 	 This is getting a bit hackish.  */
453       if (sym != NULL && sym->n_scnum != 0)
454 	*addendp -= sym->n_value;
455     }
456 
457   if (rel->r_type == R_SH_IMAGEBASE)
458     *addendp -= pe_data (sec->output_section->owner)->pe_opthdr.ImageBase;
459 
460   return howto;
461 }
462 
463 #endif /* COFF_WITH_PE */
464 
465 /* This structure is used to map BFD reloc codes to SH PE relocs.  */
466 struct shcoff_reloc_map
467 {
468   bfd_reloc_code_real_type bfd_reloc_val;
469   unsigned char shcoff_reloc_val;
470 };
471 
472 #ifdef COFF_WITH_PE
473 /* An array mapping BFD reloc codes to SH PE relocs.  */
474 static const struct shcoff_reloc_map sh_reloc_map[] =
475 {
476   { BFD_RELOC_32, R_SH_IMM32CE },
477   { BFD_RELOC_RVA, R_SH_IMAGEBASE },
478   { BFD_RELOC_CTOR, R_SH_IMM32CE },
479 };
480 #else
481 /* An array mapping BFD reloc codes to SH relocs.  */
482 static const struct shcoff_reloc_map sh_reloc_map[] =
483 {
484   { BFD_RELOC_32, R_SH_IMM32 },
485   { BFD_RELOC_CTOR, R_SH_IMM32 },
486 };
487 #endif
488 
489 /* Given a BFD reloc code, return the howto structure for the
490    corresponding SH PE reloc.  */
491 #define coff_bfd_reloc_type_lookup	sh_coff_reloc_type_lookup
492 #define coff_bfd_reloc_name_lookup sh_coff_reloc_name_lookup
493 
494 static reloc_howto_type *
495 sh_coff_reloc_type_lookup (bfd *abfd,
496 			   bfd_reloc_code_real_type code)
497 {
498   unsigned int i;
499 
500   for (i = ARRAY_SIZE (sh_reloc_map); i--;)
501     if (sh_reloc_map[i].bfd_reloc_val == code)
502       return &sh_coff_howtos[(int) sh_reloc_map[i].shcoff_reloc_val];
503 
504   _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
505 		      abfd, (unsigned int) code);
506   return NULL;
507 }
508 
509 static reloc_howto_type *
510 sh_coff_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
511 			   const char *r_name)
512 {
513   unsigned int i;
514 
515   for (i = 0; i < sizeof (sh_coff_howtos) / sizeof (sh_coff_howtos[0]); i++)
516     if (sh_coff_howtos[i].name != NULL
517 	&& strcasecmp (sh_coff_howtos[i].name, r_name) == 0)
518       return &sh_coff_howtos[i];
519 
520   return NULL;
521 }
522 
523 /* This macro is used in coffcode.h to get the howto corresponding to
524    an internal reloc.  */
525 
526 #define RTYPE2HOWTO(relent, internal)		\
527   ((relent)->howto =				\
528    ((internal)->r_type < SH_COFF_HOWTO_COUNT	\
529     ? &sh_coff_howtos[(internal)->r_type]	\
530     : (reloc_howto_type *) NULL))
531 
532 /* This is the same as the macro in coffcode.h, except that it copies
533    r_offset into reloc_entry->addend for some relocs.  */
534 #define CALC_ADDEND(abfd, ptr, reloc, cache_ptr)		\
535   {								\
536     coff_symbol_type *coffsym = (coff_symbol_type *) NULL;	\
537     if (ptr && bfd_asymbol_bfd (ptr) != abfd)			\
538       coffsym = (obj_symbols (abfd)				\
539 		 + (cache_ptr->sym_ptr_ptr - symbols));		\
540     else if (ptr)						\
541       coffsym = coff_symbol_from (ptr);				\
542     if (coffsym != (coff_symbol_type *) NULL			\
543 	&& coffsym->native->u.syment.n_scnum == 0)		\
544       cache_ptr->addend = 0;					\
545     else if (ptr && bfd_asymbol_bfd (ptr) == abfd		\
546 	     && ptr->section != (asection *) NULL)		\
547       cache_ptr->addend = - (ptr->section->vma + ptr->value);	\
548     else							\
549       cache_ptr->addend = 0;					\
550     if ((reloc).r_type == R_SH_SWITCH8				\
551 	|| (reloc).r_type == R_SH_SWITCH16			\
552 	|| (reloc).r_type == R_SH_SWITCH32			\
553 	|| (reloc).r_type == R_SH_USES				\
554 	|| (reloc).r_type == R_SH_COUNT				\
555 	|| (reloc).r_type == R_SH_ALIGN)			\
556       cache_ptr->addend = (reloc).r_offset;			\
557   }
558 
559 /* This is the howto function for the SH relocations.  */
560 
561 static bfd_reloc_status_type
562 sh_reloc (bfd *      abfd,
563 	  arelent *  reloc_entry,
564 	  asymbol *  symbol_in,
565 	  void *     data,
566 	  asection * input_section,
567 	  bfd *      output_bfd,
568 	  char **    error_message ATTRIBUTE_UNUSED)
569 {
570   bfd_vma insn;
571   bfd_vma sym_value;
572   unsigned short r_type;
573   bfd_vma addr = reloc_entry->address;
574   bfd_byte *hit_data = addr + (bfd_byte *) data;
575 
576   r_type = reloc_entry->howto->type;
577 
578   if (output_bfd != NULL)
579     {
580       /* Partial linking--do nothing.  */
581       reloc_entry->address += input_section->output_offset;
582       return bfd_reloc_ok;
583     }
584 
585   /* Almost all relocs have to do with relaxing.  If any work must be
586      done for them, it has been done in sh_relax_section.  */
587   if (r_type != R_SH_IMM32
588 #ifdef COFF_WITH_PE
589       && r_type != R_SH_IMM32CE
590       && r_type != R_SH_IMAGEBASE
591 #endif
592       && (r_type != R_SH_PCDISP
593 	  || (symbol_in->flags & BSF_LOCAL) != 0))
594     return bfd_reloc_ok;
595 
596   if (symbol_in != NULL
597       && bfd_is_und_section (symbol_in->section))
598     return bfd_reloc_undefined;
599 
600   if (!bfd_reloc_offset_in_range (reloc_entry->howto, abfd, input_section,
601 				  addr))
602     return bfd_reloc_outofrange;
603 
604   sym_value = get_symbol_value (symbol_in);
605 
606   switch (r_type)
607     {
608     case R_SH_IMM32:
609 #ifdef COFF_WITH_PE
610     case R_SH_IMM32CE:
611 #endif
612       insn = bfd_get_32 (abfd, hit_data);
613       insn += sym_value + reloc_entry->addend;
614       bfd_put_32 (abfd, insn, hit_data);
615       break;
616 #ifdef COFF_WITH_PE
617     case R_SH_IMAGEBASE:
618       insn = bfd_get_32 (abfd, hit_data);
619       insn += sym_value + reloc_entry->addend;
620       insn -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
621       bfd_put_32 (abfd, insn, hit_data);
622       break;
623 #endif
624     case R_SH_PCDISP:
625       insn = bfd_get_16 (abfd, hit_data);
626       sym_value += reloc_entry->addend;
627       sym_value -= (input_section->output_section->vma
628 		    + input_section->output_offset
629 		    + addr
630 		    + 4);
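      /* Add in the displacement already encoded in the insn, sign
	 extending the 12-bit field and converting it to a byte offset
	 (e.g. a field of 0xffe means -2, i.e. -4 bytes).  */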
631       sym_value += (((insn & 0xfff) ^ 0x800) - 0x800) << 1;
632       insn = (insn & 0xf000) | ((sym_value >> 1) & 0xfff);
633       bfd_put_16 (abfd, insn, hit_data);
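      /* The branch displacement must be even and must fit in the
	 signed 12-bit field, i.e. lie within [-0x1000, 0xffe] bytes.  */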
634       if (sym_value + 0x1000 >= 0x2000 || (sym_value & 1) != 0)
635 	return bfd_reloc_overflow;
636       break;
637     default:
638       abort ();
639       break;
640     }
641 
642   return bfd_reloc_ok;
643 }
644 
645 #define coff_bfd_merge_private_bfd_data _bfd_generic_verify_endian_match
646 
647 /* We can do relaxing.  */
648 #define coff_bfd_relax_section sh_relax_section
649 
650 /* We use the special COFF backend linker.  */
651 #define coff_relocate_section sh_relocate_section
652 
653 /* When relaxing, we need to use special code to get the relocated
654    section contents.  */
655 #define coff_bfd_get_relocated_section_contents \
656   sh_coff_get_relocated_section_contents
657 
658 #include "coffcode.h"
659 
660 static bool
661 sh_relax_delete_bytes (bfd *, asection *, bfd_vma, int);
662 
663 /* This function handles relaxing on the SH.
664 
665    Function calls on the SH look like this:
666 
667        movl  L1,r0
668        ...
669        jsr   @r0
670        ...
671      L1:
672        .long function
673 
674    The compiler and assembler will cooperate to create R_SH_USES
675    relocs on the jsr instructions.  The r_offset field of the
676    R_SH_USES reloc is the PC relative offset to the instruction which
677    loads the register (the r_offset field is computed as though it
678    were a jump instruction, so the offset value is actually from four
679    bytes past the instruction).  The linker can use this reloc to
680    determine just which function is being called, and thus decide
681    whether it is possible to replace the jsr with a bsr.
682 
683    If multiple function calls are all based on a single register load
684    (i.e., the same function is called multiple times), the compiler
685    guarantees that each function call will have an R_SH_USES reloc.
686    Therefore, if the linker is able to convert each R_SH_USES reloc
687    which refers to that address, it can safely eliminate the register
688    load.
689 
690    When the assembler creates an R_SH_USES reloc, it examines it to
691    determine which address is being loaded (L1 in the above example).
692    It then counts the number of references to that address, and
693    creates an R_SH_COUNT reloc at that address.  The r_offset field of
694    the R_SH_COUNT reloc will be the number of references.  If the
695    linker is able to eliminate a register load, it can use the
696    R_SH_COUNT reloc to see whether it can also eliminate the function
697    address.
698 
699    SH relaxing also handles another, unrelated, matter.  On the SH, if
700    a load or store instruction is not aligned on a four byte boundary,
701    the memory cycle interferes with the 32 bit instruction fetch,
702    causing a one cycle bubble in the pipeline.  Therefore, we try to
703    align load and store instructions on four byte boundaries if we
704    can, by swapping them with one of the adjacent instructions.  */
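
/* To make the conversion concrete, here is a purely illustrative
   before/after sketch (hypothetical addresses and symbol names, not
   something this file emits):

       mov.l  L1,r0		! register load; deleted (2 bytes) once no
       ...			! remaining R_SH_USES reloc refers to it
       jsr    @r0		! rewritten to "bsr function"; its R_SH_USES
       ...			! reloc becomes an R_SH_PCDISP reloc
     L1:
       .long  function		! the R_SH_COUNT here is decremented; when
				! it reaches zero these 4 bytes are deleted

   The byte deletions themselves are performed by sh_relax_delete_bytes
   below.  */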
705 
706 static bool
707 sh_relax_section (bfd *abfd,
708 		  asection *sec,
709 		  struct bfd_link_info *link_info,
710 		  bool *again)
711 {
712   struct internal_reloc *internal_relocs;
713   bool have_code;
714   struct internal_reloc *irel, *irelend;
715   bfd_byte *contents = NULL;
716 
717   *again = false;
718 
719   if (bfd_link_relocatable (link_info)
720       || (sec->flags & SEC_RELOC) == 0
721       || sec->reloc_count == 0)
722     return true;
723 
724   if (coff_section_data (abfd, sec) == NULL)
725     {
726       size_t amt = sizeof (struct coff_section_tdata);
727       sec->used_by_bfd = bfd_zalloc (abfd, amt);
728       if (sec->used_by_bfd == NULL)
729 	return false;
730     }
731 
732   internal_relocs = (_bfd_coff_read_internal_relocs
733 		     (abfd, sec, link_info->keep_memory,
734 		      (bfd_byte *) NULL, false,
735 		      (struct internal_reloc *) NULL));
736   if (internal_relocs == NULL)
737     goto error_return;
738 
739   have_code = false;
740 
741   irelend = internal_relocs + sec->reloc_count;
742   for (irel = internal_relocs; irel < irelend; irel++)
743     {
744       bfd_vma laddr, paddr, symval;
745       unsigned short insn;
746       struct internal_reloc *irelfn, *irelscan, *irelcount;
747       struct internal_syment sym;
748       bfd_signed_vma foff;
749 
750       if (irel->r_type == R_SH_CODE)
751 	have_code = true;
752 
753       if (irel->r_type != R_SH_USES)
754 	continue;
755 
756       /* Get the section contents.  */
757       if (contents == NULL)
758 	{
759 	  if (coff_section_data (abfd, sec)->contents != NULL)
760 	    contents = coff_section_data (abfd, sec)->contents;
761 	  else
762 	    {
763 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
764 		goto error_return;
765 	    }
766 	}
767 
768       /* The r_offset field of the R_SH_USES reloc will point us to
769 	 the register load.  The 4 is because the r_offset field is
770 	 computed as though it were a jump offset, and jump offsets are
771 	 measured from 4 bytes after the jump instruction.  */
772       laddr = irel->r_vaddr - sec->vma + 4;
773       /* Careful to sign extend the 32-bit offset.  */
774       laddr += ((irel->r_offset & 0xffffffff) ^ 0x80000000) - 0x80000000;
775       if (laddr >= sec->size)
776 	{
777 	  /* xgettext: c-format */
778 	  _bfd_error_handler
779 	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES offset"),
780 	     abfd, (uint64_t) irel->r_vaddr);
781 	  continue;
782 	}
783       insn = bfd_get_16 (abfd, contents + laddr);
784 
785       /* If the instruction is not mov.l NN,rN, we don't know what to do.  */
786       if ((insn & 0xf000) != 0xd000)
787 	{
788 	  _bfd_error_handler
789 	    /* xgettext: c-format */
790 	    (_("%pB: %#" PRIx64 ": warning: R_SH_USES points to unrecognized insn %#x"),
791 	     abfd, (uint64_t) irel->r_vaddr, insn);
792 	  continue;
793 	}
794 
795       /* Get the address from which the register is being loaded.  The
796 	 displacement in the mov.l instruction is quadrupled.  It is a
797 	 displacement from four bytes after the mov.l instruction, but,
798 	 before adding in the PC address, two least significant bits
799 	 of the PC are cleared.  We assume that the section is aligned
800 	 on a four byte boundary.  */
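      /* Worked example (hypothetical numbers, for illustration only):
	 for insn 0xd103 (mov.l @(12,pc),r1) with laddr 0x102, the code
	 below computes paddr = 3 * 4 + ((0x102 + 4) & ~3)
	 = 0xc + 0x104 = 0x110.  */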
801       paddr = insn & 0xff;
802       paddr *= 4;
803       paddr += (laddr + 4) &~ (bfd_vma) 3;
804       if (paddr >= sec->size)
805 	{
806 	  _bfd_error_handler
807 	    /* xgettext: c-format */
808 	    (_("%pB: %#" PRIx64 ": warning: bad R_SH_USES load offset"),
809 	     abfd, (uint64_t) irel->r_vaddr);
810 	  continue;
811 	}
812 
813       /* Get the reloc for the address from which the register is
814 	 being loaded.  This reloc will tell us which function is
815 	 actually being called.  */
816       paddr += sec->vma;
817       for (irelfn = internal_relocs; irelfn < irelend; irelfn++)
818 	if (irelfn->r_vaddr == paddr
819 #ifdef COFF_WITH_PE
820 	    && (irelfn->r_type == R_SH_IMM32
821 		|| irelfn->r_type == R_SH_IMM32CE
822 		|| irelfn->r_type == R_SH_IMAGEBASE)
823 
824 #else
825 	    && irelfn->r_type == R_SH_IMM32
826 #endif
827 	    )
828 	  break;
829       if (irelfn >= irelend)
830 	{
831 	  _bfd_error_handler
832 	    /* xgettext: c-format */
833 	    (_("%pB: %#" PRIx64 ": warning: could not find expected reloc"),
834 	     abfd, (uint64_t) paddr);
835 	  continue;
836 	}
837 
838       /* Get the value of the symbol referred to by the reloc.  */
839       if (! _bfd_coff_get_external_symbols (abfd))
840 	goto error_return;
841       bfd_coff_swap_sym_in (abfd,
842 			    ((bfd_byte *) obj_coff_external_syms (abfd)
843 			     + (irelfn->r_symndx
844 				* bfd_coff_symesz (abfd))),
845 			    &sym);
846       if (sym.n_scnum != 0 && sym.n_scnum != sec->target_index)
847 	{
848 	  _bfd_error_handler
849 	    /* xgettext: c-format */
850 	    (_("%pB: %#" PRIx64 ": warning: symbol in unexpected section"),
851 	     abfd, (uint64_t) paddr);
852 	  continue;
853 	}
854 
855       if (sym.n_sclass != C_EXT)
856 	{
857 	  symval = (sym.n_value
858 		    - sec->vma
859 		    + sec->output_section->vma
860 		    + sec->output_offset);
861 	}
862       else
863 	{
864 	  struct coff_link_hash_entry *h;
865 
866 	  h = obj_coff_sym_hashes (abfd)[irelfn->r_symndx];
867 	  BFD_ASSERT (h != NULL);
868 	  if (h->root.type != bfd_link_hash_defined
869 	      && h->root.type != bfd_link_hash_defweak)
870 	    {
871 	      /* This appears to be a reference to an undefined
872 		 symbol.  Just ignore it--it will be caught by the
873 		 regular reloc processing.  */
874 	      continue;
875 	    }
876 
877 	  symval = (h->root.u.def.value
878 		    + h->root.u.def.section->output_section->vma
879 		    + h->root.u.def.section->output_offset);
880 	}
881 
882       symval += bfd_get_32 (abfd, contents + paddr - sec->vma);
883 
884       /* See if this function call can be shortened.  */
885       foff = (symval
886 	      - (irel->r_vaddr
887 		 - sec->vma
888 		 + sec->output_section->vma
889 		 + sec->output_offset
890 		 + 4));
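      /* A bsr has a signed 12-bit displacement counted in 2-byte
	 units, so it can only reach within +/-0x1000 bytes.  */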
891       if (foff < -0x1000 || foff >= 0x1000)
892 	{
893 	  /* After all that work, we can't shorten this function call.  */
894 	  continue;
895 	}
896 
897       /* Shorten the function call.  */
898 
899       /* For simplicity of coding, we are going to modify the section
900 	 contents, the section relocs, and the BFD symbol table.  We
901 	 must tell the rest of the code not to free up this
902 	 information.  It would be possible to instead create a table
903 	 of changes which have to be made, as is done in coff-mips.c;
904 	 that would be more work, but would require less memory when
905 	 the linker is run.  */
906 
907       coff_section_data (abfd, sec)->relocs = internal_relocs;
908       coff_section_data (abfd, sec)->keep_relocs = true;
909 
910       coff_section_data (abfd, sec)->contents = contents;
911       coff_section_data (abfd, sec)->keep_contents = true;
912 
913       obj_coff_keep_syms (abfd) = true;
914 
915       /* Replace the jsr with a bsr.  */
916 
917       /* Change the R_SH_USES reloc into an R_SH_PCDISP reloc, and
918 	 replace the jsr with a bsr.  */
919       irel->r_type = R_SH_PCDISP;
920       irel->r_symndx = irelfn->r_symndx;
921       if (sym.n_sclass != C_EXT)
922 	{
923 	  /* If this needs to be changed because of future relaxing,
924 	     it will be handled here like other internal PCDISP
925 	     relocs.  */
926 	  bfd_put_16 (abfd,
927 		      (bfd_vma) 0xb000 | ((foff >> 1) & 0xfff),
928 		      contents + irel->r_vaddr - sec->vma);
929 	}
930       else
931 	{
932 	  /* We can't fully resolve this yet, because the external
933 	     symbol value may be changed by future relaxing.  We let
934 	     the final link phase handle it.  */
935 	  bfd_put_16 (abfd, (bfd_vma) 0xb000,
936 		      contents + irel->r_vaddr - sec->vma);
937 	}
938 
939       /* See if there is another R_SH_USES reloc referring to the same
940 	 register load.  */
941       for (irelscan = internal_relocs; irelscan < irelend; irelscan++)
942 	if (irelscan->r_type == R_SH_USES
943 	    && laddr == irelscan->r_vaddr - sec->vma + 4 + irelscan->r_offset)
944 	  break;
945       if (irelscan < irelend)
946 	{
947 	  /* Some other function call depends upon this register load,
948 	     and we have not yet converted that function call.
949 	     Indeed, we may never be able to convert it.  There is
950 	     nothing else we can do at this point.  */
951 	  continue;
952 	}
953 
954       /* Look for a R_SH_COUNT reloc on the location where the
955 	 function address is stored.  Do this before deleting any
956 	 bytes, to avoid confusion about the address.  */
957       for (irelcount = internal_relocs; irelcount < irelend; irelcount++)
958 	if (irelcount->r_vaddr == paddr
959 	    && irelcount->r_type == R_SH_COUNT)
960 	  break;
961 
962       /* Delete the register load.  */
963       if (! sh_relax_delete_bytes (abfd, sec, laddr, 2))
964 	goto error_return;
965 
966       /* That will change things, so, just in case it permits some
967 	 other function call to come within range, we should relax
968 	 again.  Note that this is not required, and it may be slow.  */
969       *again = true;
970 
971       /* Now check whether we got a COUNT reloc.  */
972       if (irelcount >= irelend)
973 	{
974 	  _bfd_error_handler
975 	    /* xgettext: c-format */
976 	    (_("%pB: %#" PRIx64 ": warning: could not find expected COUNT reloc"),
977 	     abfd, (uint64_t) paddr);
978 	  continue;
979 	}
980 
981       /* The number of uses is stored in the r_offset field.  We've
982 	 just deleted one.  */
983       if (irelcount->r_offset == 0)
984 	{
985 	  /* xgettext: c-format */
986 	  _bfd_error_handler (_("%pB: %#" PRIx64 ": warning: bad count"),
987 			      abfd, (uint64_t) paddr);
988 	  continue;
989 	}
990 
991       --irelcount->r_offset;
992 
993       /* If there are no more uses, we can delete the address.  Reload
994 	 the address from irelfn, in case it was changed by the
995 	 previous call to sh_relax_delete_bytes.  */
996       if (irelcount->r_offset == 0)
997 	{
998 	  if (! sh_relax_delete_bytes (abfd, sec,
999 				       irelfn->r_vaddr - sec->vma, 4))
1000 	    goto error_return;
1001 	}
1002 
1003       /* We've done all we can with that function call.  */
1004     }
1005 
1006   /* Look for load and store instructions that we can align on four
1007      byte boundaries.  */
1008   if (have_code)
1009     {
1010       bool swapped;
1011 
1012       /* Get the section contents.  */
1013       if (contents == NULL)
1014 	{
1015 	  if (coff_section_data (abfd, sec)->contents != NULL)
1016 	    contents = coff_section_data (abfd, sec)->contents;
1017 	  else
1018 	    {
1019 	      if (!bfd_malloc_and_get_section (abfd, sec, &contents))
1020 		goto error_return;
1021 	    }
1022 	}
1023 
1024       if (! sh_align_loads (abfd, sec, internal_relocs, contents, &swapped))
1025 	goto error_return;
1026 
1027       if (swapped)
1028 	{
1029 	  coff_section_data (abfd, sec)->relocs = internal_relocs;
1030 	  coff_section_data (abfd, sec)->keep_relocs = true;
1031 
1032 	  coff_section_data (abfd, sec)->contents = contents;
1033 	  coff_section_data (abfd, sec)->keep_contents = true;
1034 
1035 	  obj_coff_keep_syms (abfd) = true;
1036 	}
1037     }
1038 
1039   if (internal_relocs != NULL
1040       && internal_relocs != coff_section_data (abfd, sec)->relocs)
1041     {
1042       if (! link_info->keep_memory)
1043 	free (internal_relocs);
1044       else
1045 	coff_section_data (abfd, sec)->relocs = internal_relocs;
1046     }
1047 
1048   if (contents != NULL && contents != coff_section_data (abfd, sec)->contents)
1049     {
1050       if (! link_info->keep_memory)
1051 	free (contents);
1052       else
1053 	/* Cache the section contents for coff_link_input_bfd.  */
1054 	coff_section_data (abfd, sec)->contents = contents;
1055     }
1056 
1057   return true;
1058 
1059  error_return:
1060   if (internal_relocs != coff_section_data (abfd, sec)->relocs)
1061     free (internal_relocs);
1062   if (contents != coff_section_data (abfd, sec)->contents)
1063     free (contents);
1064   return false;
1065 }
1066 
1067 /* Delete some bytes from a section while relaxing.  */
1068 
1069 static bool
1070 sh_relax_delete_bytes (bfd *abfd,
1071 		       asection *sec,
1072 		       bfd_vma addr,
1073 		       int count)
1074 {
1075   bfd_byte *contents;
1076   struct internal_reloc *irel, *irelend;
1077   struct internal_reloc *irelalign;
1078   bfd_vma toaddr;
1079   bfd_byte *esym, *esymend;
1080   bfd_size_type symesz;
1081   struct coff_link_hash_entry **sym_hash;
1082   asection *o;
1083 
1084   contents = coff_section_data (abfd, sec)->contents;
1085 
1086   /* The deletion must stop at the next ALIGN reloc whose alignment
1087      amount is larger than the number of bytes we are deleting.  */
1088 
1089   irelalign = NULL;
1090   toaddr = sec->size;
1091 
1092   irel = coff_section_data (abfd, sec)->relocs;
1093   irelend = irel + sec->reloc_count;
1094   for (; irel < irelend; irel++)
1095     {
1096       if (irel->r_type == R_SH_ALIGN
1097 	  && irel->r_vaddr - sec->vma > addr
1098 	  && count < (1 << irel->r_offset))
1099 	{
1100 	  irelalign = irel;
1101 	  toaddr = irel->r_vaddr - sec->vma;
1102 	  break;
1103 	}
1104     }
1105 
1106   /* Actually delete the bytes.  */
1107   memmove (contents + addr, contents + addr + count,
1108 	   (size_t) (toaddr - addr - count));
1109   if (irelalign == NULL)
1110     sec->size -= count;
1111   else
1112     {
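      /* We are not moving the following ALIGN boundary, so pad the
	 vacated bytes with NOPs instead of shrinking the section.  */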
1113       int i;
1114 
1115 #define NOP_OPCODE (0x0009)
1116 
1117       BFD_ASSERT ((count & 1) == 0);
1118       for (i = 0; i < count; i += 2)
1119 	bfd_put_16 (abfd, (bfd_vma) NOP_OPCODE, contents + toaddr - count + i);
1120     }
1121 
1122   /* Adjust all the relocs.  */
1123   for (irel = coff_section_data (abfd, sec)->relocs; irel < irelend; irel++)
1124     {
1125       bfd_vma nraddr, stop;
1126       bfd_vma start = 0;
1127       int insn = 0;
1128       struct internal_syment sym;
1129       int off, adjust, oinsn;
1130       bfd_signed_vma voff = 0;
1131       bool overflow;
1132 
1133       /* Get the new reloc address.  */
1134       nraddr = irel->r_vaddr - sec->vma;
1135       if ((irel->r_vaddr - sec->vma > addr
1136 	   && irel->r_vaddr - sec->vma < toaddr)
1137 	  || (irel->r_type == R_SH_ALIGN
1138 	      && irel->r_vaddr - sec->vma == toaddr))
1139 	nraddr -= count;
1140 
1141       /* See if this reloc was for the bytes we have deleted, in which
1142 	 case we no longer care about it.  Don't delete relocs which
1143 	 represent addresses, though.  */
1144       if (irel->r_vaddr - sec->vma >= addr
1145 	  && irel->r_vaddr - sec->vma < addr + count
1146 	  && irel->r_type != R_SH_ALIGN
1147 	  && irel->r_type != R_SH_CODE
1148 	  && irel->r_type != R_SH_DATA
1149 	  && irel->r_type != R_SH_LABEL)
1150 	irel->r_type = R_SH_UNUSED;
1151 
1152       /* If this is a PC relative reloc, see if the range it covers
1153 	 includes the bytes we have deleted.  */
1154       switch (irel->r_type)
1155 	{
1156 	default:
1157 	  break;
1158 
1159 	case R_SH_PCDISP8BY2:
1160 	case R_SH_PCDISP:
1161 	case R_SH_PCRELIMM8BY2:
1162 	case R_SH_PCRELIMM8BY4:
1163 	  start = irel->r_vaddr - sec->vma;
1164 	  insn = bfd_get_16 (abfd, contents + nraddr);
1165 	  break;
1166 	}
1167 
1168       switch (irel->r_type)
1169 	{
1170 	default:
1171 	  start = stop = addr;
1172 	  break;
1173 
1174 	case R_SH_IMM32:
1175 #ifdef COFF_WITH_PE
1176 	case R_SH_IMM32CE:
1177 	case R_SH_IMAGEBASE:
1178 #endif
1179 	  /* If this reloc is against a symbol defined in this
1180 	     section, and the symbol will not be adjusted below, we
1181 	     must check the addend to see whether it puts the value in the
1182 	     range being adjusted, in which case the contents must be changed.  */
1183 	  bfd_coff_swap_sym_in (abfd,
1184 				((bfd_byte *) obj_coff_external_syms (abfd)
1185 				 + (irel->r_symndx
1186 				    * bfd_coff_symesz (abfd))),
1187 				&sym);
1188 	  if (sym.n_sclass != C_EXT
1189 	      && sym.n_scnum == sec->target_index
1190 	      && ((bfd_vma) sym.n_value <= addr
1191 		  || (bfd_vma) sym.n_value >= toaddr))
1192 	    {
1193 	      bfd_vma val;
1194 
1195 	      val = bfd_get_32 (abfd, contents + nraddr);
1196 	      val += sym.n_value;
1197 	      if (val > addr && val < toaddr)
1198 		bfd_put_32 (abfd, val - count, contents + nraddr);
1199 	    }
1200 	  start = stop = addr;
1201 	  break;
1202 
1203 	case R_SH_PCDISP8BY2:
1204 	  off = insn & 0xff;
1205 	  if (off & 0x80)
1206 	    off -= 0x100;
1207 	  stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1208 	  break;
1209 
1210 	case R_SH_PCDISP:
1211 	  bfd_coff_swap_sym_in (abfd,
1212 				((bfd_byte *) obj_coff_external_syms (abfd)
1213 				 + (irel->r_symndx
1214 				    * bfd_coff_symesz (abfd))),
1215 				&sym);
1216 	  if (sym.n_sclass == C_EXT)
1217 	    start = stop = addr;
1218 	  else
1219 	    {
1220 	      off = insn & 0xfff;
1221 	      if (off & 0x800)
1222 		off -= 0x1000;
1223 	      stop = (bfd_vma) ((bfd_signed_vma) start + 4 + off * 2);
1224 	    }
1225 	  break;
1226 
1227 	case R_SH_PCRELIMM8BY2:
1228 	  off = insn & 0xff;
1229 	  stop = start + 4 + off * 2;
1230 	  break;
1231 
1232 	case R_SH_PCRELIMM8BY4:
1233 	  off = insn & 0xff;
1234 	  stop = (start &~ (bfd_vma) 3) + 4 + off * 4;
1235 	  break;
1236 
1237 	case R_SH_SWITCH8:
1238 	case R_SH_SWITCH16:
1239 	case R_SH_SWITCH32:
1240 	  /* These reloc types represent
1241 	       .word L2-L1
1242 	     The r_offset field holds the difference between the reloc
1243 	     address and L1.  That is the start of the reloc, and
1244 	     adding in the contents gives us the top.  We must adjust
1245 	     both the r_offset field and the section contents.  */
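	  /* Illustrative example (hypothetical numbers): for an
	     R_SH_SWITCH16 at section offset 0x200 with r_offset 0x10
	     and stored contents 0x30, the code below computes
	     L1 = 0x200 - 0x10 = 0x1f0 and L2 = 0x1f0 + 0x30 = 0x220,
	     and then decides which of r_offset and the contents the
	     deletion affects.  */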
1246 
1247 	  start = irel->r_vaddr - sec->vma;
1248 	  stop = (bfd_vma) ((bfd_signed_vma) start - (long) irel->r_offset);
1249 
1250 	  if (start > addr
1251 	      && start < toaddr
1252 	      && (stop <= addr || stop >= toaddr))
1253 	    irel->r_offset += count;
1254 	  else if (stop > addr
1255 		   && stop < toaddr
1256 		   && (start <= addr || start >= toaddr))
1257 	    irel->r_offset -= count;
1258 
1259 	  start = stop;
1260 
1261 	  if (irel->r_type == R_SH_SWITCH16)
1262 	    voff = bfd_get_signed_16 (abfd, contents + nraddr);
1263 	  else if (irel->r_type == R_SH_SWITCH8)
1264 	    voff = bfd_get_8 (abfd, contents + nraddr);
1265 	  else
1266 	    voff = bfd_get_signed_32 (abfd, contents + nraddr);
1267 	  stop = (bfd_vma) ((bfd_signed_vma) start + voff);
1268 
1269 	  break;
1270 
1271 	case R_SH_USES:
1272 	  start = irel->r_vaddr - sec->vma;
1273 	  stop = (bfd_vma) ((bfd_signed_vma) start
1274 			    + (long) irel->r_offset
1275 			    + 4);
1276 	  break;
1277 	}
1278 
1279       if (start > addr
1280 	  && start < toaddr
1281 	  && (stop <= addr || stop >= toaddr))
1282 	adjust = count;
1283       else if (stop > addr
1284 	       && stop < toaddr
1285 	       && (start <= addr || start >= toaddr))
1286 	adjust = - count;
1287       else
1288 	adjust = 0;
1289 
1290       if (adjust != 0)
1291 	{
1292 	  oinsn = insn;
1293 	  overflow = false;
1294 	  switch (irel->r_type)
1295 	    {
1296 	    default:
1297 	      abort ();
1298 	      break;
1299 
1300 	    case R_SH_PCDISP8BY2:
1301 	    case R_SH_PCRELIMM8BY2:
1302 	      insn += adjust / 2;
1303 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1304 		overflow = true;
1305 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1306 	      break;
1307 
1308 	    case R_SH_PCDISP:
1309 	      insn += adjust / 2;
1310 	      if ((oinsn & 0xf000) != (insn & 0xf000))
1311 		overflow = true;
1312 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1313 	      break;
1314 
1315 	    case R_SH_PCRELIMM8BY4:
1316 	      BFD_ASSERT (adjust == count || count >= 4);
1317 	      if (count >= 4)
1318 		insn += adjust / 4;
1319 	      else
1320 		{
1321 		  if ((irel->r_vaddr & 3) == 0)
1322 		    ++insn;
1323 		}
1324 	      if ((oinsn & 0xff00) != (insn & 0xff00))
1325 		overflow = true;
1326 	      bfd_put_16 (abfd, (bfd_vma) insn, contents + nraddr);
1327 	      break;
1328 
1329 	    case R_SH_SWITCH8:
1330 	      voff += adjust;
1331 	      if (voff < 0 || voff >= 0xff)
1332 		overflow = true;
1333 	      bfd_put_8 (abfd, (bfd_vma) voff, contents + nraddr);
1334 	      break;
1335 
1336 	    case R_SH_SWITCH16:
1337 	      voff += adjust;
1338 	      if (voff < - 0x8000 || voff >= 0x8000)
1339 		overflow = true;
1340 	      bfd_put_signed_16 (abfd, (bfd_vma) voff, contents + nraddr);
1341 	      break;
1342 
1343 	    case R_SH_SWITCH32:
1344 	      voff += adjust;
1345 	      bfd_put_signed_32 (abfd, (bfd_vma) voff, contents + nraddr);
1346 	      break;
1347 
1348 	    case R_SH_USES:
1349 	      irel->r_offset += adjust;
1350 	      break;
1351 	    }
1352 
1353 	  if (overflow)
1354 	    {
1355 	      _bfd_error_handler
1356 		/* xgettext: c-format */
1357 		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
1358 		 abfd, (uint64_t) irel->r_vaddr);
1359 	      bfd_set_error (bfd_error_bad_value);
1360 	      return false;
1361 	    }
1362 	}
1363 
1364       irel->r_vaddr = nraddr + sec->vma;
1365     }
1366 
1367   /* Look through all the other sections.  If they contain any IMM32
1368      relocs against internal symbols which we are not going to adjust
1369      below, we may need to adjust the addends.  */
1370   for (o = abfd->sections; o != NULL; o = o->next)
1371     {
1372       struct internal_reloc *internal_relocs;
1373       struct internal_reloc *irelscan, *irelscanend;
1374       bfd_byte *ocontents;
1375 
1376       if (o == sec
1377 	  || (o->flags & SEC_RELOC) == 0
1378 	  || o->reloc_count == 0)
1379 	continue;
1380 
1381       /* We always cache the relocs.  Perhaps, if info->keep_memory is
1382 	 FALSE, we should free them, if we are permitted to, when we
1383 	 leave sh_relax_section.  */
1384       internal_relocs = (_bfd_coff_read_internal_relocs
1385 			 (abfd, o, true, (bfd_byte *) NULL, false,
1386 			  (struct internal_reloc *) NULL));
1387       if (internal_relocs == NULL)
1388 	return false;
1389 
1390       ocontents = NULL;
1391       irelscanend = internal_relocs + o->reloc_count;
1392       for (irelscan = internal_relocs; irelscan < irelscanend; irelscan++)
1393 	{
1394 	  struct internal_syment sym;
1395 
1396 #ifdef COFF_WITH_PE
1397 	  if (irelscan->r_type != R_SH_IMM32
1398 	      && irelscan->r_type != R_SH_IMAGEBASE
1399 	      && irelscan->r_type != R_SH_IMM32CE)
1400 #else
1401 	  if (irelscan->r_type != R_SH_IMM32)
1402 #endif
1403 	    continue;
1404 
1405 	  bfd_coff_swap_sym_in (abfd,
1406 				((bfd_byte *) obj_coff_external_syms (abfd)
1407 				 + (irelscan->r_symndx
1408 				    * bfd_coff_symesz (abfd))),
1409 				&sym);
1410 	  if (sym.n_sclass != C_EXT
1411 	      && sym.n_scnum == sec->target_index
1412 	      && ((bfd_vma) sym.n_value <= addr
1413 		  || (bfd_vma) sym.n_value >= toaddr))
1414 	    {
1415 	      bfd_vma val;
1416 
1417 	      if (ocontents == NULL)
1418 		{
1419 		  if (coff_section_data (abfd, o)->contents != NULL)
1420 		    ocontents = coff_section_data (abfd, o)->contents;
1421 		  else
1422 		    {
1423 		      if (!bfd_malloc_and_get_section (abfd, o, &ocontents))
1424 			return false;
1425 		      /* We always cache the section contents.
1426 			 Perhaps, if info->keep_memory is FALSE, we
1427 			 should free them, if we are permitted to,
1428 			 when we leave sh_relax_section.  */
1429 		      coff_section_data (abfd, o)->contents = ocontents;
1430 		    }
1431 		}
1432 
1433 	      val = bfd_get_32 (abfd, ocontents + irelscan->r_vaddr - o->vma);
1434 	      val += sym.n_value;
1435 	      if (val > addr && val < toaddr)
1436 		bfd_put_32 (abfd, val - count,
1437 			    ocontents + irelscan->r_vaddr - o->vma);
1438 
1439 	      coff_section_data (abfd, o)->keep_contents = true;
1440 	    }
1441 	}
1442     }
1443 
1444   /* Adjusting the internal symbols will not work if something has
1445      already retrieved the generic symbols.  It would be possible to
1446      make this work by adjusting the generic symbols at the same time.
1447      However, this case should not arise in normal usage.  */
1448   if (obj_symbols (abfd) != NULL
1449       || obj_raw_syments (abfd) != NULL)
1450     {
1451       _bfd_error_handler
1452 	(_("%pB: fatal: generic symbols retrieved before relaxing"), abfd);
1453       bfd_set_error (bfd_error_invalid_operation);
1454       return false;
1455     }
1456 
1457   /* Adjust all the symbols.  */
1458   sym_hash = obj_coff_sym_hashes (abfd);
1459   symesz = bfd_coff_symesz (abfd);
1460   esym = (bfd_byte *) obj_coff_external_syms (abfd);
1461   esymend = esym + obj_raw_syment_count (abfd) * symesz;
1462   while (esym < esymend)
1463     {
1464       struct internal_syment isym;
1465 
1466       bfd_coff_swap_sym_in (abfd, esym, &isym);
1467 
1468       if (isym.n_scnum == sec->target_index
1469 	  && (bfd_vma) isym.n_value > addr
1470 	  && (bfd_vma) isym.n_value < toaddr)
1471 	{
1472 	  isym.n_value -= count;
1473 
1474 	  bfd_coff_swap_sym_out (abfd, &isym, esym);
1475 
1476 	  if (*sym_hash != NULL)
1477 	    {
1478 	      BFD_ASSERT ((*sym_hash)->root.type == bfd_link_hash_defined
1479 			  || (*sym_hash)->root.type == bfd_link_hash_defweak);
1480 	      BFD_ASSERT ((*sym_hash)->root.u.def.value >= addr
1481 			  && (*sym_hash)->root.u.def.value < toaddr);
1482 	      (*sym_hash)->root.u.def.value -= count;
1483 	    }
1484 	}
1485 
1486       esym += (isym.n_numaux + 1) * symesz;
1487       sym_hash += isym.n_numaux + 1;
1488     }
1489 
1490   /* See if we can move the ALIGN reloc forward.  We have adjusted
1491      r_vaddr for it already.  */
1492   if (irelalign != NULL)
1493     {
1494       bfd_vma alignto, alignaddr;
1495 
1496       alignto = BFD_ALIGN (toaddr, 1 << irelalign->r_offset);
1497       alignaddr = BFD_ALIGN (irelalign->r_vaddr - sec->vma,
1498 			     1 << irelalign->r_offset);
1499       if (alignto != alignaddr)
1500 	{
1501 	  /* Tail recursion.  */
1502 	  return sh_relax_delete_bytes (abfd, sec, alignaddr,
1503 					(int) (alignto - alignaddr));
1504 	}
1505     }
1506 
1507   return true;
1508 }
1509 
1510 /* This is yet another version of the SH opcode table, used to rapidly
1511    get information about a particular instruction.  */
1512 
1513 /* The opcode map is represented by an array of these structures.  The
1514    array is indexed by the high order four bits in the instruction.  */
1515 
1516 struct sh_major_opcode
1517 {
1518   /* A pointer to the instruction list.  This is an array which
1519      contains all the instructions with this major opcode.  */
1520   const struct sh_minor_opcode *minor_opcodes;
1521   /* The number of elements in minor_opcodes.  */
1522   unsigned short count;
1523 };
1524 
1525 /* This structure holds information for a set of SH opcodes.  The
1526    instruction code is anded with the mask value, and the resulting
1527    value is used to search the sorted opcode list.  */
1528 
1529 struct sh_minor_opcode
1530 {
1531   /* The sorted opcode list.  */
1532   const struct sh_opcode *opcodes;
1533   /* The number of elements in opcodes.  */
1534   unsigned short count;
1535   /* The mask value to use when searching the opcode list.  */
1536   unsigned short mask;
1537 };
1538 
1539 /* This structure holds information for an SH instruction.  An array
1540    of these structures is sorted in order by opcode.  */
1541 
1542 struct sh_opcode
1543 {
1544   /* The code for this instruction, after it has been anded with the
1545      mask value in the sh_major_opcode structure.  */
1546   unsigned short opcode;
1547   /* Flags for this instruction.  */
1548   unsigned long flags;
1549 };
1550 
1551 /* Flags which appear in the sh_opcode structure.  */
1552 
1553 /* This instruction loads a value from memory.  */
1554 #define LOAD (0x1)
1555 
1556 /* This instruction stores a value to memory.  */
1557 #define STORE (0x2)
1558 
1559 /* This instruction is a branch.  */
1560 #define BRANCH (0x4)
1561 
1562 /* This instruction has a delay slot.  */
1563 #define DELAY (0x8)
1564 
1565 /* This instruction uses the value in the register in the field at
1566    mask 0x0f00 of the instruction.  */
1567 #define USES1 (0x10)
1568 #define USES1_REG(x) ((x & 0x0f00) >> 8)
1569 
1570 /* This instruction uses the value in the register in the field at
1571    mask 0x00f0 of the instruction.  */
1572 #define USES2 (0x20)
1573 #define USES2_REG(x) ((x & 0x00f0) >> 4)
1574 
1575 /* This instruction uses the value in register 0.  */
1576 #define USESR0 (0x40)
1577 
1578 /* This instruction sets the value in the register in the field at
1579    mask 0x0f00 of the instruction.  */
1580 #define SETS1 (0x80)
1581 #define SETS1_REG(x) ((x & 0x0f00) >> 8)
1582 
1583 /* This instruction sets the value in the register in the field at
1584    mask 0x00f0 of the instruction.  */
1585 #define SETS2 (0x100)
1586 #define SETS2_REG(x) ((x & 0x00f0) >> 4)
1587 
1588 /* This instruction sets register 0.  */
1589 #define SETSR0 (0x200)
1590 
1591 /* This instruction sets a special register.  */
1592 #define SETSSP (0x400)
1593 
1594 /* This instruction uses a special register.  */
1595 #define USESSP (0x800)
1596 
1597 /* This instruction uses the floating point register in the field at
1598    mask 0x0f00 of the instruction.  */
1599 #define USESF1 (0x1000)
1600 #define USESF1_REG(x) ((x & 0x0f00) >> 8)
1601 
1602 /* This instruction uses the floating point register in the field at
1603    mask 0x00f0 of the instruction.  */
1604 #define USESF2 (0x2000)
1605 #define USESF2_REG(x) ((x & 0x00f0) >> 4)
1606 
1607 /* This instruction uses floating point register 0.  */
1608 #define USESF0 (0x4000)
1609 
1610 /* This instruction sets the floating point register in the field at
1611    mask 0x0f00 of the instruction.  */
1612 #define SETSF1 (0x8000)
1613 #define SETSF1_REG(x) ((x & 0x0f00) >> 8)
1614 
1615 #define USESAS (0x10000)
1616 #define USESAS_REG(x) (((((x) >> 8) - 2) & 3) + 2)
1617 #define USESR8 (0x20000)
1618 #define SETSAS (0x40000)
1619 #define SETSAS_REG(x) USESAS_REG (x)
1620 
1621 #define MAP(a) a, sizeof a / sizeof a[0]
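
/* Decoding example (illustrative only): "mov.l r3,@(r0,r5)" is encoded
   as 0x0536.  Masking with 0xf00f selects the sh_opcode02 entry
   { 0x0006, STORE | USES1 | USES2 | USESR0 }, and USES1_REG (0x0536)
   is 5 (rn) while USES2_REG (0x0536) is 3 (rm).  */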
1622 
1623 #ifndef COFF_IMAGE_WITH_PE
1624 
1625 /* The opcode maps.  */
1626 
1627 static const struct sh_opcode sh_opcode00[] =
1628 {
1629   { 0x0008, SETSSP },			/* clrt */
1630   { 0x0009, 0 },			/* nop */
1631   { 0x000b, BRANCH | DELAY | USESSP },	/* rts */
1632   { 0x0018, SETSSP },			/* sett */
1633   { 0x0019, SETSSP },			/* div0u */
1634   { 0x001b, 0 },			/* sleep */
1635   { 0x0028, SETSSP },			/* clrmac */
1636   { 0x002b, BRANCH | DELAY | SETSSP },	/* rte */
1637   { 0x0038, USESSP | SETSSP },		/* ldtlb */
1638   { 0x0048, SETSSP },			/* clrs */
1639   { 0x0058, SETSSP }			/* sets */
1640 };
1641 
1642 static const struct sh_opcode sh_opcode01[] =
1643 {
1644   { 0x0003, BRANCH | DELAY | USES1 | SETSSP },	/* bsrf rn */
1645   { 0x000a, SETS1 | USESSP },			/* sts mach,rn */
1646   { 0x001a, SETS1 | USESSP },			/* sts macl,rn */
1647   { 0x0023, BRANCH | DELAY | USES1 },		/* braf rn */
1648   { 0x0029, SETS1 | USESSP },			/* movt rn */
1649   { 0x002a, SETS1 | USESSP },			/* sts pr,rn */
1650   { 0x005a, SETS1 | USESSP },			/* sts fpul,rn */
1651   { 0x006a, SETS1 | USESSP },			/* sts fpscr,rn / sts dsr,rn */
1652   { 0x0083, LOAD | USES1 },			/* pref @rn */
1653   { 0x007a, SETS1 | USESSP },			/* sts a0,rn */
1654   { 0x008a, SETS1 | USESSP },			/* sts x0,rn */
1655   { 0x009a, SETS1 | USESSP },			/* sts x1,rn */
1656   { 0x00aa, SETS1 | USESSP },			/* sts y0,rn */
1657   { 0x00ba, SETS1 | USESSP }			/* sts y1,rn */
1658 };
1659 
1660 static const struct sh_opcode sh_opcode02[] =
1661 {
1662   { 0x0002, SETS1 | USESSP },			/* stc <special_reg>,rn */
1663   { 0x0004, STORE | USES1 | USES2 | USESR0 },	/* mov.b rm,@(r0,rn) */
1664   { 0x0005, STORE | USES1 | USES2 | USESR0 },	/* mov.w rm,@(r0,rn) */
1665   { 0x0006, STORE | USES1 | USES2 | USESR0 },	/* mov.l rm,@(r0,rn) */
1666   { 0x0007, SETSSP | USES1 | USES2 },		/* mul.l rm,rn */
1667   { 0x000c, LOAD | SETS1 | USES2 | USESR0 },	/* mov.b @(r0,rm),rn */
1668   { 0x000d, LOAD | SETS1 | USES2 | USESR0 },	/* mov.w @(r0,rm),rn */
1669   { 0x000e, LOAD | SETS1 | USES2 | USESR0 },	/* mov.l @(r0,rm),rn */
1670   { 0x000f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.l @rm+,@rn+ */
1671 };
1672 
1673 static const struct sh_minor_opcode sh_opcode0[] =
1674 {
1675   { MAP (sh_opcode00), 0xffff },
1676   { MAP (sh_opcode01), 0xf0ff },
1677   { MAP (sh_opcode02), 0xf00f }
1678 };
1679 
1680 static const struct sh_opcode sh_opcode10[] =
1681 {
1682   { 0x1000, STORE | USES1 | USES2 }	/* mov.l rm,@(disp,rn) */
1683 };
1684 
1685 static const struct sh_minor_opcode sh_opcode1[] =
1686 {
1687   { MAP (sh_opcode10), 0xf000 }
1688 };
1689 
1690 static const struct sh_opcode sh_opcode20[] =
1691 {
1692   { 0x2000, STORE | USES1 | USES2 },		/* mov.b rm,@rn */
1693   { 0x2001, STORE | USES1 | USES2 },		/* mov.w rm,@rn */
1694   { 0x2002, STORE | USES1 | USES2 },		/* mov.l rm,@rn */
1695   { 0x2004, STORE | SETS1 | USES1 | USES2 },	/* mov.b rm,@-rn */
1696   { 0x2005, STORE | SETS1 | USES1 | USES2 },	/* mov.w rm,@-rn */
1697   { 0x2006, STORE | SETS1 | USES1 | USES2 },	/* mov.l rm,@-rn */
1698   { 0x2007, SETSSP | USES1 | USES2 | USESSP },	/* div0s */
1699   { 0x2008, SETSSP | USES1 | USES2 },		/* tst rm,rn */
1700   { 0x2009, SETS1 | USES1 | USES2 },		/* and rm,rn */
1701   { 0x200a, SETS1 | USES1 | USES2 },		/* xor rm,rn */
1702   { 0x200b, SETS1 | USES1 | USES2 },		/* or rm,rn */
1703   { 0x200c, SETSSP | USES1 | USES2 },		/* cmp/str rm,rn */
1704   { 0x200d, SETS1 | USES1 | USES2 },		/* xtrct rm,rn */
1705   { 0x200e, SETSSP | USES1 | USES2 },		/* mulu.w rm,rn */
1706   { 0x200f, SETSSP | USES1 | USES2 }		/* muls.w rm,rn */
1707 };
1708 
1709 static const struct sh_minor_opcode sh_opcode2[] =
1710 {
1711   { MAP (sh_opcode20), 0xf00f }
1712 };
1713 
1714 static const struct sh_opcode sh_opcode30[] =
1715 {
1716   { 0x3000, SETSSP | USES1 | USES2 },		/* cmp/eq rm,rn */
1717   { 0x3002, SETSSP | USES1 | USES2 },		/* cmp/hs rm,rn */
1718   { 0x3003, SETSSP | USES1 | USES2 },		/* cmp/ge rm,rn */
1719   { 0x3004, SETSSP | USESSP | USES1 | USES2 },	/* div1 rm,rn */
1720   { 0x3005, SETSSP | USES1 | USES2 },		/* dmulu.l rm,rn */
1721   { 0x3006, SETSSP | USES1 | USES2 },		/* cmp/hi rm,rn */
1722   { 0x3007, SETSSP | USES1 | USES2 },		/* cmp/gt rm,rn */
1723   { 0x3008, SETS1 | USES1 | USES2 },		/* sub rm,rn */
1724   { 0x300a, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* subc rm,rn */
1725   { 0x300b, SETS1 | SETSSP | USES1 | USES2 },	/* subv rm,rn */
1726   { 0x300c, SETS1 | USES1 | USES2 },		/* add rm,rn */
1727   { 0x300d, SETSSP | USES1 | USES2 },		/* dmuls.l rm,rn */
1728   { 0x300e, SETS1 | SETSSP | USES1 | USES2 | USESSP }, /* addc rm,rn */
1729   { 0x300f, SETS1 | SETSSP | USES1 | USES2 }	/* addv rm,rn */
1730 };
1731 
1732 static const struct sh_minor_opcode sh_opcode3[] =
1733 {
1734   { MAP (sh_opcode30), 0xf00f }
1735 };
1736 
1737 static const struct sh_opcode sh_opcode40[] =
1738 {
1739   { 0x4000, SETS1 | SETSSP | USES1 },		/* shll rn */
1740   { 0x4001, SETS1 | SETSSP | USES1 },		/* shlr rn */
1741   { 0x4002, STORE | SETS1 | USES1 | USESSP },	/* sts.l mach,@-rn */
1742   { 0x4004, SETS1 | SETSSP | USES1 },		/* rotl rn */
1743   { 0x4005, SETS1 | SETSSP | USES1 },		/* rotr rn */
1744   { 0x4006, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,mach */
1745   { 0x4008, SETS1 | USES1 },			/* shll2 rn */
1746   { 0x4009, SETS1 | USES1 },			/* shlr2 rn */
1747   { 0x400a, SETSSP | USES1 },			/* lds rm,mach */
1748   { 0x400b, BRANCH | DELAY | USES1 },		/* jsr @rn */
1749   { 0x4010, SETS1 | SETSSP | USES1 },		/* dt rn */
1750   { 0x4011, SETSSP | USES1 },			/* cmp/pz rn */
1751   { 0x4012, STORE | SETS1 | USES1 | USESSP },	/* sts.l macl,@-rn */
1752   { 0x4014, SETSSP | USES1 },			/* setrc rm */
1753   { 0x4015, SETSSP | USES1 },			/* cmp/pl rn */
1754   { 0x4016, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,macl */
1755   { 0x4018, SETS1 | USES1 },			/* shll8 rn */
1756   { 0x4019, SETS1 | USES1 },			/* shlr8 rn */
1757   { 0x401a, SETSSP | USES1 },			/* lds rm,macl */
1758   { 0x401b, LOAD | SETSSP | USES1 },		/* tas.b @rn */
1759   { 0x4020, SETS1 | SETSSP | USES1 },		/* shal rn */
1760   { 0x4021, SETS1 | SETSSP | USES1 },		/* shar rn */
1761   { 0x4022, STORE | SETS1 | USES1 | USESSP },	/* sts.l pr,@-rn */
1762   { 0x4024, SETS1 | SETSSP | USES1 | USESSP },	/* rotcl rn */
1763   { 0x4025, SETS1 | SETSSP | USES1 | USESSP },	/* rotcr rn */
1764   { 0x4026, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,pr */
1765   { 0x4028, SETS1 | USES1 },			/* shll16 rn */
1766   { 0x4029, SETS1 | USES1 },			/* shlr16 rn */
1767   { 0x402a, SETSSP | USES1 },			/* lds rm,pr */
1768   { 0x402b, BRANCH | DELAY | USES1 },		/* jmp @rn */
1769   { 0x4052, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpul,@-rn */
1770   { 0x4056, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpul */
1771   { 0x405a, SETSSP | USES1 },			/* lds rm,fpul */
1772   { 0x4062, STORE | SETS1 | USES1 | USESSP },	/* sts.l fpscr / dsr,@-rn */
1773   { 0x4066, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,fpscr / dsr */
1774   { 0x406a, SETSSP | USES1 },			/* lds rm,fpscr / lds rm,dsr */
1775   { 0x4072, STORE | SETS1 | USES1 | USESSP },	/* sts.l a0,@-rn */
1776   { 0x4076, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,a0 */
1777   { 0x407a, SETSSP | USES1 },			/* lds rm,a0 */
1778   { 0x4082, STORE | SETS1 | USES1 | USESSP },	/* sts.l x0,@-rn */
1779   { 0x4086, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x0 */
1780   { 0x408a, SETSSP | USES1 },			/* lds rm,x0 */
1781   { 0x4092, STORE | SETS1 | USES1 | USESSP },	/* sts.l x1,@-rn */
1782   { 0x4096, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,x1 */
1783   { 0x409a, SETSSP | USES1 },			/* lds rm,x1 */
1784   { 0x40a2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y0,@-rn */
1785   { 0x40a6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y0 */
1786   { 0x40aa, SETSSP | USES1 },			/* lds rm,y0 */
1787   { 0x40b2, STORE | SETS1 | USES1 | USESSP },	/* sts.l y1,@-rn */
1788   { 0x40b6, LOAD | SETS1 | SETSSP | USES1 },	/* lds.l @rm+,y1 */
1789   { 0x40ba, SETSSP | USES1 }			/* lds rm,y1 */
1790 };
1791 
1792 static const struct sh_opcode sh_opcode41[] =
1793 {
1794   { 0x4003, STORE | SETS1 | USES1 | USESSP },	/* stc.l <special_reg>,@-rn */
1795   { 0x4007, LOAD | SETS1 | SETSSP | USES1 },	/* ldc.l @rm+,<special_reg> */
1796   { 0x400c, SETS1 | USES1 | USES2 },		/* shad rm,rn */
1797   { 0x400d, SETS1 | USES1 | USES2 },		/* shld rm,rn */
1798   { 0x400e, SETSSP | USES1 },			/* ldc rm,<special_reg> */
1799   { 0x400f, LOAD|SETS1|SETS2|SETSSP|USES1|USES2|USESSP }, /* mac.w @rm+,@rn+ */
1800 };
1801 
1802 static const struct sh_minor_opcode sh_opcode4[] =
1803 {
1804   { MAP (sh_opcode40), 0xf0ff },
1805   { MAP (sh_opcode41), 0xf00f }
1806 };
1807 
1808 static const struct sh_opcode sh_opcode50[] =
1809 {
1810   { 0x5000, LOAD | SETS1 | USES2 }	/* mov.l @(disp,rm),rn */
1811 };
1812 
1813 static const struct sh_minor_opcode sh_opcode5[] =
1814 {
1815   { MAP (sh_opcode50), 0xf000 }
1816 };
1817 
1818 static const struct sh_opcode sh_opcode60[] =
1819 {
1820   { 0x6000, LOAD | SETS1 | USES2 },		/* mov.b @rm,rn */
1821   { 0x6001, LOAD | SETS1 | USES2 },		/* mov.w @rm,rn */
1822   { 0x6002, LOAD | SETS1 | USES2 },		/* mov.l @rm,rn */
1823   { 0x6003, SETS1 | USES2 },			/* mov rm,rn */
1824   { 0x6004, LOAD | SETS1 | SETS2 | USES2 },	/* mov.b @rm+,rn */
1825   { 0x6005, LOAD | SETS1 | SETS2 | USES2 },	/* mov.w @rm+,rn */
1826   { 0x6006, LOAD | SETS1 | SETS2 | USES2 },	/* mov.l @rm+,rn */
1827   { 0x6007, SETS1 | USES2 },			/* not rm,rn */
1828   { 0x6008, SETS1 | USES2 },			/* swap.b rm,rn */
1829   { 0x6009, SETS1 | USES2 },			/* swap.w rm,rn */
1830   { 0x600a, SETS1 | SETSSP | USES2 | USESSP },	/* negc rm,rn */
1831   { 0x600b, SETS1 | USES2 },			/* neg rm,rn */
1832   { 0x600c, SETS1 | USES2 },			/* extu.b rm,rn */
1833   { 0x600d, SETS1 | USES2 },			/* extu.w rm,rn */
1834   { 0x600e, SETS1 | USES2 },			/* exts.b rm,rn */
1835   { 0x600f, SETS1 | USES2 }			/* exts.w rm,rn */
1836 };
1837 
1838 static const struct sh_minor_opcode sh_opcode6[] =
1839 {
1840   { MAP (sh_opcode60), 0xf00f }
1841 };
1842 
1843 static const struct sh_opcode sh_opcode70[] =
1844 {
1845   { 0x7000, SETS1 | USES1 }		/* add #imm,rn */
1846 };
1847 
1848 static const struct sh_minor_opcode sh_opcode7[] =
1849 {
1850   { MAP (sh_opcode70), 0xf000 }
1851 };
1852 
1853 static const struct sh_opcode sh_opcode80[] =
1854 {
1855   { 0x8000, STORE | USES2 | USESR0 },	/* mov.b r0,@(disp,rn) */
1856   { 0x8100, STORE | USES2 | USESR0 },	/* mov.w r0,@(disp,rn) */
1857   { 0x8200, SETSSP },			/* setrc #imm */
1858   { 0x8400, LOAD | SETSR0 | USES2 },	/* mov.b @(disp,rm),r0 */
1859   { 0x8500, LOAD | SETSR0 | USES2 },	/* mov.w @(disp,rm),r0 */
1860   { 0x8800, SETSSP | USESR0 },		/* cmp/eq #imm,r0 */
1861   { 0x8900, BRANCH | USESSP },		/* bt label */
1862   { 0x8b00, BRANCH | USESSP },		/* bf label */
1863   { 0x8c00, SETSSP },			/* ldrs @(disp,pc) */
1864   { 0x8d00, BRANCH | DELAY | USESSP },	/* bt/s label */
1865   { 0x8e00, SETSSP },			/* ldre @(disp,pc) */
1866   { 0x8f00, BRANCH | DELAY | USESSP }	/* bf/s label */
1867 };
1868 
1869 static const struct sh_minor_opcode sh_opcode8[] =
1870 {
1871   { MAP (sh_opcode80), 0xff00 }
1872 };
1873 
1874 static const struct sh_opcode sh_opcode90[] =
1875 {
1876   { 0x9000, LOAD | SETS1 }	/* mov.w @(disp,pc),rn */
1877 };
1878 
1879 static const struct sh_minor_opcode sh_opcode9[] =
1880 {
1881   { MAP (sh_opcode90), 0xf000 }
1882 };
1883 
1884 static const struct sh_opcode sh_opcodea0[] =
1885 {
1886   { 0xa000, BRANCH | DELAY }	/* bra label */
1887 };
1888 
1889 static const struct sh_minor_opcode sh_opcodea[] =
1890 {
1891   { MAP (sh_opcodea0), 0xf000 }
1892 };
1893 
1894 static const struct sh_opcode sh_opcodeb0[] =
1895 {
1896   { 0xb000, BRANCH | DELAY }	/* bsr label */
1897 };
1898 
1899 static const struct sh_minor_opcode sh_opcodeb[] =
1900 {
1901   { MAP (sh_opcodeb0), 0xf000 }
1902 };
1903 
1904 static const struct sh_opcode sh_opcodec0[] =
1905 {
1906   { 0xc000, STORE | USESR0 | USESSP },		/* mov.b r0,@(disp,gbr) */
1907   { 0xc100, STORE | USESR0 | USESSP },		/* mov.w r0,@(disp,gbr) */
1908   { 0xc200, STORE | USESR0 | USESSP },		/* mov.l r0,@(disp,gbr) */
1909   { 0xc300, BRANCH | USESSP },			/* trapa #imm */
1910   { 0xc400, LOAD | SETSR0 | USESSP },		/* mov.b @(disp,gbr),r0 */
1911   { 0xc500, LOAD | SETSR0 | USESSP },		/* mov.w @(disp,gbr),r0 */
1912   { 0xc600, LOAD | SETSR0 | USESSP },		/* mov.l @(disp,gbr),r0 */
1913   { 0xc700, SETSR0 },				/* mova @(disp,pc),r0 */
1914   { 0xc800, SETSSP | USESR0 },			/* tst #imm,r0 */
1915   { 0xc900, SETSR0 | USESR0 },			/* and #imm,r0 */
1916   { 0xca00, SETSR0 | USESR0 },			/* xor #imm,r0 */
1917   { 0xcb00, SETSR0 | USESR0 },			/* or #imm,r0 */
1918   { 0xcc00, LOAD | SETSSP | USESR0 | USESSP },	/* tst.b #imm,@(r0,gbr) */
1919   { 0xcd00, LOAD | STORE | USESR0 | USESSP },	/* and.b #imm,@(r0,gbr) */
1920   { 0xce00, LOAD | STORE | USESR0 | USESSP },	/* xor.b #imm,@(r0,gbr) */
1921   { 0xcf00, LOAD | STORE | USESR0 | USESSP }	/* or.b #imm,@(r0,gbr) */
1922 };
1923 
1924 static const struct sh_minor_opcode sh_opcodec[] =
1925 {
1926   { MAP (sh_opcodec0), 0xff00 }
1927 };
1928 
1929 static const struct sh_opcode sh_opcoded0[] =
1930 {
1931   { 0xd000, LOAD | SETS1 }		/* mov.l @(disp,pc),rn */
1932 };
1933 
1934 static const struct sh_minor_opcode sh_opcoded[] =
1935 {
1936   { MAP (sh_opcoded0), 0xf000 }
1937 };
1938 
1939 static const struct sh_opcode sh_opcodee0[] =
1940 {
1941   { 0xe000, SETS1 }		/* mov #imm,rn */
1942 };
1943 
1944 static const struct sh_minor_opcode sh_opcodee[] =
1945 {
1946   { MAP (sh_opcodee0), 0xf000 }
1947 };
1948 
1949 static const struct sh_opcode sh_opcodef0[] =
1950 {
1951   { 0xf000, SETSF1 | USESF1 | USESF2 },		/* fadd fm,fn */
1952   { 0xf001, SETSF1 | USESF1 | USESF2 },		/* fsub fm,fn */
1953   { 0xf002, SETSF1 | USESF1 | USESF2 },		/* fmul fm,fn */
1954   { 0xf003, SETSF1 | USESF1 | USESF2 },		/* fdiv fm,fn */
1955   { 0xf004, SETSSP | USESF1 | USESF2 },		/* fcmp/eq fm,fn */
1956   { 0xf005, SETSSP | USESF1 | USESF2 },		/* fcmp/gt fm,fn */
1957   { 0xf006, LOAD | SETSF1 | USES2 | USESR0 },	/* fmov.s @(r0,rm),fn */
1958   { 0xf007, STORE | USES1 | USESF2 | USESR0 },	/* fmov.s fm,@(r0,rn) */
1959   { 0xf008, LOAD | SETSF1 | USES2 },		/* fmov.s @rm,fn */
1960   { 0xf009, LOAD | SETS2 | SETSF1 | USES2 },	/* fmov.s @rm+,fn */
1961   { 0xf00a, STORE | USES1 | USESF2 },		/* fmov.s fm,@rn */
1962   { 0xf00b, STORE | SETS1 | USES1 | USESF2 },	/* fmov.s fm,@-rn */
1963   { 0xf00c, SETSF1 | USESF2 },			/* fmov fm,fn */
1964   { 0xf00e, SETSF1 | USESF1 | USESF2 | USESF0 }	/* fmac f0,fm,fn */
1965 };
1966 
1967 static const struct sh_opcode sh_opcodef1[] =
1968 {
1969   { 0xf00d, SETSF1 | USESSP },	/* fsts fpul,fn */
1970   { 0xf01d, SETSSP | USESF1 },	/* flds fn,fpul */
1971   { 0xf02d, SETSF1 | USESSP },	/* float fpul,fn */
1972   { 0xf03d, SETSSP | USESF1 },	/* ftrc fn,fpul */
1973   { 0xf04d, SETSF1 | USESF1 },	/* fneg fn */
1974   { 0xf05d, SETSF1 | USESF1 },	/* fabs fn */
1975   { 0xf06d, SETSF1 | USESF1 },	/* fsqrt fn */
1976   { 0xf07d, SETSSP | USESF1 },	/* ftst/nan fn */
1977   { 0xf08d, SETSF1 },		/* fldi0 fn */
1978   { 0xf09d, SETSF1 }		/* fldi1 fn */
1979 };
1980 
1981 static const struct sh_minor_opcode sh_opcodef[] =
1982 {
1983   { MAP (sh_opcodef0), 0xf00f },
1984   { MAP (sh_opcodef1), 0xf0ff }
1985 };
1986 
1987 static struct sh_major_opcode sh_opcodes[] =
1988 {
1989   { MAP (sh_opcode0) },
1990   { MAP (sh_opcode1) },
1991   { MAP (sh_opcode2) },
1992   { MAP (sh_opcode3) },
1993   { MAP (sh_opcode4) },
1994   { MAP (sh_opcode5) },
1995   { MAP (sh_opcode6) },
1996   { MAP (sh_opcode7) },
1997   { MAP (sh_opcode8) },
1998   { MAP (sh_opcode9) },
1999   { MAP (sh_opcodea) },
2000   { MAP (sh_opcodeb) },
2001   { MAP (sh_opcodec) },
2002   { MAP (sh_opcoded) },
2003   { MAP (sh_opcodee) },
2004   { MAP (sh_opcodef) }
2005 };
2006 
2007 /* The double data transfer / parallel processing insns are not
2008    described here.  This will cause sh_align_load_span to leave them alone.  */
2009 
2010 static const struct sh_opcode sh_dsp_opcodef0[] =
2011 {
2012   { 0xf400, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @-as,ds */
2013   { 0xf401, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@-as */
2014   { 0xf404, USESAS | LOAD | SETSSP },		/* movs.x @as,ds */
2015   { 0xf405, USESAS | STORE | USESSP },		/* movs.x ds,@as */
2016   { 0xf408, USESAS | SETSAS | LOAD | SETSSP },	/* movs.x @as+,ds */
2017   { 0xf409, USESAS | SETSAS | STORE | USESSP },	/* movs.x ds,@as+ */
2018   { 0xf40c, USESAS | SETSAS | LOAD | SETSSP | USESR8 },	/* movs.x @as+r8,ds */
2019   { 0xf40d, USESAS | SETSAS | STORE | USESSP | USESR8 }	/* movs.x ds,@as+r8 */
2020 };
2021 
2022 static const struct sh_minor_opcode sh_dsp_opcodef[] =
2023 {
2024   { MAP (sh_dsp_opcodef0), 0xfc0d }
2025 };
2026 
2027 /* Given an instruction, return a pointer to the corresponding
2028    sh_opcode structure.  Return NULL if the instruction is not
2029    recognized.  */
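/* For example (illustrative only): for the instruction word 0x6456,
   major opcode 6 selects sh_opcode6, masking with 0xf00f yields 0x6006,
   and the entry returned describes "mov.l @rm+,rn" with flags
   LOAD | SETS1 | SETS2 | USES2 - a load that sets rn (here r4) and both
   uses and sets rm (here r5).  */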
2030 
2031 static const struct sh_opcode *
2032 sh_insn_info (unsigned int insn)
2033 {
2034   const struct sh_major_opcode *maj;
2035   const struct sh_minor_opcode *min, *minend;
2036 
2037   maj = &sh_opcodes[(insn & 0xf000) >> 12];
2038   min = maj->minor_opcodes;
2039   minend = min + maj->count;
2040   for (; min < minend; min++)
2041     {
2042       unsigned int l;
2043       const struct sh_opcode *op, *opend;
2044 
2045       l = insn & min->mask;
2046       op = min->opcodes;
2047       opend = op + min->count;
2048 
2049       /* Since the opcode tables are sorted, we could use a binary
2050 	 search here if the count were above some cutoff value.  */
2051       for (; op < opend; op++)
2052 	if (op->opcode == l)
2053 	  return op;
2054     }
2055 
2056   return NULL;
2057 }
2058 
2059 /* See whether an instruction uses a general purpose register.  */
2060 
2061 static bool
2062 sh_insn_uses_reg (unsigned int insn,
2063 		  const struct sh_opcode *op,
2064 		  unsigned int reg)
2065 {
2066   unsigned int f;
2067 
2068   f = op->flags;
2069 
2070   if ((f & USES1) != 0
2071       && USES1_REG (insn) == reg)
2072     return true;
2073   if ((f & USES2) != 0
2074       && USES2_REG (insn) == reg)
2075     return true;
2076   if ((f & USESR0) != 0
2077       && reg == 0)
2078     return true;
2079   if ((f & USESAS) && reg == USESAS_REG (insn))
2080     return true;
2081   if ((f & USESR8) && reg == 8)
2082     return true;
2083 
2084   return false;
2085 }
2086 
2087 /* See whether an instruction sets a general purpose register.  */
2088 
2089 static bool
2090 sh_insn_sets_reg (unsigned int insn,
2091 		  const struct sh_opcode *op,
2092 		  unsigned int reg)
2093 {
2094   unsigned int f;
2095 
2096   f = op->flags;
2097 
2098   if ((f & SETS1) != 0
2099       && SETS1_REG (insn) == reg)
2100     return true;
2101   if ((f & SETS2) != 0
2102       && SETS2_REG (insn) == reg)
2103     return true;
2104   if ((f & SETSR0) != 0
2105       && reg == 0)
2106     return true;
2107   if ((f & SETSAS) && reg == SETSAS_REG (insn))
2108     return true;
2109 
2110   return false;
2111 }
2112 
2113 /* See whether an instruction uses or sets a general purpose register.  */
2114 
2115 static bool
2116 sh_insn_uses_or_sets_reg (unsigned int insn,
2117 			  const struct sh_opcode *op,
2118 			  unsigned int reg)
2119 {
2120   if (sh_insn_uses_reg (insn, op, reg))
2121     return true;
2122 
2123   return sh_insn_sets_reg (insn, op, reg);
2124 }
2125 
2126 /* See whether an instruction uses a floating point register.  */
2127 
2128 static bool
2129 sh_insn_uses_freg (unsigned int insn,
2130 		   const struct sh_opcode *op,
2131 		   unsigned int freg)
2132 {
2133   unsigned int f;
2134 
2135   f = op->flags;
2136 
2137   /* We can't tell if this is a double-precision insn, so just play safe
2138      and assume that it might be.  That means we must test not only FREG
2139      against itself, but also an even FREG against FREG+1 - in case the
2140      using insn uses just the low part of a double precision value - and
2141      an odd FREG against FREG-1 - in case the setting insn sets just the
2142      low part of a double precision value.
2143      What this all boils down to is that we have to ignore the lowest
2144      bit of the register number.  */
2145 
2146   if ((f & USESF1) != 0
2147       && (USESF1_REG (insn) & 0xe) == (freg & 0xe))
2148     return true;
2149   if ((f & USESF2) != 0
2150       && (USESF2_REG (insn) & 0xe) == (freg & 0xe))
2151     return true;
2152   if ((f & USESF0) != 0
2153       && freg == 0)
2154     return true;
2155 
2156   return false;
2157 }
2158 
2159 /* See whether an instruction sets a floating point register.  */
2160 
2161 static bool
2162 sh_insn_sets_freg (unsigned int insn,
2163 		   const struct sh_opcode *op,
2164 		   unsigned int freg)
2165 {
2166   unsigned int f;
2167 
2168   f = op->flags;
2169 
2170   /* We can't tell if this is a double-precision insn, so just play safe
2171      and assume that it might be.  That means we must test not only FREG
2172      against itself, but also an even FREG against FREG+1 - in case the
2173      using insn uses just the low part of a double precision value - and
2174      an odd FREG against FREG-1 - in case the setting insn sets just the
2175      low part of a double precision value.
2176      What this all boils down to is that we have to ignore the lowest
2177      bit of the register number.  */
2178 
2179   if ((f & SETSF1) != 0
2180       && (SETSF1_REG (insn) & 0xe) == (freg & 0xe))
2181     return true;
2182 
2183   return false;
2184 }
2185 
2186 /* See whether an instruction uses or sets a floating point register.  */
2187 
2188 static bool
2189 sh_insn_uses_or_sets_freg (unsigned int insn,
2190 			   const struct sh_opcode *op,
2191 			   unsigned int reg)
2192 {
2193   if (sh_insn_uses_freg (insn, op, reg))
2194     return true;
2195 
2196   return sh_insn_sets_freg (insn, op, reg);
2197 }
2198 
2199 /* See whether instructions I1 and I2 conflict, assuming I1 comes
2200    before I2.  OP1 and OP2 are the corresponding sh_opcode structures.
2201    This should return TRUE if there is a conflict, or FALSE if the
2202    instructions can be swapped safely.  */
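/* For instance (illustrative only): with I1 = 0x321c ("add r1,r2") and
   I2 = 0x6322 ("mov.l @r2,r3"), I1 sets r2 and I2 uses r2, so the two
   instructions conflict and must not be reordered.  */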
2203 
2204 static bool
2205 sh_insns_conflict (unsigned int i1,
2206 		   const struct sh_opcode *op1,
2207 		   unsigned int i2,
2208 		   const struct sh_opcode *op2)
2209 {
2210   unsigned int f1, f2;
2211 
2212   f1 = op1->flags;
2213   f2 = op2->flags;
2214 
2215   /* Load of fpscr conflicts with floating point operations.
2216      FIXME: shouldn't test raw opcodes here.  */
2217   if (((i1 & 0xf0ff) == 0x4066 && (i2 & 0xf000) == 0xf000)
2218       || ((i2 & 0xf0ff) == 0x4066 && (i1 & 0xf000) == 0xf000))
2219     return true;
2220 
2221   if ((f1 & (BRANCH | DELAY)) != 0
2222       || (f2 & (BRANCH | DELAY)) != 0)
2223     return true;
2224 
2225   if (((f1 | f2) & SETSSP)
2226       && (f1 & (SETSSP | USESSP))
2227       && (f2 & (SETSSP | USESSP)))
2228     return true;
2229 
2230   if ((f1 & SETS1) != 0
2231       && sh_insn_uses_or_sets_reg (i2, op2, SETS1_REG (i1)))
2232     return true;
2233   if ((f1 & SETS2) != 0
2234       && sh_insn_uses_or_sets_reg (i2, op2, SETS2_REG (i1)))
2235     return true;
2236   if ((f1 & SETSR0) != 0
2237       && sh_insn_uses_or_sets_reg (i2, op2, 0))
2238     return true;
2239   if ((f1 & SETSAS)
2240       && sh_insn_uses_or_sets_reg (i2, op2, SETSAS_REG (i1)))
2241     return true;
2242   if ((f1 & SETSF1) != 0
2243       && sh_insn_uses_or_sets_freg (i2, op2, SETSF1_REG (i1)))
2244     return true;
2245 
2246   if ((f2 & SETS1) != 0
2247       && sh_insn_uses_or_sets_reg (i1, op1, SETS1_REG (i2)))
2248     return true;
2249   if ((f2 & SETS2) != 0
2250       && sh_insn_uses_or_sets_reg (i1, op1, SETS2_REG (i2)))
2251     return true;
2252   if ((f2 & SETSR0) != 0
2253       && sh_insn_uses_or_sets_reg (i1, op1, 0))
2254     return true;
2255   if ((f2 & SETSAS)
2256       && sh_insn_uses_or_sets_reg (i1, op1, SETSAS_REG (i2)))
2257     return true;
2258   if ((f2 & SETSF1) != 0
2259       && sh_insn_uses_or_sets_freg (i1, op1, SETSF1_REG (i2)))
2260     return true;
2261 
2262   /* The instructions do not conflict.  */
2263   return false;
2264 }
2265 
2266 /* I1 is a load instruction, and I2 is some other instruction.  Return
2267    TRUE if I1 loads a register which I2 uses.  */
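/* For instance (illustrative only): I1 = 0x6502 ("mov.l @r0,r5") loads
   r5 and I2 = 0x375c ("add r5,r7") uses r5, so placing I2 directly
   after I1 would create a load-use pipeline bubble.  */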
2268 
2269 static bool
2270 sh_load_use (unsigned int i1,
2271 	     const struct sh_opcode *op1,
2272 	     unsigned int i2,
2273 	     const struct sh_opcode *op2)
2274 {
2275   unsigned int f1;
2276 
2277   f1 = op1->flags;
2278 
2279   if ((f1 & LOAD) == 0)
2280     return false;
2281 
2282   /* If both SETS1 and SETSSP are set, that means a load to a special
2283      register using postincrement addressing mode, which we don't care
2284      about here.  */
2285   if ((f1 & SETS1) != 0
2286       && (f1 & SETSSP) == 0
2287       && sh_insn_uses_reg (i2, op2, (i1 & 0x0f00) >> 8))
2288     return true;
2289 
2290   if ((f1 & SETSR0) != 0
2291       && sh_insn_uses_reg (i2, op2, 0))
2292     return true;
2293 
2294   if ((f1 & SETSF1) != 0
2295       && sh_insn_uses_freg (i2, op2, (i1 & 0x0f00) >> 8))
2296     return true;
2297 
2298   return false;
2299 }
2300 
2301 /* Try to align loads and stores within a span of memory.  This is
2302    called by both the ELF and the COFF sh targets.  ABFD and SEC are
2303    the BFD and section we are examining.  CONTENTS is the contents of
2304    the section.  SWAP is the routine to call to swap two instructions.
2305    RELOCS is a pointer to the internal relocation information, to be
2306    passed to SWAP.  PLABEL is a pointer to the current label in a
2307    sorted list of labels; LABEL_END is the end of the list.  START and
2308    STOP are the range of memory to examine.  If a swap is made,
2309    *PSWAPPED is set to TRUE.  */
2310 
2311 #ifdef COFF_WITH_PE
2312 static
2313 #endif
2314 bool
2315 _bfd_sh_align_load_span (bfd *abfd,
2316 			 asection *sec,
2317 			 bfd_byte *contents,
2318 			 bool (*swap) (bfd *, asection *, void *, bfd_byte *, bfd_vma),
2319 			 void * relocs,
2320 			 bfd_vma **plabel,
2321 			 bfd_vma *label_end,
2322 			 bfd_vma start,
2323 			 bfd_vma stop,
2324 			 bool *pswapped)
2325 {
2326   int dsp = (abfd->arch_info->mach == bfd_mach_sh_dsp
2327 	     || abfd->arch_info->mach == bfd_mach_sh3_dsp);
2328   bfd_vma i;
2329 
2330   /* The SH4 has a Harvard architecture, hence aligning loads is not
2331      desirable.  In fact, it is counter-productive, since it interferes
2332      with the schedules generated by the compiler.  */
2333   if (abfd->arch_info->mach == bfd_mach_sh4)
2334     return true;
2335 
2336   /* If we are linking sh[3]-dsp code, swap the FPU instructions for DSP
2337      instructions.  */
2338   if (dsp)
2339     {
2340       sh_opcodes[0xf].minor_opcodes = sh_dsp_opcodef;
2341       sh_opcodes[0xf].count = sizeof sh_dsp_opcodef / sizeof sh_dsp_opcodef [0];
2342     }
2343 
2344   /* Instructions should be aligned on 2 byte boundaries.  */
2345   if ((start & 1) == 1)
2346     ++start;
2347 
2348   /* Now look through the unaligned addresses.  */
2349   i = start;
2350   if ((i & 2) == 0)
2351     i += 2;
2352   for (; i < stop; i += 4)
2353     {
2354       unsigned int insn;
2355       const struct sh_opcode *op;
2356       unsigned int prev_insn = 0;
2357       const struct sh_opcode *prev_op = NULL;
2358 
2359       insn = bfd_get_16 (abfd, contents + i);
2360       op = sh_insn_info (insn);
2361       if (op == NULL
2362 	  || (op->flags & (LOAD | STORE)) == 0)
2363 	continue;
2364 
2365       /* This is a load or store which is not on a four byte boundary.  */
2366 
2367       while (*plabel < label_end && **plabel < i)
2368 	++*plabel;
2369 
2370       if (i > start)
2371 	{
2372 	  prev_insn = bfd_get_16 (abfd, contents + i - 2);
2373 	  /* If INSN is the field b of a parallel processing insn, it is not
2374 	     a load / store after all.  Note that the test here might mistake
2375 	     the field_b of a pcopy insn for the starting code of a parallel
2376 	     processing insn; this might miss a swapping opportunity, but at
2377 	     least we're on the safe side.  */
2378 	  if (dsp && (prev_insn & 0xfc00) == 0xf800)
2379 	    continue;
2380 
2381 	  /* Check if prev_insn is actually the field b of a parallel
2382 	     processing insn.  Again, this can give a spurious match
2383 	     after a pcopy.  */
2384 	  if (dsp && i - 2 > start)
2385 	    {
2386 	      unsigned pprev_insn = bfd_get_16 (abfd, contents + i - 4);
2387 
2388 	      if ((pprev_insn & 0xfc00) == 0xf800)
2389 		prev_op = NULL;
2390 	      else
2391 		prev_op = sh_insn_info (prev_insn);
2392 	    }
2393 	  else
2394 	    prev_op = sh_insn_info (prev_insn);
2395 
2396 	  /* If the load/store instruction is in a delay slot, we
2397 	     can't swap.  */
2398 	  if (prev_op == NULL
2399 	      || (prev_op->flags & DELAY) != 0)
2400 	    continue;
2401 	}
2402       if (i > start
2403 	  && (*plabel >= label_end || **plabel != i)
2404 	  && prev_op != NULL
2405 	  && (prev_op->flags & (LOAD | STORE)) == 0
2406 	  && ! sh_insns_conflict (prev_insn, prev_op, insn, op))
2407 	{
2408 	  bool ok;
2409 
2410 	  /* The load/store instruction does not have a label, and
2411 	     there is a previous instruction; PREV_INSN is not
2412 	     itself a load/store instruction, and PREV_INSN and
2413 	     INSN do not conflict.  */
2414 
2415 	  ok = true;
2416 
2417 	  if (i >= start + 4)
2418 	    {
2419 	      unsigned int prev2_insn;
2420 	      const struct sh_opcode *prev2_op;
2421 
2422 	      prev2_insn = bfd_get_16 (abfd, contents + i - 4);
2423 	      prev2_op = sh_insn_info (prev2_insn);
2424 
2425 	      /* If the instruction before PREV_INSN has a delay
2426 		 slot--that is, PREV_INSN is in a delay slot--we
2427 		 can not swap.  */
2428 	      if (prev2_op == NULL
2429 		  || (prev2_op->flags & DELAY) != 0)
2430 		ok = false;
2431 
2432 	      /* If the instruction before PREV_INSN is a load,
2433 		 and it sets a register which INSN uses, then
2434 		 putting INSN immediately after PREV_INSN will
2435 		 cause a pipeline bubble, so there is no point to
2436 		 making the swap.  */
2437 	      if (ok
2438 		  && (prev2_op->flags & LOAD) != 0
2439 		  && sh_load_use (prev2_insn, prev2_op, insn, op))
2440 		ok = false;
2441 	    }
2442 
2443 	  if (ok)
2444 	    {
2445 	      if (! (*swap) (abfd, sec, relocs, contents, i - 2))
2446 		return false;
2447 	      *pswapped = true;
2448 	      continue;
2449 	    }
2450 	}
2451 
2452       while (*plabel < label_end && **plabel < i + 2)
2453 	++*plabel;
2454 
2455       if (i + 2 < stop
2456 	  && (*plabel >= label_end || **plabel != i + 2))
2457 	{
2458 	  unsigned int next_insn;
2459 	  const struct sh_opcode *next_op;
2460 
2461 	  /* There is an instruction after the load/store
2462 	     instruction, and it does not have a label.  */
2463 	  next_insn = bfd_get_16 (abfd, contents + i + 2);
2464 	  next_op = sh_insn_info (next_insn);
2465 	  if (next_op != NULL
2466 	      && (next_op->flags & (LOAD | STORE)) == 0
2467 	      && ! sh_insns_conflict (insn, op, next_insn, next_op))
2468 	    {
2469 	      bool ok;
2470 
2471 	      /* NEXT_INSN is not itself a load/store instruction,
2472 		 and it does not conflict with INSN.  */
2473 
2474 	      ok = true;
2475 
2476 	      /* If PREV_INSN is a load, and it sets a register
2477 		 which NEXT_INSN uses, then putting NEXT_INSN
2478 		 immediately after PREV_INSN will cause a pipeline
2479 		 bubble, so there is no reason to make this swap.  */
2480 	      if (prev_op != NULL
2481 		  && (prev_op->flags & LOAD) != 0
2482 		  && sh_load_use (prev_insn, prev_op, next_insn, next_op))
2483 		ok = false;
2484 
2485 	      /* If INSN is a load, and it sets a register which
2486 		 the insn after NEXT_INSN uses, then doing the
2487 		 swap will cause a pipeline bubble, so there is no
2488 		 reason to make the swap.  However, if the insn
2489 		 after NEXT_INSN is itself a load or store
2490 		 instruction, then it is misaligned, so
2491 		 optimistically hope that it will be swapped
2492 		 itself, and just live with the pipeline bubble if
2493 		 it isn't.  */
2494 	      if (ok
2495 		  && i + 4 < stop
2496 		  && (op->flags & LOAD) != 0)
2497 		{
2498 		  unsigned int next2_insn;
2499 		  const struct sh_opcode *next2_op;
2500 
2501 		  next2_insn = bfd_get_16 (abfd, contents + i + 4);
2502 		  next2_op = sh_insn_info (next2_insn);
2503 		  if (next2_op == NULL
2504 		      || ((next2_op->flags & (LOAD | STORE)) == 0
2505 			  && sh_load_use (insn, op, next2_insn, next2_op)))
2506 		    ok = false;
2507 		}
2508 
2509 	      if (ok)
2510 		{
2511 		  if (! (*swap) (abfd, sec, relocs, contents, i))
2512 		    return false;
2513 		  *pswapped = true;
2514 		  continue;
2515 		}
2516 	    }
2517 	}
2518     }
2519 
2520   return true;
2521 }
2522 #endif /* not COFF_IMAGE_WITH_PE */
2523 
2524 /* Swap two SH instructions.  */
2525 
2526 static bool
2527 sh_swap_insns (bfd *      abfd,
2528 	       asection * sec,
2529 	       void *     relocs,
2530 	       bfd_byte * contents,
2531 	       bfd_vma    addr)
2532 {
2533   struct internal_reloc *internal_relocs = (struct internal_reloc *) relocs;
2534   unsigned short i1, i2;
2535   struct internal_reloc *irel, *irelend;
2536 
2537   /* Swap the instructions themselves.  */
2538   i1 = bfd_get_16 (abfd, contents + addr);
2539   i2 = bfd_get_16 (abfd, contents + addr + 2);
2540   bfd_put_16 (abfd, (bfd_vma) i2, contents + addr);
2541   bfd_put_16 (abfd, (bfd_vma) i1, contents + addr + 2);
2542 
2543   /* Adjust all reloc addresses.  */
2544   irelend = internal_relocs + sec->reloc_count;
2545   for (irel = internal_relocs; irel < irelend; irel++)
2546     {
2547       int type, add;
2548 
2549       /* There are a few special types of relocs that we don't want to
2550 	 adjust.  These relocs do not apply to the instruction itself,
2551 	 but are only associated with the address.  */
2552       type = irel->r_type;
2553       if (type == R_SH_ALIGN
2554 	  || type == R_SH_CODE
2555 	  || type == R_SH_DATA
2556 	  || type == R_SH_LABEL)
2557 	continue;
2558 
2559       /* If an R_SH_USES reloc points to one of the addresses being
2560 	 swapped, we must adjust it.  It would be incorrect to do this
2561 	 for a jump, though, since we want to execute both
2562 	 instructions after the jump.  (We have avoided swapping
2563 	 around a label, so the jump will not wind up executing an
2564 	 instruction it shouldn't).  */
2565       if (type == R_SH_USES)
2566 	{
2567 	  bfd_vma off;
2568 
2569 	  off = irel->r_vaddr - sec->vma + 4 + irel->r_offset;
2570 	  if (off == addr)
2571 	    irel->r_offset += 2;
2572 	  else if (off == addr + 2)
2573 	    irel->r_offset -= 2;
2574 	}
2575 
2576       if (irel->r_vaddr - sec->vma == addr)
2577 	{
2578 	  irel->r_vaddr += 2;
2579 	  add = -2;
2580 	}
2581       else if (irel->r_vaddr - sec->vma == addr + 2)
2582 	{
2583 	  irel->r_vaddr -= 2;
2584 	  add = 2;
2585 	}
2586       else
2587 	add = 0;
2588 
2589       if (add != 0)
2590 	{
2591 	  bfd_byte *loc;
2592 	  unsigned short insn, oinsn;
2593 	  bool overflow;
2594 
2595 	  loc = contents + irel->r_vaddr - sec->vma;
2596 	  overflow = false;
2597 	  switch (type)
2598 	    {
2599 	    default:
2600 	      break;
2601 
2602 	    case R_SH_PCDISP8BY2:
2603 	    case R_SH_PCRELIMM8BY2:
2604 	      insn = bfd_get_16 (abfd, loc);
2605 	      oinsn = insn;
2606 	      insn += add / 2;
2607 	      if ((oinsn & 0xff00) != (insn & 0xff00))
2608 		overflow = true;
2609 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2610 	      break;
2611 
2612 	    case R_SH_PCDISP:
2613 	      insn = bfd_get_16 (abfd, loc);
2614 	      oinsn = insn;
2615 	      insn += add / 2;
2616 	      if ((oinsn & 0xf000) != (insn & 0xf000))
2617 		overflow = true;
2618 	      bfd_put_16 (abfd, (bfd_vma) insn, loc);
2619 	      break;
2620 
2621 	    case R_SH_PCRELIMM8BY4:
2622 	      /* This reloc ignores the least significant 3 bits of
2623 		 the program counter before adding in the offset.
2624 		 This means that if ADDR lies on a four byte boundary, the
2625 		 swap will not affect the offset.  If ADDR is not on a four
2626 		 byte boundary, then the swap moves the instruction across
2627 		 one, and the offset must be adjusted.  */
2628 	      if ((addr & 3) != 0)
2629 		{
2630 		  insn = bfd_get_16 (abfd, loc);
2631 		  oinsn = insn;
2632 		  insn += add / 2;
2633 		  if ((oinsn & 0xff00) != (insn & 0xff00))
2634 		    overflow = true;
2635 		  bfd_put_16 (abfd, (bfd_vma) insn, loc);
2636 		}
2637 
2638 	      break;
2639 	    }
2640 
2641 	  if (overflow)
2642 	    {
2643 	      _bfd_error_handler
2644 		/* xgettext: c-format */
2645 		(_("%pB: %#" PRIx64 ": fatal: reloc overflow while relaxing"),
2646 		 abfd, (uint64_t) irel->r_vaddr);
2647 	      bfd_set_error (bfd_error_bad_value);
2648 	      return false;
2649 	    }
2650 	}
2651     }
2652 
2653   return true;
2654 }
2655 
2656 /* Look for loads and stores which we can align to four byte
2657    boundaries.  See the longer comment above sh_relax_section for why
2658    this is desirable.  This sets *PSWAPPED if some instruction was
2659    swapped.  */
2660 
2661 static bool
2662 sh_align_loads (bfd *abfd,
2663 		asection *sec,
2664 		struct internal_reloc *internal_relocs,
2665 		bfd_byte *contents,
2666 		bool *pswapped)
2667 {
2668   struct internal_reloc *irel, *irelend;
2669   bfd_vma *labels = NULL;
2670   bfd_vma *label, *label_end;
2671   bfd_size_type amt;
2672 
2673   *pswapped = false;
2674 
2675   irelend = internal_relocs + sec->reloc_count;
2676 
2677   /* Get all the addresses with labels on them.  */
2678   amt = (bfd_size_type) sec->reloc_count * sizeof (bfd_vma);
2679   labels = (bfd_vma *) bfd_malloc (amt);
2680   if (labels == NULL)
2681     goto error_return;
2682   label_end = labels;
2683   for (irel = internal_relocs; irel < irelend; irel++)
2684     {
2685       if (irel->r_type == R_SH_LABEL)
2686 	{
2687 	  *label_end = irel->r_vaddr - sec->vma;
2688 	  ++label_end;
2689 	}
2690     }
2691 
2692   /* Note that the assembler currently always outputs relocs in
2693      address order.  If that ever changes, this code will need to sort
2694      the label values and the relocs.  */
2695 
2696   label = labels;
2697 
2698   for (irel = internal_relocs; irel < irelend; irel++)
2699     {
2700       bfd_vma start, stop;
2701 
2702       if (irel->r_type != R_SH_CODE)
2703 	continue;
2704 
2705       start = irel->r_vaddr - sec->vma;
2706 
2707       for (irel++; irel < irelend; irel++)
2708 	if (irel->r_type == R_SH_DATA)
2709 	  break;
2710       if (irel < irelend)
2711 	stop = irel->r_vaddr - sec->vma;
2712       else
2713 	stop = sec->size;
2714 
2715       if (! _bfd_sh_align_load_span (abfd, sec, contents, sh_swap_insns,
2716 				     internal_relocs, &label,
2717 				     label_end, start, stop, pswapped))
2718 	goto error_return;
2719     }
2720 
2721   free (labels);
2722 
2723   return true;
2724 
2725  error_return:
2726   free (labels);
2727   return false;
2728 }
2729 
2730 /* This is a modification of _bfd_coff_generic_relocate_section, which
2731    will handle SH relaxing.  */
2732 
2733 static bool
2734 sh_relocate_section (bfd *output_bfd ATTRIBUTE_UNUSED,
2735 		     struct bfd_link_info *info,
2736 		     bfd *input_bfd,
2737 		     asection *input_section,
2738 		     bfd_byte *contents,
2739 		     struct internal_reloc *relocs,
2740 		     struct internal_syment *syms,
2741 		     asection **sections)
2742 {
2743   struct internal_reloc *rel;
2744   struct internal_reloc *relend;
2745 
2746   rel = relocs;
2747   relend = rel + input_section->reloc_count;
2748   for (; rel < relend; rel++)
2749     {
2750       long symndx;
2751       struct coff_link_hash_entry *h;
2752       struct internal_syment *sym;
2753       bfd_vma addend;
2754       bfd_vma val;
2755       reloc_howto_type *howto;
2756       bfd_reloc_status_type rstat;
2757 
2758       /* Almost all relocs have to do with relaxing.  If any work must
2759 	 be done for them, it has been done in sh_relax_section.  */
2760       if (rel->r_type != R_SH_IMM32
2761 #ifdef COFF_WITH_PE
2762 	  && rel->r_type != R_SH_IMM32CE
2763 	  && rel->r_type != R_SH_IMAGEBASE
2764 #endif
2765 	  && rel->r_type != R_SH_PCDISP)
2766 	continue;
2767 
2768       symndx = rel->r_symndx;
2769 
2770       if (symndx == -1)
2771 	{
2772 	  h = NULL;
2773 	  sym = NULL;
2774 	}
2775       else
2776 	{
2777 	  if (symndx < 0
2778 	      || (unsigned long) symndx >= obj_raw_syment_count (input_bfd))
2779 	    {
2780 	      _bfd_error_handler
2781 		/* xgettext: c-format */
2782 		(_("%pB: illegal symbol index %ld in relocs"),
2783 		 input_bfd, symndx);
2784 	      bfd_set_error (bfd_error_bad_value);
2785 	      return false;
2786 	    }
2787 	  h = obj_coff_sym_hashes (input_bfd)[symndx];
2788 	  sym = syms + symndx;
2789 	}
2790 
2791       if (sym != NULL && sym->n_scnum != 0)
2792 	addend = - sym->n_value;
2793       else
2794 	addend = 0;
2795 
2796       if (rel->r_type == R_SH_PCDISP)
2797 	addend -= 4;
2798 
2799       if (rel->r_type >= SH_COFF_HOWTO_COUNT)
2800 	howto = NULL;
2801       else
2802 	howto = &sh_coff_howtos[rel->r_type];
2803 
2804       if (howto == NULL)
2805 	{
2806 	  bfd_set_error (bfd_error_bad_value);
2807 	  return false;
2808 	}
2809 
2810 #ifdef COFF_WITH_PE
2811       if (rel->r_type == R_SH_IMAGEBASE)
2812 	addend -= pe_data (input_section->output_section->owner)->pe_opthdr.ImageBase;
2813 #endif
2814 
2815       val = 0;
2816 
2817       if (h == NULL)
2818 	{
2819 	  asection *sec;
2820 
2821 	  /* There is nothing to do for an internal PCDISP reloc.  */
2822 	  if (rel->r_type == R_SH_PCDISP)
2823 	    continue;
2824 
2825 	  if (symndx == -1)
2826 	    {
2827 	      sec = bfd_abs_section_ptr;
2828 	      val = 0;
2829 	    }
2830 	  else
2831 	    {
2832 	      sec = sections[symndx];
2833 	      val = (sec->output_section->vma
2834 		     + sec->output_offset
2835 		     + sym->n_value
2836 		     - sec->vma);
2837 	    }
2838 	}
2839       else
2840 	{
2841 	  if (h->root.type == bfd_link_hash_defined
2842 	      || h->root.type == bfd_link_hash_defweak)
2843 	    {
2844 	      asection *sec;
2845 
2846 	      sec = h->root.u.def.section;
2847 	      val = (h->root.u.def.value
2848 		     + sec->output_section->vma
2849 		     + sec->output_offset);
2850 	    }
2851 	  else if (! bfd_link_relocatable (info))
2852 	    (*info->callbacks->undefined_symbol)
2853 	      (info, h->root.root.string, input_bfd, input_section,
2854 	       rel->r_vaddr - input_section->vma, true);
2855 	}
2856 
2857       rstat = _bfd_final_link_relocate (howto, input_bfd, input_section,
2858 					contents,
2859 					rel->r_vaddr - input_section->vma,
2860 					val, addend);
2861 
2862       switch (rstat)
2863 	{
2864 	default:
2865 	  abort ();
2866 	case bfd_reloc_ok:
2867 	  break;
2868 	case bfd_reloc_overflow:
2869 	  {
2870 	    const char *name;
2871 	    char buf[SYMNMLEN + 1];
2872 
2873 	    if (symndx == -1)
2874 	      name = "*ABS*";
2875 	    else if (h != NULL)
2876 	      name = NULL;
2877 	    else if (sym->_n._n_n._n_zeroes == 0
2878 		     && sym->_n._n_n._n_offset != 0)
2879 	      name = obj_coff_strings (input_bfd) + sym->_n._n_n._n_offset;
2880 	    else
2881 	      {
2882 		strncpy (buf, sym->_n._n_name, SYMNMLEN);
2883 		buf[SYMNMLEN] = '\0';
2884 		name = buf;
2885 	      }
2886 
2887 	    (*info->callbacks->reloc_overflow)
2888 	      (info, (h ? &h->root : NULL), name, howto->name,
2889 	       (bfd_vma) 0, input_bfd, input_section,
2890 	       rel->r_vaddr - input_section->vma);
2891 	  }
2892 	}
2893     }
2894 
2895   return true;
2896 }
2897 
2898 /* This is a version of bfd_generic_get_relocated_section_contents
2899    which uses sh_relocate_section.  */
2900 
2901 static bfd_byte *
2902 sh_coff_get_relocated_section_contents (bfd *output_bfd,
2903 					struct bfd_link_info *link_info,
2904 					struct bfd_link_order *link_order,
2905 					bfd_byte *data,
2906 					bool relocatable,
2907 					asymbol **symbols)
2908 {
2909   asection *input_section = link_order->u.indirect.section;
2910   bfd *input_bfd = input_section->owner;
2911   asection **sections = NULL;
2912   struct internal_reloc *internal_relocs = NULL;
2913   struct internal_syment *internal_syms = NULL;
2914 
2915   /* We only need to handle the case of relaxing, or of having a
2916      particular set of section contents, specially.  */
2917   if (relocatable
2918       || coff_section_data (input_bfd, input_section) == NULL
2919       || coff_section_data (input_bfd, input_section)->contents == NULL)
2920     return bfd_generic_get_relocated_section_contents (output_bfd, link_info,
2921 						       link_order, data,
2922 						       relocatable,
2923 						       symbols);
2924 
2925   bfd_byte *orig_data = data;
2926   if (data == NULL)
2927     {
2928       data = bfd_malloc (input_section->size);
2929       if (data == NULL)
2930 	return NULL;
2931     }
2932   memcpy (data, coff_section_data (input_bfd, input_section)->contents,
2933 	  (size_t) input_section->size);
2934 
2935   if ((input_section->flags & SEC_RELOC) != 0
2936       && input_section->reloc_count > 0)
2937     {
2938       bfd_size_type symesz = bfd_coff_symesz (input_bfd);
2939       bfd_byte *esym, *esymend;
2940       struct internal_syment *isymp;
2941       asection **secpp;
2942       bfd_size_type amt;
2943 
2944       if (! _bfd_coff_get_external_symbols (input_bfd))
2945 	goto error_return;
2946 
2947       internal_relocs = (_bfd_coff_read_internal_relocs
2948 			 (input_bfd, input_section, false, (bfd_byte *) NULL,
2949 			  false, (struct internal_reloc *) NULL));
2950       if (internal_relocs == NULL)
2951 	goto error_return;
2952 
2953       amt = obj_raw_syment_count (input_bfd);
2954       amt *= sizeof (struct internal_syment);
2955       internal_syms = (struct internal_syment *) bfd_malloc (amt);
2956       if (internal_syms == NULL)
2957 	goto error_return;
2958 
2959       amt = obj_raw_syment_count (input_bfd);
2960       amt *= sizeof (asection *);
2961       sections = (asection **) bfd_malloc (amt);
2962       if (sections == NULL)
2963 	goto error_return;
2964 
2965       isymp = internal_syms;
2966       secpp = sections;
2967       esym = (bfd_byte *) obj_coff_external_syms (input_bfd);
2968       esymend = esym + obj_raw_syment_count (input_bfd) * symesz;
2969       while (esym < esymend)
2970 	{
2971 	  bfd_coff_swap_sym_in (input_bfd, esym, isymp);
2972 
2973 	  if (isymp->n_scnum != 0)
2974 	    *secpp = coff_section_from_bfd_index (input_bfd, isymp->n_scnum);
2975 	  else
2976 	    {
2977 	      if (isymp->n_value == 0)
2978 		*secpp = bfd_und_section_ptr;
2979 	      else
2980 		*secpp = bfd_com_section_ptr;
2981 	    }
2982 
2983 	  esym += (isymp->n_numaux + 1) * symesz;
2984 	  secpp += isymp->n_numaux + 1;
2985 	  isymp += isymp->n_numaux + 1;
2986 	}
2987 
2988       if (! sh_relocate_section (output_bfd, link_info, input_bfd,
2989 				 input_section, data, internal_relocs,
2990 				 internal_syms, sections))
2991 	goto error_return;
2992 
2993       free (sections);
2994       sections = NULL;
2995       free (internal_syms);
2996       internal_syms = NULL;
2997       free (internal_relocs);
2998       internal_relocs = NULL;
2999     }
3000 
3001   return data;
3002 
3003  error_return:
3004   free (internal_relocs);
3005   free (internal_syms);
3006   free (sections);
3007   if (orig_data == NULL)
3008     free (data);
3009   return NULL;
3010 }
3011 
3012 /* The target vectors.  */
3013 
3014 #ifndef TARGET_SHL_SYM
3015 CREATE_BIG_COFF_TARGET_VEC (sh_coff_vec, "coff-sh", BFD_IS_RELAXABLE, 0, '_', NULL, COFF_SWAP_TABLE)
3016 #endif
3017 
3018 #ifdef TARGET_SHL_SYM
3019 #define TARGET_SYM TARGET_SHL_SYM
3020 #else
3021 #define TARGET_SYM sh_coff_le_vec
3022 #endif
3023 
3024 #ifndef TARGET_SHL_NAME
3025 #define TARGET_SHL_NAME "coff-shl"
3026 #endif
3027 
3028 #ifdef COFF_WITH_PE
3029 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3030 			       SEC_CODE | SEC_DATA, '_', NULL, COFF_SWAP_TABLE);
3031 #else
3032 CREATE_LITTLE_COFF_TARGET_VEC (TARGET_SYM, TARGET_SHL_NAME, BFD_IS_RELAXABLE,
3033 			       0, '_', NULL, COFF_SWAP_TABLE)
3034 #endif
3035 
3036 #ifndef TARGET_SHL_SYM
3037 
3038 /* Some people want versions of the SH COFF target which do not align
3039    to 16 byte boundaries.  We implement that by adding a couple of new
3040    target vectors.  These are just like the ones above, but they
3041    change the default section alignment.  To generate them in the
3042    assembler, use -small.  To use them in the linker, use -b
3043    coff-sh{l}-small and -oformat coff-sh{l}-small.
3044 
3045    Yes, this is a horrible hack.  A general solution for setting
3046    section alignment in COFF is rather complex.  ELF handles this
3047    correctly.  */
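/* A typical (illustrative) invocation, using placeholder tool names for
   whatever the configured sh-coff assembler and linker are called:

     as -small -o foo.o foo.s
     ld -b coff-sh-small -oformat coff-sh-small -o foo foo.o  */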
3048 
3049 /* Only recognize the small versions if the target was not defaulted.
3050    Otherwise we won't recognize the non default endianness.  */
3051 
3052 static bfd_cleanup
3053 coff_small_object_p (bfd *abfd)
3054 {
3055   if (abfd->target_defaulted)
3056     {
3057       bfd_set_error (bfd_error_wrong_format);
3058       return NULL;
3059     }
3060   return coff_object_p (abfd);
3061 }
3062 
3063 /* Set the section alignment for the small versions.  */
3064 
3065 static bool
3066 coff_small_new_section_hook (bfd *abfd, asection *section)
3067 {
3068   if (! coff_new_section_hook (abfd, section))
3069     return false;
3070 
3071   /* We must align to at least a four byte boundary, because longword
3072      accesses must be on a four byte boundary.  */
3073   if (section->alignment_power == COFF_DEFAULT_SECTION_ALIGNMENT_POWER)
3074     section->alignment_power = 2;
3075 
3076   return true;
3077 }
3078 
3079 /* This is copied from bfd_coff_std_swap_table so that we can change
3080    the default section alignment power.  */
3081 
3082 static bfd_coff_backend_data bfd_coff_small_swap_table =
3083 {
3084   coff_swap_aux_in, coff_swap_sym_in, coff_swap_lineno_in,
3085   coff_swap_aux_out, coff_swap_sym_out,
3086   coff_swap_lineno_out, coff_swap_reloc_out,
3087   coff_swap_filehdr_out, coff_swap_aouthdr_out,
3088   coff_swap_scnhdr_out,
3089   FILHSZ, AOUTSZ, SCNHSZ, SYMESZ, AUXESZ, RELSZ, LINESZ, FILNMLEN,
3090 #ifdef COFF_LONG_FILENAMES
3091   true,
3092 #else
3093   false,
3094 #endif
3095   COFF_DEFAULT_LONG_SECTION_NAMES,
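  /* Default section alignment power - the field these small vectors
     exist to override.  */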
3096   2,
3097 #ifdef COFF_FORCE_SYMBOLS_IN_STRINGS
3098   true,
3099 #else
3100   false,
3101 #endif
3102 #ifdef COFF_DEBUG_STRING_WIDE_PREFIX
3103   4,
3104 #else
3105   2,
3106 #endif
3107   32768,
3108   coff_swap_filehdr_in, coff_swap_aouthdr_in, coff_swap_scnhdr_in,
3109   coff_swap_reloc_in, coff_bad_format_hook, coff_set_arch_mach_hook,
3110   coff_mkobject_hook, styp_to_sec_flags, coff_set_alignment_hook,
3111   coff_slurp_symbol_table, symname_in_debug_hook, coff_pointerize_aux_hook,
3112   coff_print_aux, coff_reloc16_extra_cases, coff_reloc16_estimate,
3113   coff_classify_symbol, coff_compute_section_file_positions,
3114   coff_start_final_link, coff_relocate_section, coff_rtype_to_howto,
3115   coff_adjust_symndx, coff_link_add_one_symbol,
3116   coff_link_output_has_begun, coff_final_link_postscript,
3117   bfd_pe_print_pdata
3118 };
3119 
3120 #define coff_small_close_and_cleanup \
3121   coff_close_and_cleanup
3122 #define coff_small_bfd_free_cached_info \
3123   coff_bfd_free_cached_info
3124 #define coff_small_get_section_contents \
3125   coff_get_section_contents
3126 #define coff_small_get_section_contents_in_window \
3127   coff_get_section_contents_in_window
3128 
3129 extern const bfd_target sh_coff_small_le_vec;
3130 
3131 const bfd_target sh_coff_small_vec =
3132 {
3133   "coff-sh-small",		/* name */
3134   bfd_target_coff_flavour,
3135   BFD_ENDIAN_BIG,		/* data byte order is big */
3136   BFD_ENDIAN_BIG,		/* header byte order is big */
3137 
3138   (HAS_RELOC | EXEC_P		/* object flags */
3139    | HAS_LINENO | HAS_DEBUG
3140    | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3141 
3142   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3143   '_',				/* leading symbol underscore */
3144   '/',				/* ar_pad_char */
3145   15,				/* ar_max_namelen */
3146   0,				/* match priority.  */
3147   TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols.  */
3148   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3149   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3150   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* data */
3151   bfd_getb64, bfd_getb_signed_64, bfd_putb64,
3152   bfd_getb32, bfd_getb_signed_32, bfd_putb32,
3153   bfd_getb16, bfd_getb_signed_16, bfd_putb16, /* hdrs */
3154 
3155   {				/* bfd_check_format */
3156     _bfd_dummy_target,
3157     coff_small_object_p,
3158     bfd_generic_archive_p,
3159     _bfd_dummy_target
3160   },
3161   {				/* bfd_set_format */
3162     _bfd_bool_bfd_false_error,
3163     coff_mkobject,
3164     _bfd_generic_mkarchive,
3165     _bfd_bool_bfd_false_error
3166   },
3167   {				/* bfd_write_contents */
3168     _bfd_bool_bfd_false_error,
3169     coff_write_object_contents,
3170     _bfd_write_archive_contents,
3171     _bfd_bool_bfd_false_error
3172   },
3173 
3174   BFD_JUMP_TABLE_GENERIC (coff_small),
3175   BFD_JUMP_TABLE_COPY (coff),
3176   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3177   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3178   BFD_JUMP_TABLE_SYMBOLS (coff),
3179   BFD_JUMP_TABLE_RELOCS (coff),
3180   BFD_JUMP_TABLE_WRITE (coff),
3181   BFD_JUMP_TABLE_LINK (coff),
3182   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3183 
3184   &sh_coff_small_le_vec,
3185 
3186   &bfd_coff_small_swap_table
3187 };
3188 
3189 const bfd_target sh_coff_small_le_vec =
3190 {
3191   "coff-shl-small",		/* name */
3192   bfd_target_coff_flavour,
3193   BFD_ENDIAN_LITTLE,		/* data byte order is little */
3194   BFD_ENDIAN_LITTLE,		/* header byte order is little endian too */
3195 
3196   (HAS_RELOC | EXEC_P		/* object flags */
3197    | HAS_LINENO | HAS_DEBUG
3198    | HAS_SYMS | HAS_LOCALS | WP_TEXT | BFD_IS_RELAXABLE),
3199 
3200   (SEC_HAS_CONTENTS | SEC_ALLOC | SEC_LOAD | SEC_RELOC),
3201   '_',				/* leading symbol underscore */
3202   '/',				/* ar_pad_char */
3203   15,				/* ar_max_namelen */
3204   0,				/* match priority.  */
3205   TARGET_KEEP_UNUSED_SECTION_SYMBOLS, /* keep unused section symbols.  */
3206   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3207   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3208   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* data */
3209   bfd_getl64, bfd_getl_signed_64, bfd_putl64,
3210   bfd_getl32, bfd_getl_signed_32, bfd_putl32,
3211   bfd_getl16, bfd_getl_signed_16, bfd_putl16, /* hdrs */
3212 
3213   {				/* bfd_check_format */
3214     _bfd_dummy_target,
3215     coff_small_object_p,
3216     bfd_generic_archive_p,
3217     _bfd_dummy_target
3218   },
3219   {				/* bfd_set_format */
3220     _bfd_bool_bfd_false_error,
3221     coff_mkobject,
3222     _bfd_generic_mkarchive,
3223     _bfd_bool_bfd_false_error
3224   },
3225   {				/* bfd_write_contents */
3226     _bfd_bool_bfd_false_error,
3227     coff_write_object_contents,
3228     _bfd_write_archive_contents,
3229     _bfd_bool_bfd_false_error
3230   },
3231 
3232   BFD_JUMP_TABLE_GENERIC (coff_small),
3233   BFD_JUMP_TABLE_COPY (coff),
3234   BFD_JUMP_TABLE_CORE (_bfd_nocore),
3235   BFD_JUMP_TABLE_ARCHIVE (_bfd_archive_coff),
3236   BFD_JUMP_TABLE_SYMBOLS (coff),
3237   BFD_JUMP_TABLE_RELOCS (coff),
3238   BFD_JUMP_TABLE_WRITE (coff),
3239   BFD_JUMP_TABLE_LINK (coff),
3240   BFD_JUMP_TABLE_DYNAMIC (_bfd_nodynamic),
3241 
3242   &sh_coff_small_vec,
3243 
3244   &bfd_coff_small_swap_table
3245 };
3246 #endif
3247