xref: /netbsd-src/external/gpl3/gdb.old/dist/bfd/elf32-arm.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* 32-bit ELF support for ARM
2    Copyright (C) 1998-2016 Free Software Foundation, Inc.
3 
4    This file is part of BFD, the Binary File Descriptor library.
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3 of the License, or
9    (at your option) any later version.
10 
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with this program; if not, write to the Free Software
18    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19    MA 02110-1301, USA.  */
20 
21 #include "sysdep.h"
22 #include <limits.h>
23 
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32 
33 /* Return the relocation section associated with NAME.  HTAB is the
34    bfd's elf32_arm_link_hash_entry.  */
35 #define RELOC_SECTION(HTAB, NAME) \
36   ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 
38 /* Return size of a relocation entry.  HTAB is the bfd's
39    elf32_arm_link_hash_entry.  */
40 #define RELOC_SIZE(HTAB) \
41   ((HTAB)->use_rel \
42    ? sizeof (Elf32_External_Rel) \
43    : sizeof (Elf32_External_Rela))
44 
45 /* Return function to swap relocations in.  HTAB is the bfd's
46    elf32_arm_link_hash_entry.  */
47 #define SWAP_RELOC_IN(HTAB) \
48   ((HTAB)->use_rel \
49    ? bfd_elf32_swap_reloc_in \
50    : bfd_elf32_swap_reloca_in)
51 
52 /* Return function to swap relocations out.  HTAB is the bfd's
53    elf32_arm_link_hash_entry.  */
54 #define SWAP_RELOC_OUT(HTAB) \
55   ((HTAB)->use_rel \
56    ? bfd_elf32_swap_reloc_out \
57    : bfd_elf32_swap_reloca_out)
58 
59 #define elf_info_to_howto               0
60 #define elf_info_to_howto_rel           elf32_arm_info_to_howto
61 
62 #define ARM_ELF_ABI_VERSION		0
63 #define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
64 
65 /* The Adjusted Place, as defined by AAELF.  */
66 #define Pa(X) ((X) & 0xfffffffc)
67 
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 					    struct bfd_link_info *link_info,
70 					    asection *sec,
71 					    bfd_byte *contents);
72 
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74    R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75    in that slot.  */
76 
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79   /* No relocation.  */
80   HOWTO (R_ARM_NONE,		/* type */
81 	 0,			/* rightshift */
82 	 3,			/* size (0 = byte, 1 = short, 2 = long) */
83 	 0,			/* bitsize */
84 	 FALSE,			/* pc_relative */
85 	 0,			/* bitpos */
86 	 complain_overflow_dont,/* complain_on_overflow */
87 	 bfd_elf_generic_reloc,	/* special_function */
88 	 "R_ARM_NONE",		/* name */
89 	 FALSE,			/* partial_inplace */
90 	 0,			/* src_mask */
91 	 0,			/* dst_mask */
92 	 FALSE),		/* pcrel_offset */
93 
94   HOWTO (R_ARM_PC24,		/* type */
95 	 2,			/* rightshift */
96 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
97 	 24,			/* bitsize */
98 	 TRUE,			/* pc_relative */
99 	 0,			/* bitpos */
100 	 complain_overflow_signed,/* complain_on_overflow */
101 	 bfd_elf_generic_reloc,	/* special_function */
102 	 "R_ARM_PC24",		/* name */
103 	 FALSE,			/* partial_inplace */
104 	 0x00ffffff,		/* src_mask */
105 	 0x00ffffff,		/* dst_mask */
106 	 TRUE),			/* pcrel_offset */
107 
108   /* 32 bit absolute */
109   HOWTO (R_ARM_ABS32,		/* type */
110 	 0,			/* rightshift */
111 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
112 	 32,			/* bitsize */
113 	 FALSE,			/* pc_relative */
114 	 0,			/* bitpos */
115 	 complain_overflow_bitfield,/* complain_on_overflow */
116 	 bfd_elf_generic_reloc,	/* special_function */
117 	 "R_ARM_ABS32",		/* name */
118 	 FALSE,			/* partial_inplace */
119 	 0xffffffff,		/* src_mask */
120 	 0xffffffff,		/* dst_mask */
121 	 FALSE),		/* pcrel_offset */
122 
123   /* standard 32bit pc-relative reloc */
124   HOWTO (R_ARM_REL32,		/* type */
125 	 0,			/* rightshift */
126 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
127 	 32,			/* bitsize */
128 	 TRUE,			/* pc_relative */
129 	 0,			/* bitpos */
130 	 complain_overflow_bitfield,/* complain_on_overflow */
131 	 bfd_elf_generic_reloc,	/* special_function */
132 	 "R_ARM_REL32",		/* name */
133 	 FALSE,			/* partial_inplace */
134 	 0xffffffff,		/* src_mask */
135 	 0xffffffff,		/* dst_mask */
136 	 TRUE),			/* pcrel_offset */
137 
138   /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139   HOWTO (R_ARM_LDR_PC_G0,	/* type */
140 	 0,			/* rightshift */
141 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
142 	 32,			/* bitsize */
143 	 TRUE,			/* pc_relative */
144 	 0,			/* bitpos */
145 	 complain_overflow_dont,/* complain_on_overflow */
146 	 bfd_elf_generic_reloc,	/* special_function */
147 	 "R_ARM_LDR_PC_G0",     /* name */
148 	 FALSE,			/* partial_inplace */
149 	 0xffffffff,		/* src_mask */
150 	 0xffffffff,		/* dst_mask */
151 	 TRUE),			/* pcrel_offset */
152 
153    /* 16 bit absolute */
154   HOWTO (R_ARM_ABS16,		/* type */
155 	 0,			/* rightshift */
156 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
157 	 16,			/* bitsize */
158 	 FALSE,			/* pc_relative */
159 	 0,			/* bitpos */
160 	 complain_overflow_bitfield,/* complain_on_overflow */
161 	 bfd_elf_generic_reloc,	/* special_function */
162 	 "R_ARM_ABS16",		/* name */
163 	 FALSE,			/* partial_inplace */
164 	 0x0000ffff,		/* src_mask */
165 	 0x0000ffff,		/* dst_mask */
166 	 FALSE),		/* pcrel_offset */
167 
168   /* 12 bit absolute */
169   HOWTO (R_ARM_ABS12,		/* type */
170 	 0,			/* rightshift */
171 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
172 	 12,			/* bitsize */
173 	 FALSE,			/* pc_relative */
174 	 0,			/* bitpos */
175 	 complain_overflow_bitfield,/* complain_on_overflow */
176 	 bfd_elf_generic_reloc,	/* special_function */
177 	 "R_ARM_ABS12",		/* name */
178 	 FALSE,			/* partial_inplace */
179 	 0x00000fff,		/* src_mask */
180 	 0x00000fff,		/* dst_mask */
181 	 FALSE),		/* pcrel_offset */
182 
183   HOWTO (R_ARM_THM_ABS5,	/* type */
184 	 6,			/* rightshift */
185 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
186 	 5,			/* bitsize */
187 	 FALSE,			/* pc_relative */
188 	 0,			/* bitpos */
189 	 complain_overflow_bitfield,/* complain_on_overflow */
190 	 bfd_elf_generic_reloc,	/* special_function */
191 	 "R_ARM_THM_ABS5",	/* name */
192 	 FALSE,			/* partial_inplace */
193 	 0x000007e0,		/* src_mask */
194 	 0x000007e0,		/* dst_mask */
195 	 FALSE),		/* pcrel_offset */
196 
197   /* 8 bit absolute */
198   HOWTO (R_ARM_ABS8,		/* type */
199 	 0,			/* rightshift */
200 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
201 	 8,			/* bitsize */
202 	 FALSE,			/* pc_relative */
203 	 0,			/* bitpos */
204 	 complain_overflow_bitfield,/* complain_on_overflow */
205 	 bfd_elf_generic_reloc,	/* special_function */
206 	 "R_ARM_ABS8",		/* name */
207 	 FALSE,			/* partial_inplace */
208 	 0x000000ff,		/* src_mask */
209 	 0x000000ff,		/* dst_mask */
210 	 FALSE),		/* pcrel_offset */
211 
212   HOWTO (R_ARM_SBREL32,		/* type */
213 	 0,			/* rightshift */
214 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
215 	 32,			/* bitsize */
216 	 FALSE,			/* pc_relative */
217 	 0,			/* bitpos */
218 	 complain_overflow_dont,/* complain_on_overflow */
219 	 bfd_elf_generic_reloc,	/* special_function */
220 	 "R_ARM_SBREL32",	/* name */
221 	 FALSE,			/* partial_inplace */
222 	 0xffffffff,		/* src_mask */
223 	 0xffffffff,		/* dst_mask */
224 	 FALSE),		/* pcrel_offset */
225 
226   HOWTO (R_ARM_THM_CALL,	/* type */
227 	 1,			/* rightshift */
228 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
229 	 24,			/* bitsize */
230 	 TRUE,			/* pc_relative */
231 	 0,			/* bitpos */
232 	 complain_overflow_signed,/* complain_on_overflow */
233 	 bfd_elf_generic_reloc,	/* special_function */
234 	 "R_ARM_THM_CALL",	/* name */
235 	 FALSE,			/* partial_inplace */
236 	 0x07ff2fff,		/* src_mask */
237 	 0x07ff2fff,		/* dst_mask */
238 	 TRUE),			/* pcrel_offset */
239 
240   HOWTO (R_ARM_THM_PC8,	        /* type */
241 	 1,			/* rightshift */
242 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
243 	 8,			/* bitsize */
244 	 TRUE,			/* pc_relative */
245 	 0,			/* bitpos */
246 	 complain_overflow_signed,/* complain_on_overflow */
247 	 bfd_elf_generic_reloc,	/* special_function */
248 	 "R_ARM_THM_PC8",	/* name */
249 	 FALSE,			/* partial_inplace */
250 	 0x000000ff,		/* src_mask */
251 	 0x000000ff,		/* dst_mask */
252 	 TRUE),			/* pcrel_offset */
253 
254   HOWTO (R_ARM_BREL_ADJ,	/* type */
255 	 1,			/* rightshift */
256 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
257 	 32,			/* bitsize */
258 	 FALSE,			/* pc_relative */
259 	 0,			/* bitpos */
260 	 complain_overflow_signed,/* complain_on_overflow */
261 	 bfd_elf_generic_reloc,	/* special_function */
262 	 "R_ARM_BREL_ADJ",	/* name */
263 	 FALSE,			/* partial_inplace */
264 	 0xffffffff,		/* src_mask */
265 	 0xffffffff,		/* dst_mask */
266 	 FALSE),		/* pcrel_offset */
267 
268   HOWTO (R_ARM_TLS_DESC,	/* type */
269 	 0,			/* rightshift */
270 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
271 	 32,			/* bitsize */
272 	 FALSE,			/* pc_relative */
273 	 0,			/* bitpos */
274 	 complain_overflow_bitfield,/* complain_on_overflow */
275 	 bfd_elf_generic_reloc,	/* special_function */
276 	 "R_ARM_TLS_DESC",	/* name */
277 	 FALSE,			/* partial_inplace */
278 	 0xffffffff,		/* src_mask */
279 	 0xffffffff,		/* dst_mask */
280 	 FALSE),		/* pcrel_offset */
281 
282   HOWTO (R_ARM_THM_SWI8,	/* type */
283 	 0,			/* rightshift */
284 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
285 	 0,			/* bitsize */
286 	 FALSE,			/* pc_relative */
287 	 0,			/* bitpos */
288 	 complain_overflow_signed,/* complain_on_overflow */
289 	 bfd_elf_generic_reloc,	/* special_function */
290 	 "R_ARM_SWI8",		/* name */
291 	 FALSE,			/* partial_inplace */
292 	 0x00000000,		/* src_mask */
293 	 0x00000000,		/* dst_mask */
294 	 FALSE),		/* pcrel_offset */
295 
296   /* BLX instruction for the ARM.  */
297   HOWTO (R_ARM_XPC25,		/* type */
298 	 2,			/* rightshift */
299 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
300 	 24,			/* bitsize */
301 	 TRUE,			/* pc_relative */
302 	 0,			/* bitpos */
303 	 complain_overflow_signed,/* complain_on_overflow */
304 	 bfd_elf_generic_reloc,	/* special_function */
305 	 "R_ARM_XPC25",		/* name */
306 	 FALSE,			/* partial_inplace */
307 	 0x00ffffff,		/* src_mask */
308 	 0x00ffffff,		/* dst_mask */
309 	 TRUE),			/* pcrel_offset */
310 
311   /* BLX instruction for the Thumb.  */
312   HOWTO (R_ARM_THM_XPC22,	/* type */
313 	 2,			/* rightshift */
314 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
315 	 24,			/* bitsize */
316 	 TRUE,			/* pc_relative */
317 	 0,			/* bitpos */
318 	 complain_overflow_signed,/* complain_on_overflow */
319 	 bfd_elf_generic_reloc,	/* special_function */
320 	 "R_ARM_THM_XPC22",	/* name */
321 	 FALSE,			/* partial_inplace */
322 	 0x07ff2fff,		/* src_mask */
323 	 0x07ff2fff,		/* dst_mask */
324 	 TRUE),			/* pcrel_offset */
325 
326   /* Dynamic TLS relocations.  */
327 
328   HOWTO (R_ARM_TLS_DTPMOD32,	/* type */
329 	 0,                     /* rightshift */
330 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
331 	 32,                    /* bitsize */
332 	 FALSE,                 /* pc_relative */
333 	 0,                     /* bitpos */
334 	 complain_overflow_bitfield,/* complain_on_overflow */
335 	 bfd_elf_generic_reloc, /* special_function */
336 	 "R_ARM_TLS_DTPMOD32",	/* name */
337 	 TRUE,			/* partial_inplace */
338 	 0xffffffff,		/* src_mask */
339 	 0xffffffff,		/* dst_mask */
340 	 FALSE),                /* pcrel_offset */
341 
342   HOWTO (R_ARM_TLS_DTPOFF32,	/* type */
343 	 0,                     /* rightshift */
344 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
345 	 32,                    /* bitsize */
346 	 FALSE,                 /* pc_relative */
347 	 0,                     /* bitpos */
348 	 complain_overflow_bitfield,/* complain_on_overflow */
349 	 bfd_elf_generic_reloc, /* special_function */
350 	 "R_ARM_TLS_DTPOFF32",	/* name */
351 	 TRUE,			/* partial_inplace */
352 	 0xffffffff,		/* src_mask */
353 	 0xffffffff,		/* dst_mask */
354 	 FALSE),                /* pcrel_offset */
355 
356   HOWTO (R_ARM_TLS_TPOFF32,	/* type */
357 	 0,                     /* rightshift */
358 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
359 	 32,                    /* bitsize */
360 	 FALSE,                 /* pc_relative */
361 	 0,                     /* bitpos */
362 	 complain_overflow_bitfield,/* complain_on_overflow */
363 	 bfd_elf_generic_reloc, /* special_function */
364 	 "R_ARM_TLS_TPOFF32",	/* name */
365 	 TRUE,			/* partial_inplace */
366 	 0xffffffff,		/* src_mask */
367 	 0xffffffff,		/* dst_mask */
368 	 FALSE),                /* pcrel_offset */
369 
370   /* Relocs used in ARM Linux */
371 
372   HOWTO (R_ARM_COPY,		/* type */
373 	 0,                     /* rightshift */
374 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
375 	 32,                    /* bitsize */
376 	 FALSE,                 /* pc_relative */
377 	 0,                     /* bitpos */
378 	 complain_overflow_bitfield,/* complain_on_overflow */
379 	 bfd_elf_generic_reloc, /* special_function */
380 	 "R_ARM_COPY",		/* name */
381 	 TRUE,			/* partial_inplace */
382 	 0xffffffff,		/* src_mask */
383 	 0xffffffff,		/* dst_mask */
384 	 FALSE),                /* pcrel_offset */
385 
386   HOWTO (R_ARM_GLOB_DAT,	/* type */
387 	 0,                     /* rightshift */
388 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
389 	 32,                    /* bitsize */
390 	 FALSE,                 /* pc_relative */
391 	 0,                     /* bitpos */
392 	 complain_overflow_bitfield,/* complain_on_overflow */
393 	 bfd_elf_generic_reloc, /* special_function */
394 	 "R_ARM_GLOB_DAT",	/* name */
395 	 TRUE,			/* partial_inplace */
396 	 0xffffffff,		/* src_mask */
397 	 0xffffffff,		/* dst_mask */
398 	 FALSE),                /* pcrel_offset */
399 
400   HOWTO (R_ARM_JUMP_SLOT,	/* type */
401 	 0,                     /* rightshift */
402 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
403 	 32,                    /* bitsize */
404 	 FALSE,                 /* pc_relative */
405 	 0,                     /* bitpos */
406 	 complain_overflow_bitfield,/* complain_on_overflow */
407 	 bfd_elf_generic_reloc, /* special_function */
408 	 "R_ARM_JUMP_SLOT",	/* name */
409 	 TRUE,			/* partial_inplace */
410 	 0xffffffff,		/* src_mask */
411 	 0xffffffff,		/* dst_mask */
412 	 FALSE),                /* pcrel_offset */
413 
414   HOWTO (R_ARM_RELATIVE,	/* type */
415 	 0,                     /* rightshift */
416 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
417 	 32,                    /* bitsize */
418 	 FALSE,                 /* pc_relative */
419 	 0,                     /* bitpos */
420 	 complain_overflow_bitfield,/* complain_on_overflow */
421 	 bfd_elf_generic_reloc, /* special_function */
422 	 "R_ARM_RELATIVE",	/* name */
423 	 TRUE,			/* partial_inplace */
424 	 0xffffffff,		/* src_mask */
425 	 0xffffffff,		/* dst_mask */
426 	 FALSE),                /* pcrel_offset */
427 
428   HOWTO (R_ARM_GOTOFF32,	/* type */
429 	 0,                     /* rightshift */
430 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
431 	 32,                    /* bitsize */
432 	 FALSE,                 /* pc_relative */
433 	 0,                     /* bitpos */
434 	 complain_overflow_bitfield,/* complain_on_overflow */
435 	 bfd_elf_generic_reloc, /* special_function */
436 	 "R_ARM_GOTOFF32",	/* name */
437 	 TRUE,			/* partial_inplace */
438 	 0xffffffff,		/* src_mask */
439 	 0xffffffff,		/* dst_mask */
440 	 FALSE),                /* pcrel_offset */
441 
442   HOWTO (R_ARM_GOTPC,		/* type */
443 	 0,                     /* rightshift */
444 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
445 	 32,                    /* bitsize */
446 	 TRUE,			/* pc_relative */
447 	 0,                     /* bitpos */
448 	 complain_overflow_bitfield,/* complain_on_overflow */
449 	 bfd_elf_generic_reloc, /* special_function */
450 	 "R_ARM_GOTPC",		/* name */
451 	 TRUE,			/* partial_inplace */
452 	 0xffffffff,		/* src_mask */
453 	 0xffffffff,		/* dst_mask */
454 	 TRUE),			/* pcrel_offset */
455 
456   HOWTO (R_ARM_GOT32,		/* type */
457 	 0,                     /* rightshift */
458 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
459 	 32,                    /* bitsize */
460 	 FALSE,			/* pc_relative */
461 	 0,                     /* bitpos */
462 	 complain_overflow_bitfield,/* complain_on_overflow */
463 	 bfd_elf_generic_reloc, /* special_function */
464 	 "R_ARM_GOT32",		/* name */
465 	 TRUE,			/* partial_inplace */
466 	 0xffffffff,		/* src_mask */
467 	 0xffffffff,		/* dst_mask */
468 	 FALSE),		/* pcrel_offset */
469 
470   HOWTO (R_ARM_PLT32,		/* type */
471 	 2,                     /* rightshift */
472 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
473 	 24,                    /* bitsize */
474 	 TRUE,			/* pc_relative */
475 	 0,                     /* bitpos */
476 	 complain_overflow_bitfield,/* complain_on_overflow */
477 	 bfd_elf_generic_reloc, /* special_function */
478 	 "R_ARM_PLT32",		/* name */
479 	 FALSE,			/* partial_inplace */
480 	 0x00ffffff,		/* src_mask */
481 	 0x00ffffff,		/* dst_mask */
482 	 TRUE),			/* pcrel_offset */
483 
484   HOWTO (R_ARM_CALL,		/* type */
485 	 2,			/* rightshift */
486 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
487 	 24,			/* bitsize */
488 	 TRUE,			/* pc_relative */
489 	 0,			/* bitpos */
490 	 complain_overflow_signed,/* complain_on_overflow */
491 	 bfd_elf_generic_reloc,	/* special_function */
492 	 "R_ARM_CALL",		/* name */
493 	 FALSE,			/* partial_inplace */
494 	 0x00ffffff,		/* src_mask */
495 	 0x00ffffff,		/* dst_mask */
496 	 TRUE),			/* pcrel_offset */
497 
498   HOWTO (R_ARM_JUMP24,		/* type */
499 	 2,			/* rightshift */
500 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
501 	 24,			/* bitsize */
502 	 TRUE,			/* pc_relative */
503 	 0,			/* bitpos */
504 	 complain_overflow_signed,/* complain_on_overflow */
505 	 bfd_elf_generic_reloc,	/* special_function */
506 	 "R_ARM_JUMP24",	/* name */
507 	 FALSE,			/* partial_inplace */
508 	 0x00ffffff,		/* src_mask */
509 	 0x00ffffff,		/* dst_mask */
510 	 TRUE),			/* pcrel_offset */
511 
512   HOWTO (R_ARM_THM_JUMP24,	/* type */
513 	 1,			/* rightshift */
514 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
515 	 24,			/* bitsize */
516 	 TRUE,			/* pc_relative */
517 	 0,			/* bitpos */
518 	 complain_overflow_signed,/* complain_on_overflow */
519 	 bfd_elf_generic_reloc,	/* special_function */
520 	 "R_ARM_THM_JUMP24",	/* name */
521 	 FALSE,			/* partial_inplace */
522 	 0x07ff2fff,		/* src_mask */
523 	 0x07ff2fff,		/* dst_mask */
524 	 TRUE),			/* pcrel_offset */
525 
526   HOWTO (R_ARM_BASE_ABS,	/* type */
527 	 0,			/* rightshift */
528 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
529 	 32,			/* bitsize */
530 	 FALSE,			/* pc_relative */
531 	 0,			/* bitpos */
532 	 complain_overflow_dont,/* complain_on_overflow */
533 	 bfd_elf_generic_reloc,	/* special_function */
534 	 "R_ARM_BASE_ABS",	/* name */
535 	 FALSE,			/* partial_inplace */
536 	 0xffffffff,		/* src_mask */
537 	 0xffffffff,		/* dst_mask */
538 	 FALSE),		/* pcrel_offset */
539 
540   HOWTO (R_ARM_ALU_PCREL7_0,	/* type */
541 	 0,			/* rightshift */
542 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
543 	 12,			/* bitsize */
544 	 TRUE,			/* pc_relative */
545 	 0,			/* bitpos */
546 	 complain_overflow_dont,/* complain_on_overflow */
547 	 bfd_elf_generic_reloc,	/* special_function */
548 	 "R_ARM_ALU_PCREL_7_0",	/* name */
549 	 FALSE,			/* partial_inplace */
550 	 0x00000fff,		/* src_mask */
551 	 0x00000fff,		/* dst_mask */
552 	 TRUE),			/* pcrel_offset */
553 
554   HOWTO (R_ARM_ALU_PCREL15_8,	/* type */
555 	 0,			/* rightshift */
556 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
557 	 12,			/* bitsize */
558 	 TRUE,			/* pc_relative */
559 	 8,			/* bitpos */
560 	 complain_overflow_dont,/* complain_on_overflow */
561 	 bfd_elf_generic_reloc,	/* special_function */
562 	 "R_ARM_ALU_PCREL_15_8",/* name */
563 	 FALSE,			/* partial_inplace */
564 	 0x00000fff,		/* src_mask */
565 	 0x00000fff,		/* dst_mask */
566 	 TRUE),			/* pcrel_offset */
567 
568   HOWTO (R_ARM_ALU_PCREL23_15,	/* type */
569 	 0,			/* rightshift */
570 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
571 	 12,			/* bitsize */
572 	 TRUE,			/* pc_relative */
573 	 16,			/* bitpos */
574 	 complain_overflow_dont,/* complain_on_overflow */
575 	 bfd_elf_generic_reloc,	/* special_function */
576 	 "R_ARM_ALU_PCREL_23_15",/* name */
577 	 FALSE,			/* partial_inplace */
578 	 0x00000fff,		/* src_mask */
579 	 0x00000fff,		/* dst_mask */
580 	 TRUE),			/* pcrel_offset */
581 
582   HOWTO (R_ARM_LDR_SBREL_11_0,	/* type */
583 	 0,			/* rightshift */
584 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
585 	 12,			/* bitsize */
586 	 FALSE,			/* pc_relative */
587 	 0,			/* bitpos */
588 	 complain_overflow_dont,/* complain_on_overflow */
589 	 bfd_elf_generic_reloc,	/* special_function */
590 	 "R_ARM_LDR_SBREL_11_0",/* name */
591 	 FALSE,			/* partial_inplace */
592 	 0x00000fff,		/* src_mask */
593 	 0x00000fff,		/* dst_mask */
594 	 FALSE),		/* pcrel_offset */
595 
596   HOWTO (R_ARM_ALU_SBREL_19_12,	/* type */
597 	 0,			/* rightshift */
598 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
599 	 8,			/* bitsize */
600 	 FALSE,			/* pc_relative */
601 	 12,			/* bitpos */
602 	 complain_overflow_dont,/* complain_on_overflow */
603 	 bfd_elf_generic_reloc,	/* special_function */
604 	 "R_ARM_ALU_SBREL_19_12",/* name */
605 	 FALSE,			/* partial_inplace */
606 	 0x000ff000,		/* src_mask */
607 	 0x000ff000,		/* dst_mask */
608 	 FALSE),		/* pcrel_offset */
609 
610   HOWTO (R_ARM_ALU_SBREL_27_20,	/* type */
611 	 0,			/* rightshift */
612 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
613 	 8,			/* bitsize */
614 	 FALSE,			/* pc_relative */
615 	 20,			/* bitpos */
616 	 complain_overflow_dont,/* complain_on_overflow */
617 	 bfd_elf_generic_reloc,	/* special_function */
618 	 "R_ARM_ALU_SBREL_27_20",/* name */
619 	 FALSE,			/* partial_inplace */
620 	 0x0ff00000,		/* src_mask */
621 	 0x0ff00000,		/* dst_mask */
622 	 FALSE),		/* pcrel_offset */
623 
624   HOWTO (R_ARM_TARGET1,		/* type */
625 	 0,			/* rightshift */
626 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
627 	 32,			/* bitsize */
628 	 FALSE,			/* pc_relative */
629 	 0,			/* bitpos */
630 	 complain_overflow_dont,/* complain_on_overflow */
631 	 bfd_elf_generic_reloc,	/* special_function */
632 	 "R_ARM_TARGET1",	/* name */
633 	 FALSE,			/* partial_inplace */
634 	 0xffffffff,		/* src_mask */
635 	 0xffffffff,		/* dst_mask */
636 	 FALSE),		/* pcrel_offset */
637 
638   HOWTO (R_ARM_ROSEGREL32,	/* type */
639 	 0,			/* rightshift */
640 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
641 	 32,			/* bitsize */
642 	 FALSE,			/* pc_relative */
643 	 0,			/* bitpos */
644 	 complain_overflow_dont,/* complain_on_overflow */
645 	 bfd_elf_generic_reloc,	/* special_function */
646 	 "R_ARM_ROSEGREL32",	/* name */
647 	 FALSE,			/* partial_inplace */
648 	 0xffffffff,		/* src_mask */
649 	 0xffffffff,		/* dst_mask */
650 	 FALSE),		/* pcrel_offset */
651 
652   HOWTO (R_ARM_V4BX,		/* type */
653 	 0,			/* rightshift */
654 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
655 	 32,			/* bitsize */
656 	 FALSE,			/* pc_relative */
657 	 0,			/* bitpos */
658 	 complain_overflow_dont,/* complain_on_overflow */
659 	 bfd_elf_generic_reloc,	/* special_function */
660 	 "R_ARM_V4BX",		/* name */
661 	 FALSE,			/* partial_inplace */
662 	 0xffffffff,		/* src_mask */
663 	 0xffffffff,		/* dst_mask */
664 	 FALSE),		/* pcrel_offset */
665 
666   HOWTO (R_ARM_TARGET2,		/* type */
667 	 0,			/* rightshift */
668 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
669 	 32,			/* bitsize */
670 	 FALSE,			/* pc_relative */
671 	 0,			/* bitpos */
672 	 complain_overflow_signed,/* complain_on_overflow */
673 	 bfd_elf_generic_reloc,	/* special_function */
674 	 "R_ARM_TARGET2",	/* name */
675 	 FALSE,			/* partial_inplace */
676 	 0xffffffff,		/* src_mask */
677 	 0xffffffff,		/* dst_mask */
678 	 TRUE),			/* pcrel_offset */
679 
680   HOWTO (R_ARM_PREL31,		/* type */
681 	 0,			/* rightshift */
682 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
683 	 31,			/* bitsize */
684 	 TRUE,			/* pc_relative */
685 	 0,			/* bitpos */
686 	 complain_overflow_signed,/* complain_on_overflow */
687 	 bfd_elf_generic_reloc,	/* special_function */
688 	 "R_ARM_PREL31",	/* name */
689 	 FALSE,			/* partial_inplace */
690 	 0x7fffffff,		/* src_mask */
691 	 0x7fffffff,		/* dst_mask */
692 	 TRUE),			/* pcrel_offset */
693 
694   HOWTO (R_ARM_MOVW_ABS_NC,	/* type */
695 	 0,			/* rightshift */
696 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
697 	 16,			/* bitsize */
698 	 FALSE,			/* pc_relative */
699 	 0,			/* bitpos */
700 	 complain_overflow_dont,/* complain_on_overflow */
701 	 bfd_elf_generic_reloc,	/* special_function */
702 	 "R_ARM_MOVW_ABS_NC",	/* name */
703 	 FALSE,			/* partial_inplace */
704 	 0x000f0fff,		/* src_mask */
705 	 0x000f0fff,		/* dst_mask */
706 	 FALSE),		/* pcrel_offset */
707 
708   HOWTO (R_ARM_MOVT_ABS,	/* type */
709 	 0,			/* rightshift */
710 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
711 	 16,			/* bitsize */
712 	 FALSE,			/* pc_relative */
713 	 0,			/* bitpos */
714 	 complain_overflow_bitfield,/* complain_on_overflow */
715 	 bfd_elf_generic_reloc,	/* special_function */
716 	 "R_ARM_MOVT_ABS",	/* name */
717 	 FALSE,			/* partial_inplace */
718 	 0x000f0fff,		/* src_mask */
719 	 0x000f0fff,		/* dst_mask */
720 	 FALSE),		/* pcrel_offset */
721 
722   HOWTO (R_ARM_MOVW_PREL_NC,	/* type */
723 	 0,			/* rightshift */
724 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
725 	 16,			/* bitsize */
726 	 TRUE,			/* pc_relative */
727 	 0,			/* bitpos */
728 	 complain_overflow_dont,/* complain_on_overflow */
729 	 bfd_elf_generic_reloc,	/* special_function */
730 	 "R_ARM_MOVW_PREL_NC",	/* name */
731 	 FALSE,			/* partial_inplace */
732 	 0x000f0fff,		/* src_mask */
733 	 0x000f0fff,		/* dst_mask */
734 	 TRUE),			/* pcrel_offset */
735 
736   HOWTO (R_ARM_MOVT_PREL,	/* type */
737 	 0,			/* rightshift */
738 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
739 	 16,			/* bitsize */
740 	 TRUE,			/* pc_relative */
741 	 0,			/* bitpos */
742 	 complain_overflow_bitfield,/* complain_on_overflow */
743 	 bfd_elf_generic_reloc,	/* special_function */
744 	 "R_ARM_MOVT_PREL",	/* name */
745 	 FALSE,			/* partial_inplace */
746 	 0x000f0fff,		/* src_mask */
747 	 0x000f0fff,		/* dst_mask */
748 	 TRUE),			/* pcrel_offset */
749 
750   HOWTO (R_ARM_THM_MOVW_ABS_NC,	/* type */
751 	 0,			/* rightshift */
752 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
753 	 16,			/* bitsize */
754 	 FALSE,			/* pc_relative */
755 	 0,			/* bitpos */
756 	 complain_overflow_dont,/* complain_on_overflow */
757 	 bfd_elf_generic_reloc,	/* special_function */
758 	 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 	 FALSE,			/* partial_inplace */
760 	 0x040f70ff,		/* src_mask */
761 	 0x040f70ff,		/* dst_mask */
762 	 FALSE),		/* pcrel_offset */
763 
764   HOWTO (R_ARM_THM_MOVT_ABS,	/* type */
765 	 0,			/* rightshift */
766 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
767 	 16,			/* bitsize */
768 	 FALSE,			/* pc_relative */
769 	 0,			/* bitpos */
770 	 complain_overflow_bitfield,/* complain_on_overflow */
771 	 bfd_elf_generic_reloc,	/* special_function */
772 	 "R_ARM_THM_MOVT_ABS",	/* name */
773 	 FALSE,			/* partial_inplace */
774 	 0x040f70ff,		/* src_mask */
775 	 0x040f70ff,		/* dst_mask */
776 	 FALSE),		/* pcrel_offset */
777 
778   HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 	 0,			/* rightshift */
780 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
781 	 16,			/* bitsize */
782 	 TRUE,			/* pc_relative */
783 	 0,			/* bitpos */
784 	 complain_overflow_dont,/* complain_on_overflow */
785 	 bfd_elf_generic_reloc,	/* special_function */
786 	 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 	 FALSE,			/* partial_inplace */
788 	 0x040f70ff,		/* src_mask */
789 	 0x040f70ff,		/* dst_mask */
790 	 TRUE),			/* pcrel_offset */
791 
792   HOWTO (R_ARM_THM_MOVT_PREL,	/* type */
793 	 0,			/* rightshift */
794 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
795 	 16,			/* bitsize */
796 	 TRUE,			/* pc_relative */
797 	 0,			/* bitpos */
798 	 complain_overflow_bitfield,/* complain_on_overflow */
799 	 bfd_elf_generic_reloc,	/* special_function */
800 	 "R_ARM_THM_MOVT_PREL",	/* name */
801 	 FALSE,			/* partial_inplace */
802 	 0x040f70ff,		/* src_mask */
803 	 0x040f70ff,		/* dst_mask */
804 	 TRUE),			/* pcrel_offset */
805 
806   HOWTO (R_ARM_THM_JUMP19,	/* type */
807 	 1,			/* rightshift */
808 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
809 	 19,			/* bitsize */
810 	 TRUE,			/* pc_relative */
811 	 0,			/* bitpos */
812 	 complain_overflow_signed,/* complain_on_overflow */
813 	 bfd_elf_generic_reloc, /* special_function */
814 	 "R_ARM_THM_JUMP19",	/* name */
815 	 FALSE,			/* partial_inplace */
816 	 0x043f2fff,		/* src_mask */
817 	 0x043f2fff,		/* dst_mask */
818 	 TRUE),			/* pcrel_offset */
819 
820   HOWTO (R_ARM_THM_JUMP6,	/* type */
821 	 1,			/* rightshift */
822 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
823 	 6,			/* bitsize */
824 	 TRUE,			/* pc_relative */
825 	 0,			/* bitpos */
826 	 complain_overflow_unsigned,/* complain_on_overflow */
827 	 bfd_elf_generic_reloc,	/* special_function */
828 	 "R_ARM_THM_JUMP6",	/* name */
829 	 FALSE,			/* partial_inplace */
830 	 0x02f8,		/* src_mask */
831 	 0x02f8,		/* dst_mask */
832 	 TRUE),			/* pcrel_offset */
833 
834   /* These are declared as 13-bit signed relocations because we can
835      address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836      versa.  */
837   HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 	 0,			/* rightshift */
839 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
840 	 13,			/* bitsize */
841 	 TRUE,			/* pc_relative */
842 	 0,			/* bitpos */
843 	 complain_overflow_dont,/* complain_on_overflow */
844 	 bfd_elf_generic_reloc,	/* special_function */
845 	 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 	 FALSE,			/* partial_inplace */
847 	 0xffffffff,		/* src_mask */
848 	 0xffffffff,		/* dst_mask */
849 	 TRUE),			/* pcrel_offset */
850 
851   HOWTO (R_ARM_THM_PC12,	/* type */
852 	 0,			/* rightshift */
853 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
854 	 13,			/* bitsize */
855 	 TRUE,			/* pc_relative */
856 	 0,			/* bitpos */
857 	 complain_overflow_dont,/* complain_on_overflow */
858 	 bfd_elf_generic_reloc,	/* special_function */
859 	 "R_ARM_THM_PC12",	/* name */
860 	 FALSE,			/* partial_inplace */
861 	 0xffffffff,		/* src_mask */
862 	 0xffffffff,		/* dst_mask */
863 	 TRUE),			/* pcrel_offset */
864 
865   HOWTO (R_ARM_ABS32_NOI,	/* type */
866 	 0,			/* rightshift */
867 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
868 	 32,			/* bitsize */
869 	 FALSE,			/* pc_relative */
870 	 0,			/* bitpos */
871 	 complain_overflow_dont,/* complain_on_overflow */
872 	 bfd_elf_generic_reloc,	/* special_function */
873 	 "R_ARM_ABS32_NOI",	/* name */
874 	 FALSE,			/* partial_inplace */
875 	 0xffffffff,		/* src_mask */
876 	 0xffffffff,		/* dst_mask */
877 	 FALSE),		/* pcrel_offset */
878 
879   HOWTO (R_ARM_REL32_NOI,	/* type */
880 	 0,			/* rightshift */
881 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
882 	 32,			/* bitsize */
883 	 TRUE,			/* pc_relative */
884 	 0,			/* bitpos */
885 	 complain_overflow_dont,/* complain_on_overflow */
886 	 bfd_elf_generic_reloc,	/* special_function */
887 	 "R_ARM_REL32_NOI",	/* name */
888 	 FALSE,			/* partial_inplace */
889 	 0xffffffff,		/* src_mask */
890 	 0xffffffff,		/* dst_mask */
891 	 FALSE),		/* pcrel_offset */
892 
893   /* Group relocations.  */
894 
895   HOWTO (R_ARM_ALU_PC_G0_NC,	/* type */
896 	 0,			/* rightshift */
897 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
898 	 32,			/* bitsize */
899 	 TRUE,			/* pc_relative */
900 	 0,			/* bitpos */
901 	 complain_overflow_dont,/* complain_on_overflow */
902 	 bfd_elf_generic_reloc,	/* special_function */
903 	 "R_ARM_ALU_PC_G0_NC",	/* name */
904 	 FALSE,			/* partial_inplace */
905 	 0xffffffff,		/* src_mask */
906 	 0xffffffff,		/* dst_mask */
907 	 TRUE),			/* pcrel_offset */
908 
909   HOWTO (R_ARM_ALU_PC_G0,   	/* type */
910 	 0,			/* rightshift */
911 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
912 	 32,			/* bitsize */
913 	 TRUE,			/* pc_relative */
914 	 0,			/* bitpos */
915 	 complain_overflow_dont,/* complain_on_overflow */
916 	 bfd_elf_generic_reloc,	/* special_function */
917 	 "R_ARM_ALU_PC_G0",   	/* name */
918 	 FALSE,			/* partial_inplace */
919 	 0xffffffff,		/* src_mask */
920 	 0xffffffff,		/* dst_mask */
921 	 TRUE),			/* pcrel_offset */
922 
923   HOWTO (R_ARM_ALU_PC_G1_NC,	/* type */
924 	 0,			/* rightshift */
925 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
926 	 32,			/* bitsize */
927 	 TRUE,			/* pc_relative */
928 	 0,			/* bitpos */
929 	 complain_overflow_dont,/* complain_on_overflow */
930 	 bfd_elf_generic_reloc,	/* special_function */
931 	 "R_ARM_ALU_PC_G1_NC",	/* name */
932 	 FALSE,			/* partial_inplace */
933 	 0xffffffff,		/* src_mask */
934 	 0xffffffff,		/* dst_mask */
935 	 TRUE),			/* pcrel_offset */
936 
937   HOWTO (R_ARM_ALU_PC_G1,   	/* type */
938 	 0,			/* rightshift */
939 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
940 	 32,			/* bitsize */
941 	 TRUE,			/* pc_relative */
942 	 0,			/* bitpos */
943 	 complain_overflow_dont,/* complain_on_overflow */
944 	 bfd_elf_generic_reloc,	/* special_function */
945 	 "R_ARM_ALU_PC_G1",   	/* name */
946 	 FALSE,			/* partial_inplace */
947 	 0xffffffff,		/* src_mask */
948 	 0xffffffff,		/* dst_mask */
949 	 TRUE),			/* pcrel_offset */
950 
951   HOWTO (R_ARM_ALU_PC_G2,   	/* type */
952 	 0,			/* rightshift */
953 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
954 	 32,			/* bitsize */
955 	 TRUE,			/* pc_relative */
956 	 0,			/* bitpos */
957 	 complain_overflow_dont,/* complain_on_overflow */
958 	 bfd_elf_generic_reloc,	/* special_function */
959 	 "R_ARM_ALU_PC_G2",   	/* name */
960 	 FALSE,			/* partial_inplace */
961 	 0xffffffff,		/* src_mask */
962 	 0xffffffff,		/* dst_mask */
963 	 TRUE),			/* pcrel_offset */
964 
965   HOWTO (R_ARM_LDR_PC_G1,   	/* type */
966 	 0,			/* rightshift */
967 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
968 	 32,			/* bitsize */
969 	 TRUE,			/* pc_relative */
970 	 0,			/* bitpos */
971 	 complain_overflow_dont,/* complain_on_overflow */
972 	 bfd_elf_generic_reloc,	/* special_function */
973 	 "R_ARM_LDR_PC_G1",   	/* name */
974 	 FALSE,			/* partial_inplace */
975 	 0xffffffff,		/* src_mask */
976 	 0xffffffff,		/* dst_mask */
977 	 TRUE),			/* pcrel_offset */
978 
979   HOWTO (R_ARM_LDR_PC_G2,   	/* type */
980 	 0,			/* rightshift */
981 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
982 	 32,			/* bitsize */
983 	 TRUE,			/* pc_relative */
984 	 0,			/* bitpos */
985 	 complain_overflow_dont,/* complain_on_overflow */
986 	 bfd_elf_generic_reloc,	/* special_function */
987 	 "R_ARM_LDR_PC_G2",   	/* name */
988 	 FALSE,			/* partial_inplace */
989 	 0xffffffff,		/* src_mask */
990 	 0xffffffff,		/* dst_mask */
991 	 TRUE),			/* pcrel_offset */
992 
993   HOWTO (R_ARM_LDRS_PC_G0,   	/* type */
994 	 0,			/* rightshift */
995 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
996 	 32,			/* bitsize */
997 	 TRUE,			/* pc_relative */
998 	 0,			/* bitpos */
999 	 complain_overflow_dont,/* complain_on_overflow */
1000 	 bfd_elf_generic_reloc,	/* special_function */
1001 	 "R_ARM_LDRS_PC_G0",   	/* name */
1002 	 FALSE,			/* partial_inplace */
1003 	 0xffffffff,		/* src_mask */
1004 	 0xffffffff,		/* dst_mask */
1005 	 TRUE),			/* pcrel_offset */
1006 
1007   HOWTO (R_ARM_LDRS_PC_G1,   	/* type */
1008 	 0,			/* rightshift */
1009 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1010 	 32,			/* bitsize */
1011 	 TRUE,			/* pc_relative */
1012 	 0,			/* bitpos */
1013 	 complain_overflow_dont,/* complain_on_overflow */
1014 	 bfd_elf_generic_reloc,	/* special_function */
1015 	 "R_ARM_LDRS_PC_G1",   	/* name */
1016 	 FALSE,			/* partial_inplace */
1017 	 0xffffffff,		/* src_mask */
1018 	 0xffffffff,		/* dst_mask */
1019 	 TRUE),			/* pcrel_offset */
1020 
1021   HOWTO (R_ARM_LDRS_PC_G2,   	/* type */
1022 	 0,			/* rightshift */
1023 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1024 	 32,			/* bitsize */
1025 	 TRUE,			/* pc_relative */
1026 	 0,			/* bitpos */
1027 	 complain_overflow_dont,/* complain_on_overflow */
1028 	 bfd_elf_generic_reloc,	/* special_function */
1029 	 "R_ARM_LDRS_PC_G2",   	/* name */
1030 	 FALSE,			/* partial_inplace */
1031 	 0xffffffff,		/* src_mask */
1032 	 0xffffffff,		/* dst_mask */
1033 	 TRUE),			/* pcrel_offset */
1034 
1035   HOWTO (R_ARM_LDC_PC_G0,   	/* type */
1036 	 0,			/* rightshift */
1037 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1038 	 32,			/* bitsize */
1039 	 TRUE,			/* pc_relative */
1040 	 0,			/* bitpos */
1041 	 complain_overflow_dont,/* complain_on_overflow */
1042 	 bfd_elf_generic_reloc,	/* special_function */
1043 	 "R_ARM_LDC_PC_G0",   	/* name */
1044 	 FALSE,			/* partial_inplace */
1045 	 0xffffffff,		/* src_mask */
1046 	 0xffffffff,		/* dst_mask */
1047 	 TRUE),			/* pcrel_offset */
1048 
1049   HOWTO (R_ARM_LDC_PC_G1,   	/* type */
1050 	 0,			/* rightshift */
1051 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1052 	 32,			/* bitsize */
1053 	 TRUE,			/* pc_relative */
1054 	 0,			/* bitpos */
1055 	 complain_overflow_dont,/* complain_on_overflow */
1056 	 bfd_elf_generic_reloc,	/* special_function */
1057 	 "R_ARM_LDC_PC_G1",   	/* name */
1058 	 FALSE,			/* partial_inplace */
1059 	 0xffffffff,		/* src_mask */
1060 	 0xffffffff,		/* dst_mask */
1061 	 TRUE),			/* pcrel_offset */
1062 
1063   HOWTO (R_ARM_LDC_PC_G2,   	/* type */
1064 	 0,			/* rightshift */
1065 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1066 	 32,			/* bitsize */
1067 	 TRUE,			/* pc_relative */
1068 	 0,			/* bitpos */
1069 	 complain_overflow_dont,/* complain_on_overflow */
1070 	 bfd_elf_generic_reloc,	/* special_function */
1071 	 "R_ARM_LDC_PC_G2",   	/* name */
1072 	 FALSE,			/* partial_inplace */
1073 	 0xffffffff,		/* src_mask */
1074 	 0xffffffff,		/* dst_mask */
1075 	 TRUE),			/* pcrel_offset */
1076 
1077   HOWTO (R_ARM_ALU_SB_G0_NC,   	/* type */
1078 	 0,			/* rightshift */
1079 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1080 	 32,			/* bitsize */
1081 	 TRUE,			/* pc_relative */
1082 	 0,			/* bitpos */
1083 	 complain_overflow_dont,/* complain_on_overflow */
1084 	 bfd_elf_generic_reloc,	/* special_function */
1085 	 "R_ARM_ALU_SB_G0_NC", 	/* name */
1086 	 FALSE,			/* partial_inplace */
1087 	 0xffffffff,		/* src_mask */
1088 	 0xffffffff,		/* dst_mask */
1089 	 TRUE),			/* pcrel_offset */
1090 
1091   HOWTO (R_ARM_ALU_SB_G0,   	/* type */
1092 	 0,			/* rightshift */
1093 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1094 	 32,			/* bitsize */
1095 	 TRUE,			/* pc_relative */
1096 	 0,			/* bitpos */
1097 	 complain_overflow_dont,/* complain_on_overflow */
1098 	 bfd_elf_generic_reloc,	/* special_function */
1099 	 "R_ARM_ALU_SB_G0", 	/* name */
1100 	 FALSE,			/* partial_inplace */
1101 	 0xffffffff,		/* src_mask */
1102 	 0xffffffff,		/* dst_mask */
1103 	 TRUE),			/* pcrel_offset */
1104 
1105   HOWTO (R_ARM_ALU_SB_G1_NC,   	/* type */
1106 	 0,			/* rightshift */
1107 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1108 	 32,			/* bitsize */
1109 	 TRUE,			/* pc_relative */
1110 	 0,			/* bitpos */
1111 	 complain_overflow_dont,/* complain_on_overflow */
1112 	 bfd_elf_generic_reloc,	/* special_function */
1113 	 "R_ARM_ALU_SB_G1_NC", 	/* name */
1114 	 FALSE,			/* partial_inplace */
1115 	 0xffffffff,		/* src_mask */
1116 	 0xffffffff,		/* dst_mask */
1117 	 TRUE),			/* pcrel_offset */
1118 
1119   HOWTO (R_ARM_ALU_SB_G1,   	/* type */
1120 	 0,			/* rightshift */
1121 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1122 	 32,			/* bitsize */
1123 	 TRUE,			/* pc_relative */
1124 	 0,			/* bitpos */
1125 	 complain_overflow_dont,/* complain_on_overflow */
1126 	 bfd_elf_generic_reloc,	/* special_function */
1127 	 "R_ARM_ALU_SB_G1", 	/* name */
1128 	 FALSE,			/* partial_inplace */
1129 	 0xffffffff,		/* src_mask */
1130 	 0xffffffff,		/* dst_mask */
1131 	 TRUE),			/* pcrel_offset */
1132 
1133   HOWTO (R_ARM_ALU_SB_G2,   	/* type */
1134 	 0,			/* rightshift */
1135 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1136 	 32,			/* bitsize */
1137 	 TRUE,			/* pc_relative */
1138 	 0,			/* bitpos */
1139 	 complain_overflow_dont,/* complain_on_overflow */
1140 	 bfd_elf_generic_reloc,	/* special_function */
1141 	 "R_ARM_ALU_SB_G2", 	/* name */
1142 	 FALSE,			/* partial_inplace */
1143 	 0xffffffff,		/* src_mask */
1144 	 0xffffffff,		/* dst_mask */
1145 	 TRUE),			/* pcrel_offset */
1146 
1147   HOWTO (R_ARM_LDR_SB_G0,   	/* type */
1148 	 0,			/* rightshift */
1149 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1150 	 32,			/* bitsize */
1151 	 TRUE,			/* pc_relative */
1152 	 0,			/* bitpos */
1153 	 complain_overflow_dont,/* complain_on_overflow */
1154 	 bfd_elf_generic_reloc,	/* special_function */
1155 	 "R_ARM_LDR_SB_G0", 	/* name */
1156 	 FALSE,			/* partial_inplace */
1157 	 0xffffffff,		/* src_mask */
1158 	 0xffffffff,		/* dst_mask */
1159 	 TRUE),			/* pcrel_offset */
1160 
1161   HOWTO (R_ARM_LDR_SB_G1,   	/* type */
1162 	 0,			/* rightshift */
1163 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1164 	 32,			/* bitsize */
1165 	 TRUE,			/* pc_relative */
1166 	 0,			/* bitpos */
1167 	 complain_overflow_dont,/* complain_on_overflow */
1168 	 bfd_elf_generic_reloc,	/* special_function */
1169 	 "R_ARM_LDR_SB_G1", 	/* name */
1170 	 FALSE,			/* partial_inplace */
1171 	 0xffffffff,		/* src_mask */
1172 	 0xffffffff,		/* dst_mask */
1173 	 TRUE),			/* pcrel_offset */
1174 
1175   HOWTO (R_ARM_LDR_SB_G2,   	/* type */
1176 	 0,			/* rightshift */
1177 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1178 	 32,			/* bitsize */
1179 	 TRUE,			/* pc_relative */
1180 	 0,			/* bitpos */
1181 	 complain_overflow_dont,/* complain_on_overflow */
1182 	 bfd_elf_generic_reloc,	/* special_function */
1183 	 "R_ARM_LDR_SB_G2", 	/* name */
1184 	 FALSE,			/* partial_inplace */
1185 	 0xffffffff,		/* src_mask */
1186 	 0xffffffff,		/* dst_mask */
1187 	 TRUE),			/* pcrel_offset */
1188 
1189   HOWTO (R_ARM_LDRS_SB_G0,   	/* type */
1190 	 0,			/* rightshift */
1191 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1192 	 32,			/* bitsize */
1193 	 TRUE,			/* pc_relative */
1194 	 0,			/* bitpos */
1195 	 complain_overflow_dont,/* complain_on_overflow */
1196 	 bfd_elf_generic_reloc,	/* special_function */
1197 	 "R_ARM_LDRS_SB_G0", 	/* name */
1198 	 FALSE,			/* partial_inplace */
1199 	 0xffffffff,		/* src_mask */
1200 	 0xffffffff,		/* dst_mask */
1201 	 TRUE),			/* pcrel_offset */
1202 
1203   HOWTO (R_ARM_LDRS_SB_G1,   	/* type */
1204 	 0,			/* rightshift */
1205 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1206 	 32,			/* bitsize */
1207 	 TRUE,			/* pc_relative */
1208 	 0,			/* bitpos */
1209 	 complain_overflow_dont,/* complain_on_overflow */
1210 	 bfd_elf_generic_reloc,	/* special_function */
1211 	 "R_ARM_LDRS_SB_G1", 	/* name */
1212 	 FALSE,			/* partial_inplace */
1213 	 0xffffffff,		/* src_mask */
1214 	 0xffffffff,		/* dst_mask */
1215 	 TRUE),			/* pcrel_offset */
1216 
1217   HOWTO (R_ARM_LDRS_SB_G2,   	/* type */
1218 	 0,			/* rightshift */
1219 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1220 	 32,			/* bitsize */
1221 	 TRUE,			/* pc_relative */
1222 	 0,			/* bitpos */
1223 	 complain_overflow_dont,/* complain_on_overflow */
1224 	 bfd_elf_generic_reloc,	/* special_function */
1225 	 "R_ARM_LDRS_SB_G2", 	/* name */
1226 	 FALSE,			/* partial_inplace */
1227 	 0xffffffff,		/* src_mask */
1228 	 0xffffffff,		/* dst_mask */
1229 	 TRUE),			/* pcrel_offset */
1230 
1231   HOWTO (R_ARM_LDC_SB_G0,   	/* type */
1232 	 0,			/* rightshift */
1233 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1234 	 32,			/* bitsize */
1235 	 TRUE,			/* pc_relative */
1236 	 0,			/* bitpos */
1237 	 complain_overflow_dont,/* complain_on_overflow */
1238 	 bfd_elf_generic_reloc,	/* special_function */
1239 	 "R_ARM_LDC_SB_G0", 	/* name */
1240 	 FALSE,			/* partial_inplace */
1241 	 0xffffffff,		/* src_mask */
1242 	 0xffffffff,		/* dst_mask */
1243 	 TRUE),			/* pcrel_offset */
1244 
1245   HOWTO (R_ARM_LDC_SB_G1,   	/* type */
1246 	 0,			/* rightshift */
1247 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1248 	 32,			/* bitsize */
1249 	 TRUE,			/* pc_relative */
1250 	 0,			/* bitpos */
1251 	 complain_overflow_dont,/* complain_on_overflow */
1252 	 bfd_elf_generic_reloc,	/* special_function */
1253 	 "R_ARM_LDC_SB_G1", 	/* name */
1254 	 FALSE,			/* partial_inplace */
1255 	 0xffffffff,		/* src_mask */
1256 	 0xffffffff,		/* dst_mask */
1257 	 TRUE),			/* pcrel_offset */
1258 
1259   HOWTO (R_ARM_LDC_SB_G2,   	/* type */
1260 	 0,			/* rightshift */
1261 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1262 	 32,			/* bitsize */
1263 	 TRUE,			/* pc_relative */
1264 	 0,			/* bitpos */
1265 	 complain_overflow_dont,/* complain_on_overflow */
1266 	 bfd_elf_generic_reloc,	/* special_function */
1267 	 "R_ARM_LDC_SB_G2", 	/* name */
1268 	 FALSE,			/* partial_inplace */
1269 	 0xffffffff,		/* src_mask */
1270 	 0xffffffff,		/* dst_mask */
1271 	 TRUE),			/* pcrel_offset */
1272 
1273   /* End of group relocations.  */
1274 
1275   HOWTO (R_ARM_MOVW_BREL_NC,	/* type */
1276 	 0,			/* rightshift */
1277 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1278 	 16,			/* bitsize */
1279 	 FALSE,			/* pc_relative */
1280 	 0,			/* bitpos */
1281 	 complain_overflow_dont,/* complain_on_overflow */
1282 	 bfd_elf_generic_reloc,	/* special_function */
1283 	 "R_ARM_MOVW_BREL_NC",	/* name */
1284 	 FALSE,			/* partial_inplace */
1285 	 0x0000ffff,		/* src_mask */
1286 	 0x0000ffff,		/* dst_mask */
1287 	 FALSE),		/* pcrel_offset */
1288 
1289   HOWTO (R_ARM_MOVT_BREL,	/* type */
1290 	 0,			/* rightshift */
1291 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1292 	 16,			/* bitsize */
1293 	 FALSE,			/* pc_relative */
1294 	 0,			/* bitpos */
1295 	 complain_overflow_bitfield,/* complain_on_overflow */
1296 	 bfd_elf_generic_reloc,	/* special_function */
1297 	 "R_ARM_MOVT_BREL",	/* name */
1298 	 FALSE,			/* partial_inplace */
1299 	 0x0000ffff,		/* src_mask */
1300 	 0x0000ffff,		/* dst_mask */
1301 	 FALSE),		/* pcrel_offset */
1302 
1303   HOWTO (R_ARM_MOVW_BREL,	/* type */
1304 	 0,			/* rightshift */
1305 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1306 	 16,			/* bitsize */
1307 	 FALSE,			/* pc_relative */
1308 	 0,			/* bitpos */
1309 	 complain_overflow_dont,/* complain_on_overflow */
1310 	 bfd_elf_generic_reloc,	/* special_function */
1311 	 "R_ARM_MOVW_BREL",	/* name */
1312 	 FALSE,			/* partial_inplace */
1313 	 0x0000ffff,		/* src_mask */
1314 	 0x0000ffff,		/* dst_mask */
1315 	 FALSE),		/* pcrel_offset */
1316 
1317   HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 	 0,			/* rightshift */
1319 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1320 	 16,			/* bitsize */
1321 	 FALSE,			/* pc_relative */
1322 	 0,			/* bitpos */
1323 	 complain_overflow_dont,/* complain_on_overflow */
1324 	 bfd_elf_generic_reloc,	/* special_function */
1325 	 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 	 FALSE,			/* partial_inplace */
1327 	 0x040f70ff,		/* src_mask */
1328 	 0x040f70ff,		/* dst_mask */
1329 	 FALSE),		/* pcrel_offset */
1330 
1331   HOWTO (R_ARM_THM_MOVT_BREL,	/* type */
1332 	 0,			/* rightshift */
1333 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1334 	 16,			/* bitsize */
1335 	 FALSE,			/* pc_relative */
1336 	 0,			/* bitpos */
1337 	 complain_overflow_bitfield,/* complain_on_overflow */
1338 	 bfd_elf_generic_reloc,	/* special_function */
1339 	 "R_ARM_THM_MOVT_BREL",	/* name */
1340 	 FALSE,			/* partial_inplace */
1341 	 0x040f70ff,		/* src_mask */
1342 	 0x040f70ff,		/* dst_mask */
1343 	 FALSE),		/* pcrel_offset */
1344 
1345   HOWTO (R_ARM_THM_MOVW_BREL,	/* type */
1346 	 0,			/* rightshift */
1347 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1348 	 16,			/* bitsize */
1349 	 FALSE,			/* pc_relative */
1350 	 0,			/* bitpos */
1351 	 complain_overflow_dont,/* complain_on_overflow */
1352 	 bfd_elf_generic_reloc,	/* special_function */
1353 	 "R_ARM_THM_MOVW_BREL",	/* name */
1354 	 FALSE,			/* partial_inplace */
1355 	 0x040f70ff,		/* src_mask */
1356 	 0x040f70ff,		/* dst_mask */
1357 	 FALSE),		/* pcrel_offset */
1358 
1359   HOWTO (R_ARM_TLS_GOTDESC,	/* type */
1360 	 0,			/* rightshift */
1361 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1362 	 32,			/* bitsize */
1363 	 FALSE,			/* pc_relative */
1364 	 0,			/* bitpos */
1365 	 complain_overflow_bitfield,/* complain_on_overflow */
1366 	 NULL,			/* special_function */
1367 	 "R_ARM_TLS_GOTDESC",	/* name */
1368 	 TRUE,			/* partial_inplace */
1369 	 0xffffffff,		/* src_mask */
1370 	 0xffffffff,		/* dst_mask */
1371 	 FALSE),		/* pcrel_offset */
1372 
1373   HOWTO (R_ARM_TLS_CALL,	/* type */
1374 	 0,			/* rightshift */
1375 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1376 	 24,			/* bitsize */
1377 	 FALSE,			/* pc_relative */
1378 	 0,			/* bitpos */
1379 	 complain_overflow_dont,/* complain_on_overflow */
1380 	 bfd_elf_generic_reloc,	/* special_function */
1381 	 "R_ARM_TLS_CALL",	/* name */
1382 	 FALSE,			/* partial_inplace */
1383 	 0x00ffffff,		/* src_mask */
1384 	 0x00ffffff,		/* dst_mask */
1385 	 FALSE),		/* pcrel_offset */
1386 
1387   HOWTO (R_ARM_TLS_DESCSEQ,	/* type */
1388 	 0,			/* rightshift */
1389 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1390 	 0,			/* bitsize */
1391 	 FALSE,			/* pc_relative */
1392 	 0,			/* bitpos */
1393 	 complain_overflow_bitfield,/* complain_on_overflow */
1394 	 bfd_elf_generic_reloc,	/* special_function */
1395 	 "R_ARM_TLS_DESCSEQ",	/* name */
1396 	 FALSE,			/* partial_inplace */
1397 	 0x00000000,		/* src_mask */
1398 	 0x00000000,		/* dst_mask */
1399 	 FALSE),		/* pcrel_offset */
1400 
1401   HOWTO (R_ARM_THM_TLS_CALL,	/* type */
1402 	 0,			/* rightshift */
1403 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1404 	 24,			/* bitsize */
1405 	 FALSE,			/* pc_relative */
1406 	 0,			/* bitpos */
1407 	 complain_overflow_dont,/* complain_on_overflow */
1408 	 bfd_elf_generic_reloc,	/* special_function */
1409 	 "R_ARM_THM_TLS_CALL",	/* name */
1410 	 FALSE,			/* partial_inplace */
1411 	 0x07ff07ff,		/* src_mask */
1412 	 0x07ff07ff,		/* dst_mask */
1413 	 FALSE),		/* pcrel_offset */
1414 
1415   HOWTO (R_ARM_PLT32_ABS,	/* type */
1416 	 0,			/* rightshift */
1417 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1418 	 32,			/* bitsize */
1419 	 FALSE,			/* pc_relative */
1420 	 0,			/* bitpos */
1421 	 complain_overflow_dont,/* complain_on_overflow */
1422 	 bfd_elf_generic_reloc,	/* special_function */
1423 	 "R_ARM_PLT32_ABS",	/* name */
1424 	 FALSE,			/* partial_inplace */
1425 	 0xffffffff,		/* src_mask */
1426 	 0xffffffff,		/* dst_mask */
1427 	 FALSE),		/* pcrel_offset */
1428 
1429   HOWTO (R_ARM_GOT_ABS,		/* type */
1430 	 0,			/* rightshift */
1431 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1432 	 32,			/* bitsize */
1433 	 FALSE,			/* pc_relative */
1434 	 0,			/* bitpos */
1435 	 complain_overflow_dont,/* complain_on_overflow */
1436 	 bfd_elf_generic_reloc,	/* special_function */
1437 	 "R_ARM_GOT_ABS",	/* name */
1438 	 FALSE,			/* partial_inplace */
1439 	 0xffffffff,		/* src_mask */
1440 	 0xffffffff,		/* dst_mask */
1441 	 FALSE),			/* pcrel_offset */
1442 
1443   HOWTO (R_ARM_GOT_PREL,	/* type */
1444 	 0,			/* rightshift */
1445 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1446 	 32,			/* bitsize */
1447 	 TRUE,			/* pc_relative */
1448 	 0,			/* bitpos */
1449 	 complain_overflow_dont,	/* complain_on_overflow */
1450 	 bfd_elf_generic_reloc,	/* special_function */
1451 	 "R_ARM_GOT_PREL",	/* name */
1452 	 FALSE,			/* partial_inplace */
1453 	 0xffffffff,		/* src_mask */
1454 	 0xffffffff,		/* dst_mask */
1455 	 TRUE),			/* pcrel_offset */
1456 
1457   HOWTO (R_ARM_GOT_BREL12,	/* type */
1458 	 0,			/* rightshift */
1459 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1460 	 12,			/* bitsize */
1461 	 FALSE,			/* pc_relative */
1462 	 0,			/* bitpos */
1463 	 complain_overflow_bitfield,/* complain_on_overflow */
1464 	 bfd_elf_generic_reloc,	/* special_function */
1465 	 "R_ARM_GOT_BREL12",	/* name */
1466 	 FALSE,			/* partial_inplace */
1467 	 0x00000fff,		/* src_mask */
1468 	 0x00000fff,		/* dst_mask */
1469 	 FALSE),		/* pcrel_offset */
1470 
1471   HOWTO (R_ARM_GOTOFF12,	/* type */
1472 	 0,			/* rightshift */
1473 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1474 	 12,			/* bitsize */
1475 	 FALSE,			/* pc_relative */
1476 	 0,			/* bitpos */
1477 	 complain_overflow_bitfield,/* complain_on_overflow */
1478 	 bfd_elf_generic_reloc,	/* special_function */
1479 	 "R_ARM_GOTOFF12",	/* name */
1480 	 FALSE,			/* partial_inplace */
1481 	 0x00000fff,		/* src_mask */
1482 	 0x00000fff,		/* dst_mask */
1483 	 FALSE),		/* pcrel_offset */
1484 
1485   EMPTY_HOWTO (R_ARM_GOTRELAX),  /* reserved for future GOT-load optimizations */
1486 
1487   /* GNU extension to record C++ vtable member usage */
1488   HOWTO (R_ARM_GNU_VTENTRY,     /* type */
1489 	 0,                     /* rightshift */
1490 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1491 	 0,                     /* bitsize */
1492 	 FALSE,                 /* pc_relative */
1493 	 0,                     /* bitpos */
1494 	 complain_overflow_dont, /* complain_on_overflow */
1495 	 _bfd_elf_rel_vtable_reloc_fn,  /* special_function */
1496 	 "R_ARM_GNU_VTENTRY",   /* name */
1497 	 FALSE,                 /* partial_inplace */
1498 	 0,                     /* src_mask */
1499 	 0,                     /* dst_mask */
1500 	 FALSE),                /* pcrel_offset */
1501 
1502   /* GNU extension to record C++ vtable hierarchy */
1503   HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 	 0,                     /* rightshift */
1505 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1506 	 0,                     /* bitsize */
1507 	 FALSE,                 /* pc_relative */
1508 	 0,                     /* bitpos */
1509 	 complain_overflow_dont, /* complain_on_overflow */
1510 	 NULL,                  /* special_function */
1511 	 "R_ARM_GNU_VTINHERIT", /* name */
1512 	 FALSE,                 /* partial_inplace */
1513 	 0,                     /* src_mask */
1514 	 0,                     /* dst_mask */
1515 	 FALSE),                /* pcrel_offset */
1516 
1517   HOWTO (R_ARM_THM_JUMP11,	/* type */
1518 	 1,			/* rightshift */
1519 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1520 	 11,			/* bitsize */
1521 	 TRUE,			/* pc_relative */
1522 	 0,			/* bitpos */
1523 	 complain_overflow_signed,	/* complain_on_overflow */
1524 	 bfd_elf_generic_reloc,	/* special_function */
1525 	 "R_ARM_THM_JUMP11",	/* name */
1526 	 FALSE,			/* partial_inplace */
1527 	 0x000007ff,		/* src_mask */
1528 	 0x000007ff,		/* dst_mask */
1529 	 TRUE),			/* pcrel_offset */
1530 
1531   HOWTO (R_ARM_THM_JUMP8,	/* type */
1532 	 1,			/* rightshift */
1533 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1534 	 8,			/* bitsize */
1535 	 TRUE,			/* pc_relative */
1536 	 0,			/* bitpos */
1537 	 complain_overflow_signed,	/* complain_on_overflow */
1538 	 bfd_elf_generic_reloc,	/* special_function */
1539 	 "R_ARM_THM_JUMP8",	/* name */
1540 	 FALSE,			/* partial_inplace */
1541 	 0x000000ff,		/* src_mask */
1542 	 0x000000ff,		/* dst_mask */
1543 	 TRUE),			/* pcrel_offset */
1544 
1545   /* TLS relocations */
1546   HOWTO (R_ARM_TLS_GD32,	/* type */
1547 	 0,                     /* rightshift */
1548 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1549 	 32,                    /* bitsize */
1550 	 FALSE,                 /* pc_relative */
1551 	 0,                     /* bitpos */
1552 	 complain_overflow_bitfield,/* complain_on_overflow */
1553 	 NULL,			/* special_function */
1554 	 "R_ARM_TLS_GD32",	/* name */
1555 	 TRUE,			/* partial_inplace */
1556 	 0xffffffff,		/* src_mask */
1557 	 0xffffffff,		/* dst_mask */
1558 	 FALSE),                /* pcrel_offset */
1559 
1560   HOWTO (R_ARM_TLS_LDM32,	/* type */
1561 	 0,                     /* rightshift */
1562 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1563 	 32,                    /* bitsize */
1564 	 FALSE,                 /* pc_relative */
1565 	 0,                     /* bitpos */
1566 	 complain_overflow_bitfield,/* complain_on_overflow */
1567 	 bfd_elf_generic_reloc, /* special_function */
1568 	 "R_ARM_TLS_LDM32",	/* name */
1569 	 TRUE,			/* partial_inplace */
1570 	 0xffffffff,		/* src_mask */
1571 	 0xffffffff,		/* dst_mask */
1572 	 FALSE),                /* pcrel_offset */
1573 
1574   HOWTO (R_ARM_TLS_LDO32,	/* type */
1575 	 0,                     /* rightshift */
1576 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1577 	 32,                    /* bitsize */
1578 	 FALSE,                 /* pc_relative */
1579 	 0,                     /* bitpos */
1580 	 complain_overflow_bitfield,/* complain_on_overflow */
1581 	 bfd_elf_generic_reloc, /* special_function */
1582 	 "R_ARM_TLS_LDO32",	/* name */
1583 	 TRUE,			/* partial_inplace */
1584 	 0xffffffff,		/* src_mask */
1585 	 0xffffffff,		/* dst_mask */
1586 	 FALSE),                /* pcrel_offset */
1587 
1588   HOWTO (R_ARM_TLS_IE32,	/* type */
1589 	 0,                     /* rightshift */
1590 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1591 	 32,                    /* bitsize */
1592 	 FALSE,                  /* pc_relative */
1593 	 0,                     /* bitpos */
1594 	 complain_overflow_bitfield,/* complain_on_overflow */
1595 	 NULL,			/* special_function */
1596 	 "R_ARM_TLS_IE32",	/* name */
1597 	 TRUE,			/* partial_inplace */
1598 	 0xffffffff,		/* src_mask */
1599 	 0xffffffff,		/* dst_mask */
1600 	 FALSE),                /* pcrel_offset */
1601 
1602   HOWTO (R_ARM_TLS_LE32,	/* type */
1603 	 0,                     /* rightshift */
1604 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1605 	 32,                    /* bitsize */
1606 	 FALSE,                 /* pc_relative */
1607 	 0,                     /* bitpos */
1608 	 complain_overflow_bitfield,/* complain_on_overflow */
1609 	 NULL, 			/* special_function */
1610 	 "R_ARM_TLS_LE32",	/* name */
1611 	 TRUE,			/* partial_inplace */
1612 	 0xffffffff,		/* src_mask */
1613 	 0xffffffff,		/* dst_mask */
1614 	 FALSE),                /* pcrel_offset */
1615 
1616   HOWTO (R_ARM_TLS_LDO12,	/* type */
1617 	 0,			/* rightshift */
1618 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1619 	 12,			/* bitsize */
1620 	 FALSE,			/* pc_relative */
1621 	 0,			/* bitpos */
1622 	 complain_overflow_bitfield,/* complain_on_overflow */
1623 	 bfd_elf_generic_reloc,	/* special_function */
1624 	 "R_ARM_TLS_LDO12",	/* name */
1625 	 FALSE,			/* partial_inplace */
1626 	 0x00000fff,		/* src_mask */
1627 	 0x00000fff,		/* dst_mask */
1628 	 FALSE),		/* pcrel_offset */
1629 
1630   HOWTO (R_ARM_TLS_LE12,	/* type */
1631 	 0,			/* rightshift */
1632 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1633 	 12,			/* bitsize */
1634 	 FALSE,			/* pc_relative */
1635 	 0,			/* bitpos */
1636 	 complain_overflow_bitfield,/* complain_on_overflow */
1637 	 bfd_elf_generic_reloc,	/* special_function */
1638 	 "R_ARM_TLS_LE12",	/* name */
1639 	 FALSE,			/* partial_inplace */
1640 	 0x00000fff,		/* src_mask */
1641 	 0x00000fff,		/* dst_mask */
1642 	 FALSE),		/* pcrel_offset */
1643 
1644   HOWTO (R_ARM_TLS_IE12GP,	/* type */
1645 	 0,			/* rightshift */
1646 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1647 	 12,			/* bitsize */
1648 	 FALSE,			/* pc_relative */
1649 	 0,			/* bitpos */
1650 	 complain_overflow_bitfield,/* complain_on_overflow */
1651 	 bfd_elf_generic_reloc,	/* special_function */
1652 	 "R_ARM_TLS_IE12GP",	/* name */
1653 	 FALSE,			/* partial_inplace */
1654 	 0x00000fff,		/* src_mask */
1655 	 0x00000fff,		/* dst_mask */
1656 	 FALSE),		/* pcrel_offset */
1657 
1658   /* 112-127 private relocations.  */
1659   EMPTY_HOWTO (112),
1660   EMPTY_HOWTO (113),
1661   EMPTY_HOWTO (114),
1662   EMPTY_HOWTO (115),
1663   EMPTY_HOWTO (116),
1664   EMPTY_HOWTO (117),
1665   EMPTY_HOWTO (118),
1666   EMPTY_HOWTO (119),
1667   EMPTY_HOWTO (120),
1668   EMPTY_HOWTO (121),
1669   EMPTY_HOWTO (122),
1670   EMPTY_HOWTO (123),
1671   EMPTY_HOWTO (124),
1672   EMPTY_HOWTO (125),
1673   EMPTY_HOWTO (126),
1674   EMPTY_HOWTO (127),
1675 
1676   /* R_ARM_ME_TOO, obsolete.  */
1677   EMPTY_HOWTO (128),
1678 
1679   HOWTO (R_ARM_THM_TLS_DESCSEQ,	/* type */
1680 	 0,			/* rightshift */
1681 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1682 	 0,			/* bitsize */
1683 	 FALSE,			/* pc_relative */
1684 	 0,			/* bitpos */
1685 	 complain_overflow_bitfield,/* complain_on_overflow */
1686 	 bfd_elf_generic_reloc,	/* special_function */
1687 	 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 	 FALSE,			/* partial_inplace */
1689 	 0x00000000,		/* src_mask */
1690 	 0x00000000,		/* dst_mask */
1691 	 FALSE),		/* pcrel_offset */
1692   EMPTY_HOWTO (130),
1693   EMPTY_HOWTO (131),
1694   HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type.  */
1695 	 0,			/* rightshift.  */
1696 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1697 	 16,			/* bitsize.  */
1698 	 FALSE,			/* pc_relative.  */
1699 	 0,			/* bitpos.  */
1700 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1701 	 bfd_elf_generic_reloc,	/* special_function.  */
1702 	 "R_ARM_THM_ALU_ABS_G0_NC",/* name.  */
1703 	 FALSE,			/* partial_inplace.  */
1704 	 0x00000000,		/* src_mask.  */
1705 	 0x00000000,		/* dst_mask.  */
1706 	 FALSE),		/* pcrel_offset.  */
1707   HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type.  */
1708 	 0,			/* rightshift.  */
1709 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1710 	 16,			/* bitsize.  */
1711 	 FALSE,			/* pc_relative.  */
1712 	 0,			/* bitpos.  */
1713 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1714 	 bfd_elf_generic_reloc,	/* special_function.  */
1715 	 "R_ARM_THM_ALU_ABS_G1_NC",/* name.  */
1716 	 FALSE,			/* partial_inplace.  */
1717 	 0x00000000,		/* src_mask.  */
1718 	 0x00000000,		/* dst_mask.  */
1719 	 FALSE),		/* pcrel_offset.  */
1720   HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type.  */
1721 	 0,			/* rightshift.  */
1722 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1723 	 16,			/* bitsize.  */
1724 	 FALSE,			/* pc_relative.  */
1725 	 0,			/* bitpos.  */
1726 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1727 	 bfd_elf_generic_reloc,	/* special_function.  */
1728 	 "R_ARM_THM_ALU_ABS_G2_NC",/* name.  */
1729 	 FALSE,			/* partial_inplace.  */
1730 	 0x00000000,		/* src_mask.  */
1731 	 0x00000000,		/* dst_mask.  */
1732 	 FALSE),		/* pcrel_offset.  */
1733   HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type.  */
1734 	 0,			/* rightshift.  */
1735 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1736 	 16,			/* bitsize.  */
1737 	 FALSE,			/* pc_relative.  */
1738 	 0,			/* bitpos.  */
1739 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1740 	 bfd_elf_generic_reloc,	/* special_function.  */
1741 	 "R_ARM_THM_ALU_ABS_G3_NC",/* name.  */
1742 	 FALSE,			/* partial_inplace.  */
1743 	 0x00000000,		/* src_mask.  */
1744 	 0x00000000,		/* dst_mask.  */
1745 	 FALSE),		/* pcrel_offset.  */
1746 };
1747 
1748 /* 160 onwards: */
1749 static reloc_howto_type elf32_arm_howto_table_2[1] =
1750 {
1751   HOWTO (R_ARM_IRELATIVE,	/* type */
1752 	 0,                     /* rightshift */
1753 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1754 	 32,                    /* bitsize */
1755 	 FALSE,                 /* pc_relative */
1756 	 0,                     /* bitpos */
1757 	 complain_overflow_bitfield,/* complain_on_overflow */
1758 	 bfd_elf_generic_reloc, /* special_function */
1759 	 "R_ARM_IRELATIVE",	/* name */
1760 	 TRUE,			/* partial_inplace */
1761 	 0xffffffff,		/* src_mask */
1762 	 0xffffffff,		/* dst_mask */
1763 	 FALSE)			/* pcrel_offset */
1764 };
1765 
1766 /* 249-255 extended, currently unused, relocations:  */
1767 static reloc_howto_type elf32_arm_howto_table_3[4] =
1768 {
1769   HOWTO (R_ARM_RREL32,		/* type */
1770 	 0,			/* rightshift */
1771 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1772 	 0,			/* bitsize */
1773 	 FALSE,			/* pc_relative */
1774 	 0,			/* bitpos */
1775 	 complain_overflow_dont,/* complain_on_overflow */
1776 	 bfd_elf_generic_reloc,	/* special_function */
1777 	 "R_ARM_RREL32",	/* name */
1778 	 FALSE,			/* partial_inplace */
1779 	 0,			/* src_mask */
1780 	 0,			/* dst_mask */
1781 	 FALSE),		/* pcrel_offset */
1782 
1783   HOWTO (R_ARM_RABS32,		/* type */
1784 	 0,			/* rightshift */
1785 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1786 	 0,			/* bitsize */
1787 	 FALSE,			/* pc_relative */
1788 	 0,			/* bitpos */
1789 	 complain_overflow_dont,/* complain_on_overflow */
1790 	 bfd_elf_generic_reloc,	/* special_function */
1791 	 "R_ARM_RABS32",	/* name */
1792 	 FALSE,			/* partial_inplace */
1793 	 0,			/* src_mask */
1794 	 0,			/* dst_mask */
1795 	 FALSE),		/* pcrel_offset */
1796 
1797   HOWTO (R_ARM_RPC24,		/* type */
1798 	 0,			/* rightshift */
1799 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1800 	 0,			/* bitsize */
1801 	 FALSE,			/* pc_relative */
1802 	 0,			/* bitpos */
1803 	 complain_overflow_dont,/* complain_on_overflow */
1804 	 bfd_elf_generic_reloc,	/* special_function */
1805 	 "R_ARM_RPC24",		/* name */
1806 	 FALSE,			/* partial_inplace */
1807 	 0,			/* src_mask */
1808 	 0,			/* dst_mask */
1809 	 FALSE),		/* pcrel_offset */
1810 
1811   HOWTO (R_ARM_RBASE,		/* type */
1812 	 0,			/* rightshift */
1813 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1814 	 0,			/* bitsize */
1815 	 FALSE,			/* pc_relative */
1816 	 0,			/* bitpos */
1817 	 complain_overflow_dont,/* complain_on_overflow */
1818 	 bfd_elf_generic_reloc,	/* special_function */
1819 	 "R_ARM_RBASE",		/* name */
1820 	 FALSE,			/* partial_inplace */
1821 	 0,			/* src_mask */
1822 	 0,			/* dst_mask */
1823 	 FALSE)			/* pcrel_offset */
1824 };
1825 
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829   if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830     return &elf32_arm_howto_table_1[r_type];
1831 
1832   if (r_type == R_ARM_IRELATIVE)
1833     return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834 
1835   if (r_type >= R_ARM_RREL32
1836       && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837     return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838 
1839   return NULL;
1840 }
1841 
1842 static void
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 			 Elf_Internal_Rela * elf_reloc)
1845 {
1846   unsigned int r_type;
1847 
1848   r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849   bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1850 }
1851 
1852 struct elf32_arm_reloc_map
1853   {
1854     bfd_reloc_code_real_type  bfd_reloc_val;
1855     unsigned char             elf_reloc_val;
1856   };
1857 
1858 /* All entries in this list must also be present in elf32_arm_howto_table.  */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1860   {
1861     {BFD_RELOC_NONE,                 R_ARM_NONE},
1862     {BFD_RELOC_ARM_PCREL_BRANCH,     R_ARM_PC24},
1863     {BFD_RELOC_ARM_PCREL_CALL,	     R_ARM_CALL},
1864     {BFD_RELOC_ARM_PCREL_JUMP,	     R_ARM_JUMP24},
1865     {BFD_RELOC_ARM_PCREL_BLX,        R_ARM_XPC25},
1866     {BFD_RELOC_THUMB_PCREL_BLX,      R_ARM_THM_XPC22},
1867     {BFD_RELOC_32,                   R_ARM_ABS32},
1868     {BFD_RELOC_32_PCREL,             R_ARM_REL32},
1869     {BFD_RELOC_8,                    R_ARM_ABS8},
1870     {BFD_RELOC_16,                   R_ARM_ABS16},
1871     {BFD_RELOC_ARM_OFFSET_IMM,       R_ARM_ABS12},
1872     {BFD_RELOC_ARM_THUMB_OFFSET,     R_ARM_THM_ABS5},
1873     {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874     {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875     {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876     {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877     {BFD_RELOC_THUMB_PCREL_BRANCH9,  R_ARM_THM_JUMP8},
1878     {BFD_RELOC_THUMB_PCREL_BRANCH7,  R_ARM_THM_JUMP6},
1879     {BFD_RELOC_ARM_GLOB_DAT,         R_ARM_GLOB_DAT},
1880     {BFD_RELOC_ARM_JUMP_SLOT,        R_ARM_JUMP_SLOT},
1881     {BFD_RELOC_ARM_RELATIVE,         R_ARM_RELATIVE},
1882     {BFD_RELOC_ARM_GOTOFF,           R_ARM_GOTOFF32},
1883     {BFD_RELOC_ARM_GOTPC,            R_ARM_GOTPC},
1884     {BFD_RELOC_ARM_GOT_PREL,         R_ARM_GOT_PREL},
1885     {BFD_RELOC_ARM_GOT32,            R_ARM_GOT32},
1886     {BFD_RELOC_ARM_PLT32,            R_ARM_PLT32},
1887     {BFD_RELOC_ARM_TARGET1,	     R_ARM_TARGET1},
1888     {BFD_RELOC_ARM_ROSEGREL32,	     R_ARM_ROSEGREL32},
1889     {BFD_RELOC_ARM_SBREL32,	     R_ARM_SBREL32},
1890     {BFD_RELOC_ARM_PREL31,	     R_ARM_PREL31},
1891     {BFD_RELOC_ARM_TARGET2,	     R_ARM_TARGET2},
1892     {BFD_RELOC_ARM_PLT32,            R_ARM_PLT32},
1893     {BFD_RELOC_ARM_TLS_GOTDESC,      R_ARM_TLS_GOTDESC},
1894     {BFD_RELOC_ARM_TLS_CALL,         R_ARM_TLS_CALL},
1895     {BFD_RELOC_ARM_THM_TLS_CALL,     R_ARM_THM_TLS_CALL},
1896     {BFD_RELOC_ARM_TLS_DESCSEQ,      R_ARM_TLS_DESCSEQ},
1897     {BFD_RELOC_ARM_THM_TLS_DESCSEQ,  R_ARM_THM_TLS_DESCSEQ},
1898     {BFD_RELOC_ARM_TLS_DESC,         R_ARM_TLS_DESC},
1899     {BFD_RELOC_ARM_TLS_GD32,	     R_ARM_TLS_GD32},
1900     {BFD_RELOC_ARM_TLS_LDO32,	     R_ARM_TLS_LDO32},
1901     {BFD_RELOC_ARM_TLS_LDM32,	     R_ARM_TLS_LDM32},
1902     {BFD_RELOC_ARM_TLS_DTPMOD32,     R_ARM_TLS_DTPMOD32},
1903     {BFD_RELOC_ARM_TLS_DTPOFF32,     R_ARM_TLS_DTPOFF32},
1904     {BFD_RELOC_ARM_TLS_TPOFF32,      R_ARM_TLS_TPOFF32},
1905     {BFD_RELOC_ARM_TLS_IE32,         R_ARM_TLS_IE32},
1906     {BFD_RELOC_ARM_TLS_LE32,         R_ARM_TLS_LE32},
1907     {BFD_RELOC_ARM_IRELATIVE,        R_ARM_IRELATIVE},
1908     {BFD_RELOC_VTABLE_INHERIT,	     R_ARM_GNU_VTINHERIT},
1909     {BFD_RELOC_VTABLE_ENTRY,	     R_ARM_GNU_VTENTRY},
1910     {BFD_RELOC_ARM_MOVW,	     R_ARM_MOVW_ABS_NC},
1911     {BFD_RELOC_ARM_MOVT,	     R_ARM_MOVT_ABS},
1912     {BFD_RELOC_ARM_MOVW_PCREL,	     R_ARM_MOVW_PREL_NC},
1913     {BFD_RELOC_ARM_MOVT_PCREL,	     R_ARM_MOVT_PREL},
1914     {BFD_RELOC_ARM_THUMB_MOVW,	     R_ARM_THM_MOVW_ABS_NC},
1915     {BFD_RELOC_ARM_THUMB_MOVT,	     R_ARM_THM_MOVT_ABS},
1916     {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917     {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918     {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919     {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920     {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921     {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922     {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923     {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924     {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925     {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926     {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927     {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928     {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929     {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930     {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931     {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932     {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933     {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934     {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935     {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936     {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937     {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938     {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939     {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940     {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941     {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942     {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943     {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944     {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945     {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946     {BFD_RELOC_ARM_V4BX,	     R_ARM_V4BX},
1947     {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948     {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949     {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950     {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1951   };
1952 
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 			     bfd_reloc_code_real_type code)
1956 {
1957   unsigned int i;
1958 
1959   for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960     if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961       return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1962 
1963   return NULL;
1964 }
1965 
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1968 			     const char *r_name)
1969 {
1970   unsigned int i;
1971 
1972   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973     if (elf32_arm_howto_table_1[i].name != NULL
1974 	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975       return &elf32_arm_howto_table_1[i];
1976 
1977   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978     if (elf32_arm_howto_table_2[i].name != NULL
1979 	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980       return &elf32_arm_howto_table_2[i];
1981 
1982   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983     if (elf32_arm_howto_table_3[i].name != NULL
1984 	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985       return &elf32_arm_howto_table_3[i];
1986 
1987   return NULL;
1988 }
1989 
1990 /* Support for core dump NOTE sections.  */
1991 
1992 static bfd_boolean
1993 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1994 {
1995   int offset;
1996   size_t size;
1997 
1998   switch (note->descsz)
1999     {
2000       default:
2001 	return FALSE;
2002 
2003       case 148:		/* Linux/ARM 32-bit.  */
2004 	/* pr_cursig */
2005 	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2006 
2007 	/* pr_pid */
2008 	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2009 
2010 	/* pr_reg */
2011 	offset = 72;
2012 	size = 72;
2013 
2014 	break;
2015     }
2016 
2017   /* Make a ".reg/999" section.  */
2018   return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2019 					  size, note->descpos + offset);
2020 }
2021 
2022 static bfd_boolean
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2024 {
2025   switch (note->descsz)
2026     {
2027       default:
2028 	return FALSE;
2029 
2030       case 124:		/* Linux/ARM elf_prpsinfo.  */
2031 	elf_tdata (abfd)->core->pid
2032 	 = bfd_get_32 (abfd, note->descdata + 12);
2033 	elf_tdata (abfd)->core->program
2034 	 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 	elf_tdata (abfd)->core->command
2036 	 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2037     }
2038 
2039   /* Note that for some reason, a spurious space is tacked
2040      onto the end of the args in some (at least one anyway)
2041      implementations, so strip it off if it exists.  */
2042   {
2043     char *command = elf_tdata (abfd)->core->command;
2044     int n = strlen (command);
2045 
2046     if (0 < n && command[n - 1] == ' ')
2047       command[n - 1] = '\0';
2048   }
2049 
2050   return TRUE;
2051 }
2052 
2053 static char *
2054 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2055 				int note_type, ...)
2056 {
2057   switch (note_type)
2058     {
2059     default:
2060       return NULL;
2061 
2062     case NT_PRPSINFO:
2063       {
2064 	char data[124];
2065 	va_list ap;
2066 
2067 	va_start (ap, note_type);
2068 	memset (data, 0, sizeof (data));
2069 	strncpy (data + 28, va_arg (ap, const char *), 16);
2070 	strncpy (data + 44, va_arg (ap, const char *), 80);
2071 	va_end (ap);
2072 
2073 	return elfcore_write_note (abfd, buf, bufsiz,
2074 				   "CORE", note_type, data, sizeof (data));
2075       }
2076 
2077     case NT_PRSTATUS:
2078       {
2079 	char data[148];
2080 	va_list ap;
2081 	long pid;
2082 	int cursig;
2083 	const void *greg;
2084 
2085 	va_start (ap, note_type);
2086 	memset (data, 0, sizeof (data));
2087 	pid = va_arg (ap, long);
2088 	bfd_put_32 (abfd, pid, data + 24);
2089 	cursig = va_arg (ap, int);
2090 	bfd_put_16 (abfd, cursig, data + 12);
2091 	greg = va_arg (ap, const void *);
2092 	memcpy (data + 72, greg, 72);
2093 	va_end (ap);
2094 
2095 	return elfcore_write_note (abfd, buf, bufsiz,
2096 				   "CORE", note_type, data, sizeof (data));
2097       }
2098     }
2099 }
2100 
2101 #define TARGET_LITTLE_SYM               arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME              "elf32-littlearm"
2103 #define TARGET_BIG_SYM                  arm_elf32_be_vec
2104 #define TARGET_BIG_NAME                 "elf32-bigarm"
2105 
2106 #define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note	elf32_arm_nabi_write_core_note
2109 
2110 typedef unsigned long int insn32;
2111 typedef unsigned short int insn16;
2112 
2113 /* In lieu of proper flags, assume all EABIv4 or later objects are
2114    interworkable.  */
2115 #define INTERWORK_FLAG(abfd)  \
2116   (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118   || ((abfd)->flags & BFD_LINKER_CREATED))
2119 
2120 /* The linker script knows the section names for placement.
2121    The entry_names are used to do simple name mangling on the stubs.
2122    Given a function name, and its type, the stub can be found. The
2123    name can be changed. The only requirement is the %s be present.  */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"
2126 
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"
2129 
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"
2132 
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"
2135 
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"
2138 
2139 #define STUB_ENTRY_NAME   "__%s_veneer"
2140 
2141 /* The name of the dynamic interpreter.  This is put in the .interp
2142    section.  */
2143 #define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2144 
2145 static const unsigned long tls_trampoline [] =
2146 {
2147   0xe08e0000,		/* add r0, lr, r0 */
2148   0xe5901004,		/* ldr r1, [r0,#4] */
2149   0xe12fff11,		/* bx  r1 */
2150 };
2151 
2152 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2153 {
2154   0xe52d2004, /*	push    {r2}			*/
2155   0xe59f200c, /*      ldr     r2, [pc, #3f - . - 8]	*/
2156   0xe59f100c, /*      ldr     r1, [pc, #4f - . - 8]	*/
2157   0xe79f2002, /* 1:   ldr     r2, [pc, r2]		*/
2158   0xe081100f, /* 2:   add     r1, pc			*/
2159   0xe12fff12, /*      bx      r2			*/
2160   0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
2161 				+ dl_tlsdesc_lazy_resolver(GOT)   */
2162   0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2163 };
2164 
2165 #ifdef FOUR_WORD_PLT
2166 
2167 /* The first entry in a procedure linkage table looks like
2168    this.  It is set up so that any shared library function that is
2169    called before the relocation has been set up calls the dynamic
2170    linker first.  */
2171 static const bfd_vma elf32_arm_plt0_entry [] =
2172 {
2173   0xe52de004,		/* str   lr, [sp, #-4]! */
2174   0xe59fe010,		/* ldr   lr, [pc, #16]  */
2175   0xe08fe00e,		/* add   lr, pc, lr     */
2176   0xe5bef008,		/* ldr   pc, [lr, #8]!  */
2177 };
2178 
2179 /* Subsequent entries in a procedure linkage table look like
2180    this.  */
2181 static const bfd_vma elf32_arm_plt_entry [] =
2182 {
2183   0xe28fc600,		/* add   ip, pc, #NN	*/
2184   0xe28cca00,		/* add	 ip, ip, #NN	*/
2185   0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
2186   0x00000000,		/* unused		*/
2187 };
2188 
2189 #else /* not FOUR_WORD_PLT */
2190 
2191 /* The first entry in a procedure linkage table looks like
2192    this.  It is set up so that any shared library function that is
2193    called before the relocation has been set up calls the dynamic
2194    linker first.  */
2195 static const bfd_vma elf32_arm_plt0_entry [] =
2196 {
2197   0xe52de004,		/* str   lr, [sp, #-4]! */
2198   0xe59fe004,		/* ldr   lr, [pc, #4]   */
2199   0xe08fe00e,		/* add   lr, pc, lr     */
2200   0xe5bef008,		/* ldr   pc, [lr, #8]!  */
2201   0x00000000,		/* &GOT[0] - .          */
2202 };
2203 
2204 /* By default subsequent entries in a procedure linkage table look like
2205    this. Offsets that don't fit into 28 bits will cause link error.  */
2206 static const bfd_vma elf32_arm_plt_entry_short [] =
2207 {
2208   0xe28fc600,		/* add   ip, pc, #0xNN00000 */
2209   0xe28cca00,		/* add	 ip, ip, #0xNN000   */
2210   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
2211 };
2212 
2213 /* When explicitly asked, we'll use this "long" entry format
2214    which can cope with arbitrary displacements.  */
2215 static const bfd_vma elf32_arm_plt_entry_long [] =
2216 {
2217   0xe28fc200,           /* add   ip, pc, #0xN0000000 */
2218   0xe28cc600,		/* add   ip, ip, #0xNN00000  */
2219   0xe28cca00,		/* add	 ip, ip, #0xNN000    */
2220   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
2221 };
2222 
2223 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2224 
2225 #endif /* not FOUR_WORD_PLT */
2226 
2227 /* The first entry in a procedure linkage table looks like this.
2228    It is set up so that any shared library function that is called before the
2229    relocation has been set up calls the dynamic linker first.  */
2230 static const bfd_vma elf32_thumb2_plt0_entry [] =
2231 {
2232   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2233      an instruction maybe encoded to one or two array elements.  */
2234   0xf8dfb500,		/* push    {lr}          */
2235   0x44fee008,		/* ldr.w   lr, [pc, #8]  */
2236 			/* add     lr, pc        */
2237   0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
2238   0x00000000,		/* &GOT[0] - .           */
2239 };
2240 
2241 /* Subsequent entries in a procedure linkage table for thumb only target
2242    look like this.  */
2243 static const bfd_vma elf32_thumb2_plt_entry [] =
2244 {
2245   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2246      an instruction maybe encoded to one or two array elements.  */
2247   0x0c00f240,		/* movw    ip, #0xNNNN    */
2248   0x0c00f2c0,		/* movt    ip, #0xNNNN    */
2249   0xf8dc44fc,           /* add     ip, pc         */
2250   0xbf00f000            /* ldr.w   pc, [ip]       */
2251 			/* nop                    */
2252 };
2253 
2254 /* The format of the first entry in the procedure linkage table
2255    for a VxWorks executable.  */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2257 {
2258   0xe52dc008,	        /* str    ip,[sp,#-8]!			*/
2259   0xe59fc000,   	/* ldr    ip,[pc]			*/
2260   0xe59cf008,   	/* ldr    pc,[ip,#8]			*/
2261   0x00000000,   	/* .long  _GLOBAL_OFFSET_TABLE_		*/
2262 };
2263 
2264 /* The format of subsequent entries in a VxWorks executable.  */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2266 {
2267   0xe59fc000,         /* ldr    ip,[pc]			*/
2268   0xe59cf000,         /* ldr    pc,[ip]			*/
2269   0x00000000,         /* .long  @got				*/
2270   0xe59fc000,         /* ldr    ip,[pc]			*/
2271   0xea000000,         /* b      _PLT				*/
2272   0x00000000,         /* .long  @pltindex*sizeof(Elf32_Rela)	*/
2273 };
2274 
2275 /* The format of entries in a VxWorks shared library.  */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2277 {
2278   0xe59fc000,         /* ldr    ip,[pc]			*/
2279   0xe79cf009,         /* ldr    pc,[ip,r9]			*/
2280   0x00000000,         /* .long  @got				*/
2281   0xe59fc000,         /* ldr    ip,[pc]			*/
2282   0xe599f008,         /* ldr    pc,[r9,#8]			*/
2283   0x00000000,         /* .long  @pltindex*sizeof(Elf32_Rela)	*/
2284 };
2285 
2286 /* An initial stub used if the PLT entry is referenced from Thumb code.  */
2287 #define PLT_THUMB_STUB_SIZE 4
2288 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2289 {
2290   0x4778,		/* bx pc */
2291   0x46c0		/* nop   */
2292 };
2293 
2294 /* The entries in a PLT when using a DLL-based target with multiple
2295    address spaces.  */
2296 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2297 {
2298   0xe51ff004,         /* ldr   pc, [pc, #-4] */
2299   0x00000000,         /* dcd   R_ARM_GLOB_DAT(X) */
2300 };
2301 
2302 /* The first entry in a procedure linkage table looks like
2303    this.  It is set up so that any shared library function that is
2304    called before the relocation has been set up calls the dynamic
2305    linker first.  */
2306 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2307 {
2308   /* First bundle: */
2309   0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
2310   0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
2311   0xe08cc00f,		/* add	ip, ip, pc			*/
2312   0xe52dc008,		/* str	ip, [sp, #-8]!			*/
2313   /* Second bundle: */
2314   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2315   0xe59cc000,		/* ldr	ip, [ip]			*/
2316   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2317   0xe12fff1c,		/* bx	ip				*/
2318   /* Third bundle: */
2319   0xe320f000,		/* nop					*/
2320   0xe320f000,		/* nop					*/
2321   0xe320f000,		/* nop					*/
2322   /* .Lplt_tail: */
2323   0xe50dc004,		/* str	ip, [sp, #-4]			*/
2324   /* Fourth bundle: */
2325   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2326   0xe59cc000,		/* ldr	ip, [ip]			*/
2327   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2328   0xe12fff1c,		/* bx	ip				*/
2329 };
2330 #define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)
2331 
2332 /* Subsequent entries in a procedure linkage table look like this.  */
2333 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2334 {
2335   0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
2336   0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
2337   0xe08cc00f,		/* add	ip, ip, pc			*/
2338   0xea000000,		/* b	.Lplt_tail			*/
2339 };
2340 
2341 #define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
2342 #define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
2343 #define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
2344 #define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
2345 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2346 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2347 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2348 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2349 
2350 enum stub_insn_type
2351 {
2352   THUMB16_TYPE = 1,
2353   THUMB32_TYPE,
2354   ARM_TYPE,
2355   DATA_TYPE
2356 };
2357 
2358 #define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
2359 /* A bit of a hack.  A Thumb conditional branch, in which the proper condition
2360    is inserted in arm_build_one_stub().  */
2361 #define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
2362 #define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
2363 #define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2364 #define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2365 #define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2366 #define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
2367 #define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2368 #define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}
2369 
2370 typedef struct
2371 {
2372   bfd_vma              data;
2373   enum stub_insn_type  type;
2374   unsigned int         r_type;
2375   int                  reloc_addend;
2376 }  insn_sequence;
2377 
2378 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2379    to reach the stub if necessary.  */
2380 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2381 {
2382   ARM_INSN (0xe51ff004),            /* ldr   pc, [pc, #-4] */
2383   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2384 };
2385 
2386 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2387    available.  */
2388 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2389 {
2390   ARM_INSN (0xe59fc000),            /* ldr   ip, [pc, #0] */
2391   ARM_INSN (0xe12fff1c),            /* bx    ip */
2392   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2393 };
2394 
2395 /* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
2396 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2397 {
2398   THUMB16_INSN (0xb401),             /* push {r0} */
2399   THUMB16_INSN (0x4802),             /* ldr  r0, [pc, #8] */
2400   THUMB16_INSN (0x4684),             /* mov  ip, r0 */
2401   THUMB16_INSN (0xbc01),             /* pop  {r0} */
2402   THUMB16_INSN (0x4760),             /* bx   ip */
2403   THUMB16_INSN (0xbf00),             /* nop */
2404   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2405 };
2406 
2407 /* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
2408 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2409 {
2410   THUMB32_INSN (0xf85ff000),         /* ldr.w  pc, [pc, #-0] */
2411   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(x) */
2412 };
2413 
2414 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2415    M-profile architectures.  */
2416 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2417 {
2418   THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
2419   THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
2420   THUMB16_INSN (0x4760),             /* bx   ip */
2421 };
2422 
2423 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2424    allowed.  */
2425 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2426 {
2427   THUMB16_INSN (0x4778),             /* bx   pc */
2428   THUMB16_INSN (0x46c0),             /* nop */
2429   ARM_INSN (0xe59fc000),             /* ldr  ip, [pc, #0] */
2430   ARM_INSN (0xe12fff1c),             /* bx   ip */
2431   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2432 };
2433 
2434 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2435    available.  */
2436 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2437 {
2438   THUMB16_INSN (0x4778),             /* bx   pc */
2439   THUMB16_INSN (0x46c0),             /* nop   */
2440   ARM_INSN (0xe51ff004),             /* ldr   pc, [pc, #-4] */
2441   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
2442 };
2443 
2444 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2445    one, when the destination is close enough.  */
2446 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2447 {
2448   THUMB16_INSN (0x4778),             /* bx   pc */
2449   THUMB16_INSN (0x46c0),             /* nop   */
2450   ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
2451 };
2452 
2453 /* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
2454    blx to reach the stub if necessary.  */
2455 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2456 {
2457   ARM_INSN (0xe59fc000),             /* ldr   ip, [pc] */
2458   ARM_INSN (0xe08ff00c),             /* add   pc, pc, ip */
2459   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2460 };
2461 
2462 /* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
2463    blx to reach the stub if necessary.  We can not add into pc;
2464    it is not guaranteed to mode switch (different in ARMv6 and
2465    ARMv7).  */
2466 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2467 {
2468   ARM_INSN (0xe59fc004),             /* ldr   ip, [pc, #4] */
2469   ARM_INSN (0xe08fc00c),             /* add   ip, pc, ip */
2470   ARM_INSN (0xe12fff1c),             /* bx    ip */
2471   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2472 };
2473 
2474 /* V4T ARM -> ARM long branch stub, PIC.  */
2475 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2476 {
2477   ARM_INSN (0xe59fc004),             /* ldr   ip, [pc, #4] */
2478   ARM_INSN (0xe08fc00c),             /* add   ip, pc, ip */
2479   ARM_INSN (0xe12fff1c),             /* bx    ip */
2480   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2481 };
2482 
2483 /* V4T Thumb -> ARM long branch stub, PIC.  */
2484 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2485 {
2486   THUMB16_INSN (0x4778),             /* bx   pc */
2487   THUMB16_INSN (0x46c0),             /* nop  */
2488   ARM_INSN (0xe59fc000),             /* ldr  ip, [pc, #0] */
2489   ARM_INSN (0xe08cf00f),             /* add  pc, ip, pc */
2490   DATA_WORD (0, R_ARM_REL32, -4),     /* dcd  R_ARM_REL32(X) */
2491 };
2492 
2493 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2494    architectures.  */
2495 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2496 {
2497   THUMB16_INSN (0xb401),             /* push {r0} */
2498   THUMB16_INSN (0x4802),             /* ldr  r0, [pc, #8] */
2499   THUMB16_INSN (0x46fc),             /* mov  ip, pc */
2500   THUMB16_INSN (0x4484),             /* add  ip, r0 */
2501   THUMB16_INSN (0xbc01),             /* pop  {r0} */
2502   THUMB16_INSN (0x4760),             /* bx   ip */
2503   DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X) */
2504 };
2505 
2506 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2507    allowed.  */
2508 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2509 {
2510   THUMB16_INSN (0x4778),             /* bx   pc */
2511   THUMB16_INSN (0x46c0),             /* nop */
2512   ARM_INSN (0xe59fc004),             /* ldr  ip, [pc, #4] */
2513   ARM_INSN (0xe08fc00c),             /* add   ip, pc, ip */
2514   ARM_INSN (0xe12fff1c),             /* bx   ip */
2515   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
2516 };
2517 
2518 /* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
2519    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2520 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2521 {
2522   ARM_INSN (0xe59f1000),             /* ldr   r1, [pc] */
2523   ARM_INSN (0xe08ff001),             /* add   pc, pc, r1 */
2524   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2525 };
2526 
2527 /* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
2528    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2529 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2530 {
2531   THUMB16_INSN (0x4778),             /* bx   pc */
2532   THUMB16_INSN (0x46c0),             /* nop */
2533   ARM_INSN (0xe59f1000),             /* ldr  r1, [pc, #0] */
2534   ARM_INSN (0xe081f00f),             /* add  pc, r1, pc */
2535   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X) */
2536 };
2537 
2538 /* NaCl ARM -> ARM long branch stub.  */
2539 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2540 {
2541   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2542   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2543   ARM_INSN (0xe12fff1c),                /* bx	ip */
2544   ARM_INSN (0xe320f000),                /* nop */
2545   ARM_INSN (0xe125be70),                /* bkpt	0x5be0 */
2546   DATA_WORD (0, R_ARM_ABS32, 0),        /* dcd	R_ARM_ABS32(X) */
2547   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
2548   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
2549 };
2550 
2551 /* NaCl ARM -> ARM long branch stub, PIC.  */
2552 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2553 {
2554   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2555   ARM_INSN (0xe08cc00f),                /* add	ip, ip, pc */
2556   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2557   ARM_INSN (0xe12fff1c),                /* bx	ip */
2558   ARM_INSN (0xe125be70),                /* bkpt	0x5be0 */
2559   DATA_WORD (0, R_ARM_REL32, 8),        /* dcd	R_ARM_REL32(X+8) */
2560   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
2561   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
2562 };
2563 
2564 
2565 /* Cortex-A8 erratum-workaround stubs.  */
2566 
2567 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2568    can't use a conditional branch to reach this stub).  */
2569 
2570 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2571 {
2572   THUMB16_BCOND_INSN (0xd001),         /* b<cond>.n true.  */
2573   THUMB32_B_INSN (0xf000b800, -4),     /* b.w insn_after_original_branch.  */
2574   THUMB32_B_INSN (0xf000b800, -4)      /* true: b.w original_branch_dest.  */
2575 };
2576 
2577 /* Stub used for b.w and bl.w instructions.  */
2578 
2579 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2580 {
2581   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2582 };
2583 
2584 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2585 {
2586   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2587 };
2588 
2589 /* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
2590    instruction (which switches to ARM mode) to point to this stub.  Jump to the
2591    real destination using an ARM-mode branch.  */
2592 
2593 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2594 {
2595   ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
2596 };
2597 
2598 /* For each section group there can be a specially created linker section
2599    to hold the stubs for that group.  The name of the stub section is based
2600    upon the name of another section within that group with the suffix below
2601    applied.
2602 
2603    PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2604    create what appeared to be a linker stub section when it actually
2605    contained user code/data.  For example, consider this fragment:
2606 
2607      const char * stubborn_problems[] = { "np" };
2608 
2609    If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2610    section called:
2611 
2612      .data.rel.local.stubborn_problems
2613 
2614    This then causes problems in arm32_arm_build_stubs() as it triggers:
2615 
2616       // Ignore non-stub sections.
2617       if (!strstr (stub_sec->name, STUB_SUFFIX))
2618 	continue;
2619 
2620    And so the section would be ignored instead of being processed.  Hence
2621    the change in definition of STUB_SUFFIX to a name that cannot be a valid
2622    C identifier.  */
2623 #define STUB_SUFFIX ".__stub"
2624 
2625 /* One entry per long/short branch stub defined above.  */
2626 #define DEF_STUBS \
2627   DEF_STUB(long_branch_any_any)	\
2628   DEF_STUB(long_branch_v4t_arm_thumb) \
2629   DEF_STUB(long_branch_thumb_only) \
2630   DEF_STUB(long_branch_v4t_thumb_thumb)	\
2631   DEF_STUB(long_branch_v4t_thumb_arm) \
2632   DEF_STUB(short_branch_v4t_thumb_arm) \
2633   DEF_STUB(long_branch_any_arm_pic) \
2634   DEF_STUB(long_branch_any_thumb_pic) \
2635   DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2636   DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2637   DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2638   DEF_STUB(long_branch_thumb_only_pic) \
2639   DEF_STUB(long_branch_any_tls_pic) \
2640   DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2641   DEF_STUB(long_branch_arm_nacl) \
2642   DEF_STUB(long_branch_arm_nacl_pic) \
2643   DEF_STUB(a8_veneer_b_cond) \
2644   DEF_STUB(a8_veneer_b) \
2645   DEF_STUB(a8_veneer_bl) \
2646   DEF_STUB(a8_veneer_blx) \
2647   DEF_STUB(long_branch_thumb2_only) \
2648   DEF_STUB(long_branch_thumb2_only_pure)
2649 
2650 #define DEF_STUB(x) arm_stub_##x,
2651 enum elf32_arm_stub_type
2652 {
2653   arm_stub_none,
2654   DEF_STUBS
2655   max_stub_type
2656 };
2657 #undef DEF_STUB
2658 
2659 /* Note the first a8_veneer type.  */
2660 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2661 
2662 typedef struct
2663 {
2664   const insn_sequence* template_sequence;
2665   int template_size;
2666 } stub_def;
2667 
2668 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2669 static const stub_def stub_definitions[] =
2670 {
2671   {NULL, 0},
2672   DEF_STUBS
2673 };
2674 
2675 struct elf32_arm_stub_hash_entry
2676 {
2677   /* Base hash table entry structure.  */
2678   struct bfd_hash_entry root;
2679 
2680   /* The stub section.  */
2681   asection *stub_sec;
2682 
2683   /* Offset within stub_sec of the beginning of this stub.  */
2684   bfd_vma stub_offset;
2685 
2686   /* Given the symbol's value and its section we can determine its final
2687      value when building the stubs (so the stub knows where to jump).  */
2688   bfd_vma target_value;
2689   asection *target_section;
2690 
2691   /* Same as above but for the source of the branch to the stub.  Used for
2692      Cortex-A8 erratum workaround to patch it to branch to the stub.  As
2693      such, source section does not need to be recorded since Cortex-A8 erratum
2694      workaround stubs are only generated when both source and target are in the
2695      same section.  */
2696   bfd_vma source_value;
2697 
2698   /* The instruction which caused this stub to be generated (only valid for
2699      Cortex-A8 erratum workaround stubs at present).  */
2700   unsigned long orig_insn;
2701 
2702   /* The stub type.  */
2703   enum elf32_arm_stub_type stub_type;
2704   /* Its encoding size in bytes.  */
2705   int stub_size;
2706   /* Its template.  */
2707   const insn_sequence *stub_template;
2708   /* The size of the template (number of entries).  */
2709   int stub_template_size;
2710 
2711   /* The symbol table entry, if any, that this was derived from.  */
2712   struct elf32_arm_link_hash_entry *h;
2713 
2714   /* Type of branch.  */
2715   enum arm_st_branch_type branch_type;
2716 
2717   /* Where this stub is being called from, or, in the case of combined
2718      stub sections, the first input section in the group.  */
2719   asection *id_sec;
2720 
2721   /* The name for the local symbol at the start of this stub.  The
2722      stub name in the hash table has to be unique; this does not, so
2723      it can be friendlier.  */
2724   char *output_name;
2725 };
2726 
2727 /* Used to build a map of a section.  This is required for mixed-endian
2728    code/data.  */
2729 
2730 typedef struct elf32_elf_section_map
2731 {
2732   bfd_vma vma;
2733   char type;
2734 }
2735 elf32_arm_section_map;
2736 
2737 /* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */
2738 
2739 typedef enum
2740 {
2741   VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2742   VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2743   VFP11_ERRATUM_ARM_VENEER,
2744   VFP11_ERRATUM_THUMB_VENEER
2745 }
2746 elf32_vfp11_erratum_type;
2747 
2748 typedef struct elf32_vfp11_erratum_list
2749 {
2750   struct elf32_vfp11_erratum_list *next;
2751   bfd_vma vma;
2752   union
2753   {
2754     struct
2755     {
2756       struct elf32_vfp11_erratum_list *veneer;
2757       unsigned int vfp_insn;
2758     } b;
2759     struct
2760     {
2761       struct elf32_vfp11_erratum_list *branch;
2762       unsigned int id;
2763     } v;
2764   } u;
2765   elf32_vfp11_erratum_type type;
2766 }
2767 elf32_vfp11_erratum_list;
2768 
2769 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2770    veneer.  */
2771 typedef enum
2772 {
2773   STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2774   STM32L4XX_ERRATUM_VENEER
2775 }
2776 elf32_stm32l4xx_erratum_type;
2777 
2778 typedef struct elf32_stm32l4xx_erratum_list
2779 {
2780   struct elf32_stm32l4xx_erratum_list *next;
2781   bfd_vma vma;
2782   union
2783   {
2784     struct
2785     {
2786       struct elf32_stm32l4xx_erratum_list *veneer;
2787       unsigned int insn;
2788     } b;
2789     struct
2790     {
2791       struct elf32_stm32l4xx_erratum_list *branch;
2792       unsigned int id;
2793     } v;
2794   } u;
2795   elf32_stm32l4xx_erratum_type type;
2796 }
2797 elf32_stm32l4xx_erratum_list;
2798 
2799 typedef enum
2800 {
2801   DELETE_EXIDX_ENTRY,
2802   INSERT_EXIDX_CANTUNWIND_AT_END
2803 }
2804 arm_unwind_edit_type;
2805 
2806 /* A (sorted) list of edits to apply to an unwind table.  */
2807 typedef struct arm_unwind_table_edit
2808 {
2809   arm_unwind_edit_type type;
2810   /* Note: we sometimes want to insert an unwind entry corresponding to a
2811      section different from the one we're currently writing out, so record the
2812      (text) section this edit relates to here.  */
2813   asection *linked_section;
2814   unsigned int index;
2815   struct arm_unwind_table_edit *next;
2816 }
2817 arm_unwind_table_edit;
2818 
2819 typedef struct _arm_elf_section_data
2820 {
2821   /* Information about mapping symbols.  */
2822   struct bfd_elf_section_data elf;
2823   unsigned int mapcount;
2824   unsigned int mapsize;
2825   elf32_arm_section_map *map;
2826   /* Information about CPU errata.  */
2827   unsigned int erratumcount;
2828   elf32_vfp11_erratum_list *erratumlist;
2829   unsigned int stm32l4xx_erratumcount;
2830   elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2831   unsigned int additional_reloc_count;
2832   /* Information about unwind tables.  */
2833   union
2834   {
2835     /* Unwind info attached to a text section.  */
2836     struct
2837     {
2838       asection *arm_exidx_sec;
2839     } text;
2840 
2841     /* Unwind info attached to an .ARM.exidx section.  */
2842     struct
2843     {
2844       arm_unwind_table_edit *unwind_edit_list;
2845       arm_unwind_table_edit *unwind_edit_tail;
2846     } exidx;
2847   } u;
2848 }
2849 _arm_elf_section_data;
2850 
2851 #define elf32_arm_section_data(sec) \
2852   ((_arm_elf_section_data *) elf_section_data (sec))
2853 
2854 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2855    These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2856    so may be created multiple times: we use an array of these entries whilst
2857    relaxing which we can refresh easily, then create stubs for each potentially
2858    erratum-triggering instruction once we've settled on a solution.  */
2859 
2860 struct a8_erratum_fix
2861 {
2862   bfd *input_bfd;
2863   asection *section;
2864   bfd_vma offset;
2865   bfd_vma target_offset;
2866   unsigned long orig_insn;
2867   char *stub_name;
2868   enum elf32_arm_stub_type stub_type;
2869   enum arm_st_branch_type branch_type;
2870 };
2871 
2872 /* A table of relocs applied to branches which might trigger Cortex-A8
2873    erratum.  */
2874 
2875 struct a8_erratum_reloc
2876 {
2877   bfd_vma from;
2878   bfd_vma destination;
2879   struct elf32_arm_link_hash_entry *hash;
2880   const char *sym_name;
2881   unsigned int r_type;
2882   enum arm_st_branch_type branch_type;
2883   bfd_boolean non_a8_stub;
2884 };
2885 
2886 /* The size of the thread control block.  */
2887 #define TCB_SIZE	8
2888 
2889 /* ARM-specific information about a PLT entry, over and above the usual
2890    gotplt_union.  */
2891 struct arm_plt_info
2892 {
2893   /* We reference count Thumb references to a PLT entry separately,
2894      so that we can emit the Thumb trampoline only if needed.  */
2895   bfd_signed_vma thumb_refcount;
2896 
2897   /* Some references from Thumb code may be eliminated by BL->BLX
2898      conversion, so record them separately.  */
2899   bfd_signed_vma maybe_thumb_refcount;
2900 
2901   /* How many of the recorded PLT accesses were from non-call relocations.
2902      This information is useful when deciding whether anything takes the
2903      address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
2904      non-call references to the function should resolve directly to the
2905      real runtime target.  */
2906   unsigned int noncall_refcount;
2907 
2908   /* Since PLT entries have variable size if the Thumb prologue is
2909      used, we need to record the index into .got.plt instead of
2910      recomputing it from the PLT offset.  */
2911   bfd_signed_vma got_offset;
2912 };
2913 
2914 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
2915 struct arm_local_iplt_info
2916 {
2917   /* The information that is usually found in the generic ELF part of
2918      the hash table entry.  */
2919   union gotplt_union root;
2920 
2921   /* The information that is usually found in the ARM-specific part of
2922      the hash table entry.  */
2923   struct arm_plt_info arm;
2924 
2925   /* A list of all potential dynamic relocations against this symbol.  */
2926   struct elf_dyn_relocs *dyn_relocs;
2927 };
2928 
2929 struct elf_arm_obj_tdata
2930 {
2931   struct elf_obj_tdata root;
2932 
2933   /* tls_type for each local got entry.  */
2934   char *local_got_tls_type;
2935 
2936   /* GOTPLT entries for TLS descriptors.  */
2937   bfd_vma *local_tlsdesc_gotent;
2938 
2939   /* Information for local symbols that need entries in .iplt.  */
2940   struct arm_local_iplt_info **local_iplt;
2941 
2942   /* Zero to warn when linking objects with incompatible enum sizes.  */
2943   int no_enum_size_warning;
2944 
2945   /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
2946   int no_wchar_size_warning;
2947 };
2948 
2949 #define elf_arm_tdata(bfd) \
2950   ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2951 
2952 #define elf32_arm_local_got_tls_type(bfd) \
2953   (elf_arm_tdata (bfd)->local_got_tls_type)
2954 
2955 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2956   (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2957 
2958 #define elf32_arm_local_iplt(bfd) \
2959   (elf_arm_tdata (bfd)->local_iplt)
2960 
2961 #define is_arm_elf(bfd) \
2962   (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2963    && elf_tdata (bfd) != NULL \
2964    && elf_object_id (bfd) == ARM_ELF_DATA)
2965 
2966 static bfd_boolean
2967 elf32_arm_mkobject (bfd *abfd)
2968 {
2969   return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2970 				  ARM_ELF_DATA);
2971 }
2972 
2973 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2974 
2975 /* Arm ELF linker hash entry.  */
2976 struct elf32_arm_link_hash_entry
2977 {
2978   struct elf_link_hash_entry root;
2979 
2980   /* Track dynamic relocs copied for this symbol.  */
2981   struct elf_dyn_relocs *dyn_relocs;
2982 
2983   /* ARM-specific PLT information.  */
2984   struct arm_plt_info plt;
2985 
2986 #define GOT_UNKNOWN	0
2987 #define GOT_NORMAL	1
2988 #define GOT_TLS_GD	2
2989 #define GOT_TLS_IE	4
2990 #define GOT_TLS_GDESC	8
2991 #define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2992   unsigned int tls_type : 8;
2993 
2994   /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
2995   unsigned int is_iplt : 1;
2996 
2997   unsigned int unused : 23;
2998 
2999   /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3000      starting at the end of the jump table.  */
3001   bfd_vma tlsdesc_got;
3002 
3003   /* The symbol marking the real symbol location for exported thumb
3004      symbols with Arm stubs.  */
3005   struct elf_link_hash_entry *export_glue;
3006 
3007   /* A pointer to the most recently used stub hash entry against this
3008      symbol.  */
3009   struct elf32_arm_stub_hash_entry *stub_cache;
3010 };
3011 
3012 /* Traverse an arm ELF linker hash table.  */
3013 #define elf32_arm_link_hash_traverse(table, func, info)			\
3014   (elf_link_hash_traverse						\
3015    (&(table)->root,							\
3016     (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
3017     (info)))
3018 
3019 /* Get the ARM elf linker hash table from a link_info structure.  */
3020 #define elf32_arm_hash_table(info) \
3021   (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3022   == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3023 
3024 #define arm_stub_hash_lookup(table, string, create, copy) \
3025   ((struct elf32_arm_stub_hash_entry *) \
3026    bfd_hash_lookup ((table), (string), (create), (copy)))
3027 
3028 /* Array to keep track of which stub sections have been created, and
3029    information on stub grouping.  */
3030 struct map_stub
3031 {
3032   /* This is the section to which stubs in the group will be
3033      attached.  */
3034   asection *link_sec;
3035   /* The stub section.  */
3036   asection *stub_sec;
3037 };
3038 
3039 #define elf32_arm_compute_jump_table_size(htab) \
3040   ((htab)->next_tls_desc_index * 4)
3041 
3042 /* ARM ELF linker hash table.  */
3043 struct elf32_arm_link_hash_table
3044 {
3045   /* The main hash table.  */
3046   struct elf_link_hash_table root;
3047 
3048   /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
3049   bfd_size_type thumb_glue_size;
3050 
3051   /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
3052   bfd_size_type arm_glue_size;
3053 
3054   /* The size in bytes of section containing the ARMv4 BX veneers.  */
3055   bfd_size_type bx_glue_size;
3056 
3057   /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
3058      veneer has been populated.  */
3059   bfd_vma bx_glue_offset[15];
3060 
3061   /* The size in bytes of the section containing glue for VFP11 erratum
3062      veneers.  */
3063   bfd_size_type vfp11_erratum_glue_size;
3064 
3065  /* The size in bytes of the section containing glue for STM32L4XX erratum
3066      veneers.  */
3067   bfd_size_type stm32l4xx_erratum_glue_size;
3068 
3069   /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
3070      holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3071      elf32_arm_write_section().  */
3072   struct a8_erratum_fix *a8_erratum_fixes;
3073   unsigned int num_a8_erratum_fixes;
3074 
3075   /* An arbitrary input BFD chosen to hold the glue sections.  */
3076   bfd * bfd_of_glue_owner;
3077 
3078   /* Nonzero to output a BE8 image.  */
3079   int byteswap_code;
3080 
3081   /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3082      Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
3083   int target1_is_rel;
3084 
3085   /* The relocation to use for R_ARM_TARGET2 relocations.  */
3086   int target2_reloc;
3087 
3088   /* 0 = Ignore R_ARM_V4BX.
3089      1 = Convert BX to MOV PC.
3090      2 = Generate v4 interworing stubs.  */
3091   int fix_v4bx;
3092 
3093   /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
3094   int fix_cortex_a8;
3095 
3096   /* Whether we should fix the ARM1176 BLX immediate issue.  */
3097   int fix_arm1176;
3098 
3099   /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
3100   int use_blx;
3101 
3102   /* What sort of code sequences we should look for which may trigger the
3103      VFP11 denorm erratum.  */
3104   bfd_arm_vfp11_fix vfp11_fix;
3105 
3106   /* Global counter for the number of fixes we have emitted.  */
3107   int num_vfp11_fixes;
3108 
3109   /* What sort of code sequences we should look for which may trigger the
3110      STM32L4XX erratum.  */
3111   bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3112 
3113   /* Global counter for the number of fixes we have emitted.  */
3114   int num_stm32l4xx_fixes;
3115 
3116   /* Nonzero to force PIC branch veneers.  */
3117   int pic_veneer;
3118 
3119   /* The number of bytes in the initial entry in the PLT.  */
3120   bfd_size_type plt_header_size;
3121 
3122   /* The number of bytes in the subsequent PLT etries.  */
3123   bfd_size_type plt_entry_size;
3124 
3125   /* True if the target system is VxWorks.  */
3126   int vxworks_p;
3127 
3128   /* True if the target system is Symbian OS.  */
3129   int symbian_p;
3130 
3131   /* True if the target system is Native Client.  */
3132   int nacl_p;
3133 
3134   /* True if the target uses REL relocations.  */
3135   int use_rel;
3136 
3137   /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
3138   bfd_vma next_tls_desc_index;
3139 
3140   /* How many R_ARM_TLS_DESC relocations were generated so far.  */
3141   bfd_vma num_tls_desc;
3142 
3143   /* Short-cuts to get to dynamic linker sections.  */
3144   asection *sdynbss;
3145   asection *srelbss;
3146 
3147   /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
3148   asection *srelplt2;
3149 
3150   /* The offset into splt of the PLT entry for the TLS descriptor
3151      resolver.  Special values are 0, if not necessary (or not found
3152      to be necessary yet), and -1 if needed but not determined
3153      yet.  */
3154   bfd_vma dt_tlsdesc_plt;
3155 
3156   /* The offset into sgot of the GOT entry used by the PLT entry
3157      above.  */
3158   bfd_vma dt_tlsdesc_got;
3159 
3160   /* Offset in .plt section of tls_arm_trampoline.  */
3161   bfd_vma tls_trampoline;
3162 
3163   /* Data for R_ARM_TLS_LDM32 relocations.  */
3164   union
3165   {
3166     bfd_signed_vma refcount;
3167     bfd_vma offset;
3168   } tls_ldm_got;
3169 
3170   /* Small local sym cache.  */
3171   struct sym_cache sym_cache;
3172 
3173   /* For convenience in allocate_dynrelocs.  */
3174   bfd * obfd;
3175 
3176   /* The amount of space used by the reserved portion of the sgotplt
3177      section, plus whatever space is used by the jump slots.  */
3178   bfd_vma sgotplt_jump_table_size;
3179 
3180   /* The stub hash table.  */
3181   struct bfd_hash_table stub_hash_table;
3182 
3183   /* Linker stub bfd.  */
3184   bfd *stub_bfd;
3185 
3186   /* Linker call-backs.  */
3187   asection * (*add_stub_section) (const char *, asection *, asection *,
3188 				  unsigned int);
3189   void (*layout_sections_again) (void);
3190 
3191   /* Array to keep track of which stub sections have been created, and
3192      information on stub grouping.  */
3193   struct map_stub *stub_group;
3194 
3195   /* Number of elements in stub_group.  */
3196   unsigned int top_id;
3197 
3198   /* Assorted information used by elf32_arm_size_stubs.  */
3199   unsigned int bfd_count;
3200   unsigned int top_index;
3201   asection **input_list;
3202 };
3203 
3204 static inline int
3205 ctz (unsigned int mask)
3206 {
3207 #if GCC_VERSION >= 3004
3208   return __builtin_ctz (mask);
3209 #else
3210   unsigned int i;
3211 
3212   for (i = 0; i < 8 * sizeof (mask); i++)
3213     {
3214       if (mask & 0x1)
3215 	break;
3216       mask = (mask >> 1);
3217     }
3218   return i;
3219 #endif
3220 }
3221 
3222 #ifndef __NetBSD__
3223 static inline int
3224 popcount (unsigned int mask)
3225 {
3226 #if GCC_VERSION >= 3004
3227   return __builtin_popcount (mask);
3228 #else
3229   unsigned int i, sum = 0;
3230 
3231   for (i = 0; i < 8 * sizeof (mask); i++)
3232     {
3233       if (mask & 0x1)
3234 	sum++;
3235       mask = (mask >> 1);
3236     }
3237   return sum;
3238 #endif
3239 }
3240 #endif
3241 
3242 /* Create an entry in an ARM ELF linker hash table.  */
3243 
3244 static struct bfd_hash_entry *
3245 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3246 			     struct bfd_hash_table * table,
3247 			     const char * string)
3248 {
3249   struct elf32_arm_link_hash_entry * ret =
3250     (struct elf32_arm_link_hash_entry *) entry;
3251 
3252   /* Allocate the structure if it has not already been allocated by a
3253      subclass.  */
3254   if (ret == NULL)
3255     ret = (struct elf32_arm_link_hash_entry *)
3256 	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3257   if (ret == NULL)
3258     return (struct bfd_hash_entry *) ret;
3259 
3260   /* Call the allocation method of the superclass.  */
3261   ret = ((struct elf32_arm_link_hash_entry *)
3262 	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3263 				     table, string));
3264   if (ret != NULL)
3265     {
3266       ret->dyn_relocs = NULL;
3267       ret->tls_type = GOT_UNKNOWN;
3268       ret->tlsdesc_got = (bfd_vma) -1;
3269       ret->plt.thumb_refcount = 0;
3270       ret->plt.maybe_thumb_refcount = 0;
3271       ret->plt.noncall_refcount = 0;
3272       ret->plt.got_offset = -1;
3273       ret->is_iplt = FALSE;
3274       ret->export_glue = NULL;
3275 
3276       ret->stub_cache = NULL;
3277     }
3278 
3279   return (struct bfd_hash_entry *) ret;
3280 }
3281 
3282 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3283    symbols.  */
3284 
3285 static bfd_boolean
3286 elf32_arm_allocate_local_sym_info (bfd *abfd)
3287 {
3288   if (elf_local_got_refcounts (abfd) == NULL)
3289     {
3290       bfd_size_type num_syms;
3291       bfd_size_type size;
3292       char *data;
3293 
3294       num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3295       size = num_syms * (sizeof (bfd_signed_vma)
3296 			 + sizeof (struct arm_local_iplt_info *)
3297 			 + sizeof (bfd_vma)
3298 			 + sizeof (char));
3299       data = bfd_zalloc (abfd, size);
3300       if (data == NULL)
3301 	return FALSE;
3302 
3303       elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3304       data += num_syms * sizeof (bfd_signed_vma);
3305 
3306       elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3307       data += num_syms * sizeof (struct arm_local_iplt_info *);
3308 
3309       elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3310       data += num_syms * sizeof (bfd_vma);
3311 
3312       elf32_arm_local_got_tls_type (abfd) = data;
3313     }
3314   return TRUE;
3315 }
3316 
3317 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3318    to input bfd ABFD.  Create the information if it doesn't already exist.
3319    Return null if an allocation fails.  */
3320 
3321 static struct arm_local_iplt_info *
3322 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3323 {
3324   struct arm_local_iplt_info **ptr;
3325 
3326   if (!elf32_arm_allocate_local_sym_info (abfd))
3327     return NULL;
3328 
3329   BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3330   ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3331   if (*ptr == NULL)
3332     *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3333   return *ptr;
3334 }
3335 
3336 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3337    in ABFD's symbol table.  If the symbol is global, H points to its
3338    hash table entry, otherwise H is null.
3339 
3340    Return true if the symbol does have PLT information.  When returning
3341    true, point *ROOT_PLT at the target-independent reference count/offset
3342    union and *ARM_PLT at the ARM-specific information.  */
3343 
3344 static bfd_boolean
3345 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3346 			unsigned long r_symndx, union gotplt_union **root_plt,
3347 			struct arm_plt_info **arm_plt)
3348 {
3349   struct arm_local_iplt_info *local_iplt;
3350 
3351   if (h != NULL)
3352     {
3353       *root_plt = &h->root.plt;
3354       *arm_plt = &h->plt;
3355       return TRUE;
3356     }
3357 
3358   if (elf32_arm_local_iplt (abfd) == NULL)
3359     return FALSE;
3360 
3361   local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3362   if (local_iplt == NULL)
3363     return FALSE;
3364 
3365   *root_plt = &local_iplt->root;
3366   *arm_plt = &local_iplt->arm;
3367   return TRUE;
3368 }
3369 
3370 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3371    before it.  */
3372 
3373 static bfd_boolean
3374 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3375 				  struct arm_plt_info *arm_plt)
3376 {
3377   struct elf32_arm_link_hash_table *htab;
3378 
3379   htab = elf32_arm_hash_table (info);
3380   return (arm_plt->thumb_refcount != 0
3381 	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3382 }
3383 
3384 /* Return a pointer to the head of the dynamic reloc list that should
3385    be used for local symbol ISYM, which is symbol number R_SYMNDX in
3386    ABFD's symbol table.  Return null if an error occurs.  */
3387 
3388 static struct elf_dyn_relocs **
3389 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3390 				   Elf_Internal_Sym *isym)
3391 {
3392   if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3393     {
3394       struct arm_local_iplt_info *local_iplt;
3395 
3396       local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3397       if (local_iplt == NULL)
3398 	return NULL;
3399       return &local_iplt->dyn_relocs;
3400     }
3401   else
3402     {
3403       /* Track dynamic relocs needed for local syms too.
3404 	 We really need local syms available to do this
3405 	 easily.  Oh well.  */
3406       asection *s;
3407       void *vpp;
3408 
3409       s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3410       if (s == NULL)
3411 	abort ();
3412 
3413       vpp = &elf_section_data (s)->local_dynrel;
3414       return (struct elf_dyn_relocs **) vpp;
3415     }
3416 }
3417 
3418 /* Initialize an entry in the stub hash table.  */
3419 
3420 static struct bfd_hash_entry *
3421 stub_hash_newfunc (struct bfd_hash_entry *entry,
3422 		   struct bfd_hash_table *table,
3423 		   const char *string)
3424 {
3425   /* Allocate the structure if it has not already been allocated by a
3426      subclass.  */
3427   if (entry == NULL)
3428     {
3429       entry = (struct bfd_hash_entry *)
3430 	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3431       if (entry == NULL)
3432 	return entry;
3433     }
3434 
3435   /* Call the allocation method of the superclass.  */
3436   entry = bfd_hash_newfunc (entry, table, string);
3437   if (entry != NULL)
3438     {
3439       struct elf32_arm_stub_hash_entry *eh;
3440 
3441       /* Initialize the local fields.  */
3442       eh = (struct elf32_arm_stub_hash_entry *) entry;
3443       eh->stub_sec = NULL;
3444       eh->stub_offset = 0;
3445       eh->source_value = 0;
3446       eh->target_value = 0;
3447       eh->target_section = NULL;
3448       eh->orig_insn = 0;
3449       eh->stub_type = arm_stub_none;
3450       eh->stub_size = 0;
3451       eh->stub_template = NULL;
3452       eh->stub_template_size = 0;
3453       eh->h = NULL;
3454       eh->id_sec = NULL;
3455       eh->output_name = NULL;
3456     }
3457 
3458   return entry;
3459 }
3460 
3461 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3462    shortcuts to them in our hash table.  */
3463 
3464 static bfd_boolean
3465 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3466 {
3467   struct elf32_arm_link_hash_table *htab;
3468 
3469   htab = elf32_arm_hash_table (info);
3470   if (htab == NULL)
3471     return FALSE;
3472 
3473   /* BPABI objects never have a GOT, or associated sections.  */
3474   if (htab->symbian_p)
3475     return TRUE;
3476 
3477   if (! _bfd_elf_create_got_section (dynobj, info))
3478     return FALSE;
3479 
3480   return TRUE;
3481 }
3482 
3483 /* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */
3484 
3485 static bfd_boolean
3486 create_ifunc_sections (struct bfd_link_info *info)
3487 {
3488   struct elf32_arm_link_hash_table *htab;
3489   const struct elf_backend_data *bed;
3490   bfd *dynobj;
3491   asection *s;
3492   flagword flags;
3493 
3494   htab = elf32_arm_hash_table (info);
3495   dynobj = htab->root.dynobj;
3496   bed = get_elf_backend_data (dynobj);
3497   flags = bed->dynamic_sec_flags;
3498 
3499   if (htab->root.iplt == NULL)
3500     {
3501       s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3502 					      flags | SEC_READONLY | SEC_CODE);
3503       if (s == NULL
3504 	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3505 	return FALSE;
3506       htab->root.iplt = s;
3507     }
3508 
3509   if (htab->root.irelplt == NULL)
3510     {
3511       s = bfd_make_section_anyway_with_flags (dynobj,
3512 					      RELOC_SECTION (htab, ".iplt"),
3513 					      flags | SEC_READONLY);
3514       if (s == NULL
3515 	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3516 	return FALSE;
3517       htab->root.irelplt = s;
3518     }
3519 
3520   if (htab->root.igotplt == NULL)
3521     {
3522       s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3523       if (s == NULL
3524 	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3525 	return FALSE;
3526       htab->root.igotplt = s;
3527     }
3528   return TRUE;
3529 }
3530 
3531 /* Determine if we're dealing with a Thumb only architecture.  */
3532 
3533 static bfd_boolean
3534 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3535 {
3536   int arch;
3537   int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3538 					  Tag_CPU_arch_profile);
3539 
3540   if (profile)
3541     return profile == 'M';
3542 
3543   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3544 
3545   /* Force return logic to be reviewed for each new architecture.  */
3546   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3547 	      || arch == TAG_CPU_ARCH_V8M_BASE
3548 	      || arch == TAG_CPU_ARCH_V8M_MAIN);
3549 
3550   if (arch == TAG_CPU_ARCH_V6_M
3551       || arch == TAG_CPU_ARCH_V6S_M
3552       || arch == TAG_CPU_ARCH_V7E_M
3553       || arch == TAG_CPU_ARCH_V8M_BASE
3554       || arch == TAG_CPU_ARCH_V8M_MAIN)
3555     return TRUE;
3556 
3557   return FALSE;
3558 }
3559 
3560 /* Determine if we're dealing with a Thumb-2 object.  */
3561 
3562 static bfd_boolean
3563 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3564 {
3565   int arch;
3566   int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3567 					    Tag_THUMB_ISA_use);
3568 
3569   if (thumb_isa)
3570     return thumb_isa == 2;
3571 
3572   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3573 
3574   /* Force return logic to be reviewed for each new architecture.  */
3575   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3576 	      || arch == TAG_CPU_ARCH_V8M_BASE
3577 	      || arch == TAG_CPU_ARCH_V8M_MAIN);
3578 
3579   return (arch == TAG_CPU_ARCH_V6T2
3580 	  || arch == TAG_CPU_ARCH_V7
3581 	  || arch == TAG_CPU_ARCH_V7E_M
3582 	  || arch == TAG_CPU_ARCH_V8
3583 	  || arch == TAG_CPU_ARCH_V8M_MAIN);
3584 }
3585 
3586 /* Determine whether Thumb-2 BL instruction is available.  */
3587 
3588 static bfd_boolean
3589 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3590 {
3591   int arch =
3592     bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3593 
3594   /* Force return logic to be reviewed for each new architecture.  */
3595   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3596 	      || arch == TAG_CPU_ARCH_V8M_BASE
3597 	      || arch == TAG_CPU_ARCH_V8M_MAIN);
3598 
3599   /* Architecture was introduced after ARMv6T2 (eg. ARMv6-M).  */
3600   return (arch == TAG_CPU_ARCH_V6T2
3601 	  || arch >= TAG_CPU_ARCH_V7);
3602 }
3603 
3604 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3605    .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3606    hash table.  */
3607 
3608 static bfd_boolean
3609 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3610 {
3611   struct elf32_arm_link_hash_table *htab;
3612 
3613   htab = elf32_arm_hash_table (info);
3614   if (htab == NULL)
3615     return FALSE;
3616 
3617   if (!htab->root.sgot && !create_got_section (dynobj, info))
3618     return FALSE;
3619 
3620   if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3621     return FALSE;
3622 
3623   htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3624   if (!bfd_link_pic (info))
3625     htab->srelbss = bfd_get_linker_section (dynobj,
3626 					    RELOC_SECTION (htab, ".bss"));
3627 
3628   if (htab->vxworks_p)
3629     {
3630       if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3631 	return FALSE;
3632 
3633       if (bfd_link_pic (info))
3634 	{
3635 	  htab->plt_header_size = 0;
3636 	  htab->plt_entry_size
3637 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3638 	}
3639       else
3640 	{
3641 	  htab->plt_header_size
3642 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3643 	  htab->plt_entry_size
3644 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3645 	}
3646 
3647       if (elf_elfheader (dynobj))
3648 	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3649     }
3650   else
3651     {
3652       /* PR ld/16017
3653 	 Test for thumb only architectures.  Note - we cannot just call
3654 	 using_thumb_only() as the attributes in the output bfd have not been
3655 	 initialised at this point, so instead we use the input bfd.  */
3656       bfd * saved_obfd = htab->obfd;
3657 
3658       htab->obfd = dynobj;
3659       if (using_thumb_only (htab))
3660 	{
3661 	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3662 	  htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3663 	}
3664       htab->obfd = saved_obfd;
3665     }
3666 
3667   if (!htab->root.splt
3668       || !htab->root.srelplt
3669       || !htab->sdynbss
3670       || (!bfd_link_pic (info) && !htab->srelbss))
3671     abort ();
3672 
3673   return TRUE;
3674 }
3675 
3676 /* Copy the extra info we tack onto an elf_link_hash_entry.  */
3677 
3678 static void
3679 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3680 				struct elf_link_hash_entry *dir,
3681 				struct elf_link_hash_entry *ind)
3682 {
3683   struct elf32_arm_link_hash_entry *edir, *eind;
3684 
3685   edir = (struct elf32_arm_link_hash_entry *) dir;
3686   eind = (struct elf32_arm_link_hash_entry *) ind;
3687 
3688   if (eind->dyn_relocs != NULL)
3689     {
3690       if (edir->dyn_relocs != NULL)
3691 	{
3692 	  struct elf_dyn_relocs **pp;
3693 	  struct elf_dyn_relocs *p;
3694 
3695 	  /* Add reloc counts against the indirect sym to the direct sym
3696 	     list.  Merge any entries against the same section.  */
3697 	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3698 	    {
3699 	      struct elf_dyn_relocs *q;
3700 
3701 	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
3702 		if (q->sec == p->sec)
3703 		  {
3704 		    q->pc_count += p->pc_count;
3705 		    q->count += p->count;
3706 		    *pp = p->next;
3707 		    break;
3708 		  }
3709 	      if (q == NULL)
3710 		pp = &p->next;
3711 	    }
3712 	  *pp = edir->dyn_relocs;
3713 	}
3714 
3715       edir->dyn_relocs = eind->dyn_relocs;
3716       eind->dyn_relocs = NULL;
3717     }
3718 
3719   if (ind->root.type == bfd_link_hash_indirect)
3720     {
3721       /* Copy over PLT info.  */
3722       edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3723       eind->plt.thumb_refcount = 0;
3724       edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3725       eind->plt.maybe_thumb_refcount = 0;
3726       edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3727       eind->plt.noncall_refcount = 0;
3728 
3729       /* We should only allocate a function to .iplt once the final
3730 	 symbol information is known.  */
3731       BFD_ASSERT (!eind->is_iplt);
3732 
3733       if (dir->got.refcount <= 0)
3734 	{
3735 	  edir->tls_type = eind->tls_type;
3736 	  eind->tls_type = GOT_UNKNOWN;
3737 	}
3738     }
3739 
3740   _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3741 }
3742 
3743 /* Destroy an ARM elf linker hash table.  */
3744 
3745 static void
3746 elf32_arm_link_hash_table_free (bfd *obfd)
3747 {
3748   struct elf32_arm_link_hash_table *ret
3749     = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3750 
3751   bfd_hash_table_free (&ret->stub_hash_table);
3752   _bfd_elf_link_hash_table_free (obfd);
3753 }
3754 
3755 /* Create an ARM elf linker hash table.  */
3756 
3757 static struct bfd_link_hash_table *
3758 elf32_arm_link_hash_table_create (bfd *abfd)
3759 {
3760   struct elf32_arm_link_hash_table *ret;
3761   bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3762 
3763   ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
3764   if (ret == NULL)
3765     return NULL;
3766 
3767   if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3768 				      elf32_arm_link_hash_newfunc,
3769 				      sizeof (struct elf32_arm_link_hash_entry),
3770 				      ARM_ELF_DATA))
3771     {
3772       free (ret);
3773       return NULL;
3774     }
3775 
3776   ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3777   ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
3778 #ifdef FOUR_WORD_PLT
3779   ret->plt_header_size = 16;
3780   ret->plt_entry_size = 16;
3781 #else
3782   ret->plt_header_size = 20;
3783   ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
3784 #endif
3785   ret->use_rel = 1;
3786   ret->obfd = abfd;
3787 
3788   if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3789 			    sizeof (struct elf32_arm_stub_hash_entry)))
3790     {
3791       _bfd_elf_link_hash_table_free (abfd);
3792       return NULL;
3793     }
3794   ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
3795 
3796   return &ret->root.root;
3797 }
3798 
3799 /* Determine what kind of NOPs are available.  */
3800 
3801 static bfd_boolean
3802 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3803 {
3804   const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3805 					     Tag_CPU_arch);
3806 
3807   /* Force return logic to be reviewed for each new architecture.  */
3808   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3809 	      || arch == TAG_CPU_ARCH_V8M_BASE
3810 	      || arch == TAG_CPU_ARCH_V8M_MAIN);
3811 
3812   return (arch == TAG_CPU_ARCH_V6T2
3813 	  || arch == TAG_CPU_ARCH_V6K
3814 	  || arch == TAG_CPU_ARCH_V7
3815 	  || arch == TAG_CPU_ARCH_V8);
3816 }
3817 
3818 static bfd_boolean
3819 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3820 {
3821   switch (stub_type)
3822     {
3823     case arm_stub_long_branch_thumb_only:
3824     case arm_stub_long_branch_thumb2_only:
3825     case arm_stub_long_branch_thumb2_only_pure:
3826     case arm_stub_long_branch_v4t_thumb_arm:
3827     case arm_stub_short_branch_v4t_thumb_arm:
3828     case arm_stub_long_branch_v4t_thumb_arm_pic:
3829     case arm_stub_long_branch_v4t_thumb_tls_pic:
3830     case arm_stub_long_branch_thumb_only_pic:
3831       return TRUE;
3832     case arm_stub_none:
3833       BFD_FAIL ();
3834       return FALSE;
3835       break;
3836     default:
3837       return FALSE;
3838     }
3839 }
3840 
3841 /* Determine the type of stub needed, if any, for a call.  */
3842 
3843 static enum elf32_arm_stub_type
3844 arm_type_of_stub (struct bfd_link_info *info,
3845 		  asection *input_sec,
3846 		  const Elf_Internal_Rela *rel,
3847 		  unsigned char st_type,
3848 		  enum arm_st_branch_type *actual_branch_type,
3849 		  struct elf32_arm_link_hash_entry *hash,
3850 		  bfd_vma destination,
3851 		  asection *sym_sec,
3852 		  bfd *input_bfd,
3853 		  const char *name)
3854 {
3855   bfd_vma location;
3856   bfd_signed_vma branch_offset;
3857   unsigned int r_type;
3858   struct elf32_arm_link_hash_table * globals;
3859   bfd_boolean thumb2, thumb2_bl, thumb_only;
3860   enum elf32_arm_stub_type stub_type = arm_stub_none;
3861   int use_plt = 0;
3862   enum arm_st_branch_type branch_type = *actual_branch_type;
3863   union gotplt_union *root_plt;
3864   struct arm_plt_info *arm_plt;
3865   int arch;
3866   int thumb2_movw;
3867 
3868   if (branch_type == ST_BRANCH_LONG)
3869     return stub_type;
3870 
3871   globals = elf32_arm_hash_table (info);
3872   if (globals == NULL)
3873     return stub_type;
3874 
3875   thumb_only = using_thumb_only (globals);
3876   thumb2 = using_thumb2 (globals);
3877   thumb2_bl = using_thumb2_bl (globals);
3878 
3879   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3880 
3881   /* True for architectures that implement the thumb2 movw instruction.  */
3882   thumb2_movw = thumb2 || (arch  == TAG_CPU_ARCH_V8M_BASE);
3883 
3884   /* Determine where the call point is.  */
3885   location = (input_sec->output_offset
3886 	      + input_sec->output_section->vma
3887 	      + rel->r_offset);
3888 
3889   r_type = ELF32_R_TYPE (rel->r_info);
3890 
3891   /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3892      are considering a function call relocation.  */
3893   if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3894                      || r_type == R_ARM_THM_JUMP19)
3895       && branch_type == ST_BRANCH_TO_ARM)
3896     branch_type = ST_BRANCH_TO_THUMB;
3897 
3898   /* For TLS call relocs, it is the caller's responsibility to provide
3899      the address of the appropriate trampoline.  */
3900   if (r_type != R_ARM_TLS_CALL
3901       && r_type != R_ARM_THM_TLS_CALL
3902       && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3903 				 &root_plt, &arm_plt)
3904       && root_plt->offset != (bfd_vma) -1)
3905     {
3906       asection *splt;
3907 
3908       if (hash == NULL || hash->is_iplt)
3909 	splt = globals->root.iplt;
3910       else
3911 	splt = globals->root.splt;
3912       if (splt != NULL)
3913 	{
3914 	  use_plt = 1;
3915 
3916 	  /* Note when dealing with PLT entries: the main PLT stub is in
3917 	     ARM mode, so if the branch is in Thumb mode, another
3918 	     Thumb->ARM stub will be inserted later just before the ARM
3919 	     PLT stub. We don't take this extra distance into account
3920 	     here, because if a long branch stub is needed, we'll add a
3921 	     Thumb->Arm one and branch directly to the ARM PLT entry
3922 	     because it avoids spreading offset corrections in several
3923 	     places.  */
3924 
3925 	  destination = (splt->output_section->vma
3926 			 + splt->output_offset
3927 			 + root_plt->offset);
3928 	  st_type = STT_FUNC;
3929 	  branch_type = ST_BRANCH_TO_ARM;
3930 	}
3931     }
3932   /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
3933   BFD_ASSERT (st_type != STT_GNU_IFUNC);
3934 
3935   branch_offset = (bfd_signed_vma)(destination - location);
3936 
3937   if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3938       || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3939     {
3940       /* Handle cases where:
3941 	 - this call goes too far (different Thumb/Thumb2 max
3942 	   distance)
3943 	 - it's a Thumb->Arm call and blx is not available, or it's a
3944 	   Thumb->Arm branch (not bl). A stub is needed in this case,
3945 	   but only if this call is not through a PLT entry. Indeed,
3946 	   PLT stubs handle mode switching already.
3947       */
3948       if ((!thumb2_bl
3949 	    && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3950 		|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3951 	  || (thumb2_bl
3952 	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3953 		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3954 	  || (thumb2
3955 	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3956 		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3957 	      && (r_type == R_ARM_THM_JUMP19))
3958 	  || (branch_type == ST_BRANCH_TO_ARM
3959 	      && (((r_type == R_ARM_THM_CALL
3960 		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3961 		  || (r_type == R_ARM_THM_JUMP24)
3962                   || (r_type == R_ARM_THM_JUMP19))
3963 	      && !use_plt))
3964 	{
3965 	  if (branch_type == ST_BRANCH_TO_THUMB)
3966 	    {
3967 	      /* Thumb to thumb.  */
3968 	      if (!thumb_only)
3969 		{
3970 		  if (input_sec->flags & SEC_ELF_PURECODE)
3971 		    (*_bfd_error_handler) (_("%B(%s): warning: long branch "
3972 					     " veneers used in section with "
3973 					     "SHF_ARM_PURECODE section "
3974 					     "attribute is only supported"
3975 					     " for M-profile targets that "
3976 					     "implement the movw "
3977 					     "instruction."));
3978 
3979 		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3980 		    /* PIC stubs.  */
3981 		    ? ((globals->use_blx
3982 			&& (r_type == R_ARM_THM_CALL))
3983 		       /* V5T and above. Stub starts with ARM code, so
3984 			  we must be able to switch mode before
3985 			  reaching it, which is only possible for 'bl'
3986 			  (ie R_ARM_THM_CALL relocation).  */
3987 		       ? arm_stub_long_branch_any_thumb_pic
3988 		       /* On V4T, use Thumb code only.  */
3989 		       : arm_stub_long_branch_v4t_thumb_thumb_pic)
3990 
3991 		    /* non-PIC stubs.  */
3992 		    : ((globals->use_blx
3993 			&& (r_type == R_ARM_THM_CALL))
3994 		       /* V5T and above.  */
3995 		       ? arm_stub_long_branch_any_any
3996 		       /* V4T.  */
3997 		       : arm_stub_long_branch_v4t_thumb_thumb);
3998 		}
3999 	      else
4000 		{
4001 		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4002 		      stub_type = arm_stub_long_branch_thumb2_only_pure;
4003 		  else
4004 		    {
4005 		      if (input_sec->flags & SEC_ELF_PURECODE)
4006 			(*_bfd_error_handler) (_("%B(%s): warning: long branch "
4007 						 " veneers used in section with "
4008 						 "SHF_ARM_PURECODE section "
4009 						 "attribute is only supported"
4010 						 " for M-profile targets that "
4011 						 "implement the movw "
4012 						 "instruction."));
4013 
4014 		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4015 			/* PIC stub.  */
4016 			? arm_stub_long_branch_thumb_only_pic
4017 			/* non-PIC stub.  */
4018 			: (thumb2 ? arm_stub_long_branch_thumb2_only
4019 				  : arm_stub_long_branch_thumb_only);
4020 		    }
4021 		}
4022 	    }
4023 	  else
4024 	    {
4025 	      if (input_sec->flags & SEC_ELF_PURECODE)
4026 		(*_bfd_error_handler) (_("%B(%s): warning: long branch "
4027 					 " veneers used in section with "
4028 					 "SHF_ARM_PURECODE section "
4029 					 "attribute is only supported"
4030 					 " for M-profile targets that "
4031 					 "implement the movw "
4032 					 "instruction."));
4033 
4034 	      /* Thumb to arm.  */
4035 	      if (sym_sec != NULL
4036 		  && sym_sec->owner != NULL
4037 		  && !INTERWORK_FLAG (sym_sec->owner))
4038 		{
4039 		  (*_bfd_error_handler)
4040 		    (_("%B(%s): warning: interworking not enabled.\n"
4041 		       "  first occurrence: %B: Thumb call to ARM"),
4042 		     sym_sec->owner, input_bfd, name);
4043 		}
4044 
4045 	      stub_type =
4046 		(bfd_link_pic (info) | globals->pic_veneer)
4047 		/* PIC stubs.  */
4048 		? (r_type == R_ARM_THM_TLS_CALL
4049 		   /* TLS PIC stubs.  */
4050 		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4051 		      : arm_stub_long_branch_v4t_thumb_tls_pic)
4052 		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4053 		      /* V5T PIC and above.  */
4054 		      ? arm_stub_long_branch_any_arm_pic
4055 		      /* V4T PIC stub.  */
4056 		      : arm_stub_long_branch_v4t_thumb_arm_pic))
4057 
4058 		/* non-PIC stubs.  */
4059 		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
4060 		   /* V5T and above.  */
4061 		   ? arm_stub_long_branch_any_any
4062 		   /* V4T.  */
4063 		   : arm_stub_long_branch_v4t_thumb_arm);
4064 
4065 	      /* Handle v4t short branches.  */
4066 	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4067 		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4068 		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4069 		stub_type = arm_stub_short_branch_v4t_thumb_arm;
4070 	    }
4071 	}
4072     }
4073   else if (r_type == R_ARM_CALL
4074 	   || r_type == R_ARM_JUMP24
4075 	   || r_type == R_ARM_PLT32
4076 	   || r_type == R_ARM_TLS_CALL)
4077     {
4078       if (input_sec->flags & SEC_ELF_PURECODE)
4079 	(*_bfd_error_handler) (_("%B(%s): warning: long branch "
4080 				 " veneers used in section with "
4081 				 "SHF_ARM_PURECODE section "
4082 				 "attribute is only supported"
4083 				 " for M-profile targets that "
4084 				 "implement the movw "
4085 				 "instruction."));
4086       if (branch_type == ST_BRANCH_TO_THUMB)
4087 	{
4088 	  /* Arm to thumb.  */
4089 
4090 	  if (sym_sec != NULL
4091 	      && sym_sec->owner != NULL
4092 	      && !INTERWORK_FLAG (sym_sec->owner))
4093 	    {
4094 	      (*_bfd_error_handler)
4095 		(_("%B(%s): warning: interworking not enabled.\n"
4096 		   "  first occurrence: %B: ARM call to Thumb"),
4097 		 sym_sec->owner, input_bfd, name);
4098 	    }
4099 
4100 	  /* We have an extra 2-bytes reach because of
4101 	     the mode change (bit 24 (H) of BLX encoding).  */
4102 	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4103 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4104 	      || (r_type == R_ARM_CALL && !globals->use_blx)
4105 	      || (r_type == R_ARM_JUMP24)
4106 	      || (r_type == R_ARM_PLT32))
4107 	    {
4108 	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4109 		/* PIC stubs.  */
4110 		? ((globals->use_blx)
4111 		   /* V5T and above.  */
4112 		   ? arm_stub_long_branch_any_thumb_pic
4113 		   /* V4T stub.  */
4114 		   : arm_stub_long_branch_v4t_arm_thumb_pic)
4115 
4116 		/* non-PIC stubs.  */
4117 		: ((globals->use_blx)
4118 		   /* V5T and above.  */
4119 		   ? arm_stub_long_branch_any_any
4120 		   /* V4T.  */
4121 		   : arm_stub_long_branch_v4t_arm_thumb);
4122 	    }
4123 	}
4124       else
4125 	{
4126 	  /* Arm to arm.  */
4127 	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4128 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4129 	    {
4130 	      stub_type =
4131 		(bfd_link_pic (info) | globals->pic_veneer)
4132 		/* PIC stubs.  */
4133 		? (r_type == R_ARM_TLS_CALL
4134 		   /* TLS PIC Stub.  */
4135 		   ? arm_stub_long_branch_any_tls_pic
4136 		   : (globals->nacl_p
4137 		      ? arm_stub_long_branch_arm_nacl_pic
4138 		      : arm_stub_long_branch_any_arm_pic))
4139 		/* non-PIC stubs.  */
4140 		: (globals->nacl_p
4141 		   ? arm_stub_long_branch_arm_nacl
4142 		   : arm_stub_long_branch_any_any);
4143 	    }
4144 	}
4145     }
4146 
4147   /* If a stub is needed, record the actual destination type.  */
4148   if (stub_type != arm_stub_none)
4149     *actual_branch_type = branch_type;
4150 
4151   return stub_type;
4152 }
4153 
4154 /* Build a name for an entry in the stub hash table.  */
4155 
4156 static char *
4157 elf32_arm_stub_name (const asection *input_section,
4158 		     const asection *sym_sec,
4159 		     const struct elf32_arm_link_hash_entry *hash,
4160 		     const Elf_Internal_Rela *rel,
4161 		     enum elf32_arm_stub_type stub_type)
4162 {
4163   char *stub_name;
4164   bfd_size_type len;
4165 
4166   if (hash)
4167     {
4168       len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4169       stub_name = (char *) bfd_malloc (len);
4170       if (stub_name != NULL)
4171 	sprintf (stub_name, "%08x_%s+%x_%d",
4172 		 input_section->id & 0xffffffff,
4173 		 hash->root.root.root.string,
4174 		 (int) rel->r_addend & 0xffffffff,
4175 		 (int) stub_type);
4176     }
4177   else
4178     {
4179       len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4180       stub_name = (char *) bfd_malloc (len);
4181       if (stub_name != NULL)
4182 	sprintf (stub_name, "%08x_%x:%x+%x_%d",
4183 		 input_section->id & 0xffffffff,
4184 		 sym_sec->id & 0xffffffff,
4185 		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4186 		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4187 		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4188 		 (int) rel->r_addend & 0xffffffff,
4189 		 (int) stub_type);
4190     }
4191 
4192   return stub_name;
4193 }
4194 
4195 /* Look up an entry in the stub hash.  Stub entries are cached because
4196    creating the stub name takes a bit of time.  */
4197 
4198 static struct elf32_arm_stub_hash_entry *
4199 elf32_arm_get_stub_entry (const asection *input_section,
4200 			  const asection *sym_sec,
4201 			  struct elf_link_hash_entry *hash,
4202 			  const Elf_Internal_Rela *rel,
4203 			  struct elf32_arm_link_hash_table *htab,
4204 			  enum elf32_arm_stub_type stub_type)
4205 {
4206   struct elf32_arm_stub_hash_entry *stub_entry;
4207   struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4208   const asection *id_sec;
4209 
4210   if ((input_section->flags & SEC_CODE) == 0)
4211     return NULL;
4212 
4213   /* If this input section is part of a group of sections sharing one
4214      stub section, then use the id of the first section in the group.
4215      Stub names need to include a section id, as there may well be
4216      more than one stub used to reach say, printf, and we need to
4217      distinguish between them.  */
4218   id_sec = htab->stub_group[input_section->id].link_sec;
4219 
4220   if (h != NULL && h->stub_cache != NULL
4221       && h->stub_cache->h == h
4222       && h->stub_cache->id_sec == id_sec
4223       && h->stub_cache->stub_type == stub_type)
4224     {
4225       stub_entry = h->stub_cache;
4226     }
4227   else
4228     {
4229       char *stub_name;
4230 
4231       stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4232       if (stub_name == NULL)
4233 	return NULL;
4234 
4235       stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4236 					stub_name, FALSE, FALSE);
4237       if (h != NULL)
4238 	h->stub_cache = stub_entry;
4239 
4240       free (stub_name);
4241     }
4242 
4243   return stub_entry;
4244 }
4245 
4246 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4247    section.  */
4248 
4249 static bfd_boolean
4250 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4251 {
4252   if (stub_type >= max_stub_type)
4253     abort ();  /* Should be unreachable.  */
4254 
4255   return FALSE;
4256 }
4257 
4258 /* Required alignment (as a power of 2) for the dedicated section holding
4259    veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4260    with input sections.  */
4261 
4262 static int
4263 arm_dedicated_stub_output_section_required_alignment
4264   (enum elf32_arm_stub_type stub_type)
4265 {
4266   if (stub_type >= max_stub_type)
4267     abort ();  /* Should be unreachable.  */
4268 
4269   BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4270   return 0;
4271 }
4272 
4273 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4274    NULL if veneers of this type are interspersed with input sections.  */
4275 
4276 static const char *
4277 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4278 {
4279   if (stub_type >= max_stub_type)
4280     abort ();  /* Should be unreachable.  */
4281 
4282   BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4283   return NULL;
4284 }
4285 
4286 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4287    returns the address of the hash table field in HTAB holding a pointer to the
4288    corresponding input section.  Otherwise, returns NULL.  */
4289 
4290 static asection **
4291 arm_dedicated_stub_input_section_ptr
4292   (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
4293    enum elf32_arm_stub_type stub_type)
4294 {
4295   if (stub_type >= max_stub_type)
4296     abort ();  /* Should be unreachable.  */
4297 
4298   BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4299   return NULL;
4300 }
4301 
4302 /* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
4303    is the section that branch into veneer and can be NULL if stub should go in
4304    a dedicated output section.  Returns a pointer to the stub section, and the
4305    section to which the stub section will be attached (in *LINK_SEC_P).
4306    LINK_SEC_P may be NULL.  */
4307 
4308 static asection *
4309 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4310 				   struct elf32_arm_link_hash_table *htab,
4311 				   enum elf32_arm_stub_type stub_type)
4312 {
4313   asection *link_sec, *out_sec, **stub_sec_p;
4314   const char *stub_sec_prefix;
4315   bfd_boolean dedicated_output_section =
4316     arm_dedicated_stub_output_section_required (stub_type);
4317   int align;
4318 
4319   if (dedicated_output_section)
4320     {
4321       bfd *output_bfd = htab->obfd;
4322       const char *out_sec_name =
4323 	arm_dedicated_stub_output_section_name (stub_type);
4324       link_sec = NULL;
4325       stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4326       stub_sec_prefix = out_sec_name;
4327       align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4328       out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4329       if (out_sec == NULL)
4330 	{
4331 	  (*_bfd_error_handler) (_("No address assigned to the veneers output "
4332 				   "section %s"), out_sec_name);
4333 	  return NULL;
4334 	}
4335     }
4336   else
4337     {
4338       link_sec = htab->stub_group[section->id].link_sec;
4339       BFD_ASSERT (link_sec != NULL);
4340       stub_sec_p = &htab->stub_group[section->id].stub_sec;
4341       if (*stub_sec_p == NULL)
4342 	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4343       stub_sec_prefix = link_sec->name;
4344       out_sec = link_sec->output_section;
4345       align = htab->nacl_p ? 4 : 3;
4346     }
4347 
4348   if (*stub_sec_p == NULL)
4349     {
4350       size_t namelen;
4351       bfd_size_type len;
4352       char *s_name;
4353 
4354       namelen = strlen (stub_sec_prefix);
4355       len = namelen + sizeof (STUB_SUFFIX);
4356       s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4357       if (s_name == NULL)
4358 	return NULL;
4359 
4360       memcpy (s_name, stub_sec_prefix, namelen);
4361       memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4362       *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4363 					       align);
4364       if (*stub_sec_p == NULL)
4365 	return NULL;
4366 
4367       out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4368 			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4369 			| SEC_KEEP;
4370     }
4371 
4372   if (!dedicated_output_section)
4373     htab->stub_group[section->id].stub_sec = *stub_sec_p;
4374 
4375   if (link_sec_p)
4376     *link_sec_p = link_sec;
4377 
4378   return *stub_sec_p;
4379 }
4380 
4381 /* Add a new stub entry to the stub hash.  Not all fields of the new
4382    stub entry are initialised.  */
4383 
4384 static struct elf32_arm_stub_hash_entry *
4385 elf32_arm_add_stub (const char *stub_name, asection *section,
4386 		    struct elf32_arm_link_hash_table *htab,
4387 		    enum elf32_arm_stub_type stub_type)
4388 {
4389   asection *link_sec;
4390   asection *stub_sec;
4391   struct elf32_arm_stub_hash_entry *stub_entry;
4392 
4393   stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4394 						stub_type);
4395   if (stub_sec == NULL)
4396     return NULL;
4397 
4398   /* Enter this entry into the linker stub hash table.  */
4399   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4400 				     TRUE, FALSE);
4401   if (stub_entry == NULL)
4402     {
4403       if (section == NULL)
4404 	section = stub_sec;
4405       (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4406 			     section->owner,
4407 			     stub_name);
4408       return NULL;
4409     }
4410 
4411   stub_entry->stub_sec = stub_sec;
4412   stub_entry->stub_offset = 0;
4413   stub_entry->id_sec = link_sec;
4414 
4415   return stub_entry;
4416 }
4417 
4418 /* Store an Arm insn into an output section not processed by
4419    elf32_arm_write_section.  */
4420 
4421 static void
4422 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4423 	      bfd * output_bfd, bfd_vma val, void * ptr)
4424 {
4425   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4426     bfd_putl32 (val, ptr);
4427   else
4428     bfd_putb32 (val, ptr);
4429 }
4430 
4431 /* Store a 16-bit Thumb insn into an output section not processed by
4432    elf32_arm_write_section.  */
4433 
4434 static void
4435 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4436 		bfd * output_bfd, bfd_vma val, void * ptr)
4437 {
4438   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4439     bfd_putl16 (val, ptr);
4440   else
4441     bfd_putb16 (val, ptr);
4442 }
4443 
4444 /* Store a Thumb2 insn into an output section not processed by
4445    elf32_arm_write_section.  */
4446 
4447 static void
4448 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4449 		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4450 {
4451   /* T2 instructions are 16-bit streamed.  */
4452   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4453     {
4454       bfd_putl16 ((val >> 16) & 0xffff, ptr);
4455       bfd_putl16 ((val & 0xffff), ptr + 2);
4456     }
4457   else
4458     {
4459       bfd_putb16 ((val >> 16) & 0xffff, ptr);
4460       bfd_putb16 ((val & 0xffff), ptr + 2);
4461     }
4462 }
4463 
4464 /* If it's possible to change R_TYPE to a more efficient access
4465    model, return the new reloc type.  */
4466 
4467 static unsigned
4468 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4469 			  struct elf_link_hash_entry *h)
4470 {
4471   int is_local = (h == NULL);
4472 
4473   if (bfd_link_pic (info)
4474       || (h && h->root.type == bfd_link_hash_undefweak))
4475     return r_type;
4476 
4477   /* We do not support relaxations for Old TLS models.  */
4478   switch (r_type)
4479     {
4480     case R_ARM_TLS_GOTDESC:
4481     case R_ARM_TLS_CALL:
4482     case R_ARM_THM_TLS_CALL:
4483     case R_ARM_TLS_DESCSEQ:
4484     case R_ARM_THM_TLS_DESCSEQ:
4485       return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4486     }
4487 
4488   return r_type;
4489 }
4490 
4491 static bfd_reloc_status_type elf32_arm_final_link_relocate
4492   (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4493    Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4494    const char *, unsigned char, enum arm_st_branch_type,
4495    struct elf_link_hash_entry *, bfd_boolean *, char **);
4496 
4497 static unsigned int
4498 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4499 {
4500   switch (stub_type)
4501     {
4502     case arm_stub_a8_veneer_b_cond:
4503     case arm_stub_a8_veneer_b:
4504     case arm_stub_a8_veneer_bl:
4505       return 2;
4506 
4507     case arm_stub_long_branch_any_any:
4508     case arm_stub_long_branch_v4t_arm_thumb:
4509     case arm_stub_long_branch_thumb_only:
4510     case arm_stub_long_branch_thumb2_only:
4511     case arm_stub_long_branch_thumb2_only_pure:
4512     case arm_stub_long_branch_v4t_thumb_thumb:
4513     case arm_stub_long_branch_v4t_thumb_arm:
4514     case arm_stub_short_branch_v4t_thumb_arm:
4515     case arm_stub_long_branch_any_arm_pic:
4516     case arm_stub_long_branch_any_thumb_pic:
4517     case arm_stub_long_branch_v4t_thumb_thumb_pic:
4518     case arm_stub_long_branch_v4t_arm_thumb_pic:
4519     case arm_stub_long_branch_v4t_thumb_arm_pic:
4520     case arm_stub_long_branch_thumb_only_pic:
4521     case arm_stub_long_branch_any_tls_pic:
4522     case arm_stub_long_branch_v4t_thumb_tls_pic:
4523     case arm_stub_a8_veneer_blx:
4524       return 4;
4525 
4526     case arm_stub_long_branch_arm_nacl:
4527     case arm_stub_long_branch_arm_nacl_pic:
4528       return 16;
4529 
4530     default:
4531       abort ();  /* Should be unreachable.  */
4532     }
4533 }
4534 
4535 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4536    veneering (TRUE) or have their own symbol (FALSE).  */
4537 
4538 static bfd_boolean
4539 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4540 {
4541   if (stub_type >= max_stub_type)
4542     abort ();  /* Should be unreachable.  */
4543 
4544   return FALSE;
4545 }
4546 
4547 /* Returns the padding needed for the dedicated section used stubs of type
4548    STUB_TYPE.  */
4549 
4550 static int
4551 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4552 {
4553   if (stub_type >= max_stub_type)
4554     abort ();  /* Should be unreachable.  */
4555 
4556   return 0;
4557 }
4558 
4559 static bfd_boolean
4560 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4561 		    void * in_arg)
4562 {
4563 #define MAXRELOCS 3
4564   struct elf32_arm_stub_hash_entry *stub_entry;
4565   struct elf32_arm_link_hash_table *globals;
4566   struct bfd_link_info *info;
4567   asection *stub_sec;
4568   bfd *stub_bfd;
4569   bfd_byte *loc;
4570   bfd_vma sym_value;
4571   int template_size;
4572   int size;
4573   const insn_sequence *template_sequence;
4574   int i;
4575   int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4576   int stub_reloc_offset[MAXRELOCS] = {0, 0};
4577   int nrelocs = 0;
4578 
4579   /* Massage our args to the form they really have.  */
4580   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4581   info = (struct bfd_link_info *) in_arg;
4582 
4583   globals = elf32_arm_hash_table (info);
4584   if (globals == NULL)
4585     return FALSE;
4586 
4587   stub_sec = stub_entry->stub_sec;
4588 
4589   if ((globals->fix_cortex_a8 < 0)
4590       != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4591     /* We have to do less-strictly-aligned fixes last.  */
4592     return TRUE;
4593 
4594   /* Make a note of the offset within the stubs for this entry.  */
4595   stub_entry->stub_offset = stub_sec->size;
4596   loc = stub_sec->contents + stub_entry->stub_offset;
4597 
4598   stub_bfd = stub_sec->owner;
4599 
4600   /* This is the address of the stub destination.  */
4601   sym_value = (stub_entry->target_value
4602 	       + stub_entry->target_section->output_offset
4603 	       + stub_entry->target_section->output_section->vma);
4604 
4605   template_sequence = stub_entry->stub_template;
4606   template_size = stub_entry->stub_template_size;
4607 
4608   size = 0;
4609   for (i = 0; i < template_size; i++)
4610     {
4611       switch (template_sequence[i].type)
4612 	{
4613 	case THUMB16_TYPE:
4614 	  {
4615 	    bfd_vma data = (bfd_vma) template_sequence[i].data;
4616 	    if (template_sequence[i].reloc_addend != 0)
4617 	      {
4618 		/* We've borrowed the reloc_addend field to mean we should
4619 		   insert a condition code into this (Thumb-1 branch)
4620 		   instruction.  See THUMB16_BCOND_INSN.  */
4621 		BFD_ASSERT ((data & 0xff00) == 0xd000);
4622 		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4623 	      }
4624 	    bfd_put_16 (stub_bfd, data, loc + size);
4625 	    size += 2;
4626 	  }
4627 	  break;
4628 
4629 	case THUMB32_TYPE:
4630 	  bfd_put_16 (stub_bfd,
4631 		      (template_sequence[i].data >> 16) & 0xffff,
4632 		      loc + size);
4633 	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4634 		      loc + size + 2);
4635 	  if (template_sequence[i].r_type != R_ARM_NONE)
4636 	    {
4637 	      stub_reloc_idx[nrelocs] = i;
4638 	      stub_reloc_offset[nrelocs++] = size;
4639 	    }
4640 	  size += 4;
4641 	  break;
4642 
4643 	case ARM_TYPE:
4644 	  bfd_put_32 (stub_bfd, template_sequence[i].data,
4645 		      loc + size);
4646 	  /* Handle cases where the target is encoded within the
4647 	     instruction.  */
4648 	  if (template_sequence[i].r_type == R_ARM_JUMP24)
4649 	    {
4650 	      stub_reloc_idx[nrelocs] = i;
4651 	      stub_reloc_offset[nrelocs++] = size;
4652 	    }
4653 	  size += 4;
4654 	  break;
4655 
4656 	case DATA_TYPE:
4657 	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4658 	  stub_reloc_idx[nrelocs] = i;
4659 	  stub_reloc_offset[nrelocs++] = size;
4660 	  size += 4;
4661 	  break;
4662 
4663 	default:
4664 	  BFD_FAIL ();
4665 	  return FALSE;
4666 	}
4667     }
4668 
4669   stub_sec->size += size;
4670 
4671   /* Stub size has already been computed in arm_size_one_stub. Check
4672      consistency.  */
4673   BFD_ASSERT (size == stub_entry->stub_size);
4674 
4675   /* Destination is Thumb. Force bit 0 to 1 to reflect this.  */
4676   if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4677     sym_value |= 1;
4678 
4679   /* Assume there is at least one and at most MAXRELOCS entries to relocate
4680      in each stub.  */
4681   BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4682 
4683   for (i = 0; i < nrelocs; i++)
4684     {
4685       Elf_Internal_Rela rel;
4686       bfd_boolean unresolved_reloc;
4687       char *error_message;
4688       bfd_vma points_to =
4689 	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
4690 
4691       rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4692       rel.r_info = ELF32_R_INFO (0,
4693 				 template_sequence[stub_reloc_idx[i]].r_type);
4694       rel.r_addend = 0;
4695 
4696       if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4697 	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4698 	   template should refer back to the instruction after the original
4699 	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
4700 	   are only generated when both source and target are in the same
4701 	   section.  */
4702 	points_to = stub_entry->target_section->output_section->vma
4703 		    + stub_entry->target_section->output_offset
4704 		    + stub_entry->source_value;
4705 
4706       elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4707 	  (template_sequence[stub_reloc_idx[i]].r_type),
4708 	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4709 	   points_to, info, stub_entry->target_section, "", STT_FUNC,
4710 	   stub_entry->branch_type,
4711 	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4712 	   &error_message);
4713     }
4714 
4715   return TRUE;
4716 #undef MAXRELOCS
4717 }
4718 
4719 /* Calculate the template, template size and instruction size for a stub.
4720    Return value is the instruction size.  */
4721 
4722 static unsigned int
4723 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4724 			     const insn_sequence **stub_template,
4725 			     int *stub_template_size)
4726 {
4727   const insn_sequence *template_sequence = NULL;
4728   int template_size = 0, i;
4729   unsigned int size;
4730 
4731   template_sequence = stub_definitions[stub_type].template_sequence;
4732   if (stub_template)
4733     *stub_template = template_sequence;
4734 
4735   template_size = stub_definitions[stub_type].template_size;
4736   if (stub_template_size)
4737     *stub_template_size = template_size;
4738 
4739   size = 0;
4740   for (i = 0; i < template_size; i++)
4741     {
4742       switch (template_sequence[i].type)
4743 	{
4744 	case THUMB16_TYPE:
4745 	  size += 2;
4746 	  break;
4747 
4748 	case ARM_TYPE:
4749 	case THUMB32_TYPE:
4750 	case DATA_TYPE:
4751 	  size += 4;
4752 	  break;
4753 
4754 	default:
4755 	  BFD_FAIL ();
4756 	  return 0;
4757 	}
4758     }
4759 
4760   return size;
4761 }
4762 
4763 /* As above, but don't actually build the stub.  Just bump offset so
4764    we know stub section sizes.  */
4765 
4766 static bfd_boolean
4767 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4768 		   void *in_arg ATTRIBUTE_UNUSED)
4769 {
4770   struct elf32_arm_stub_hash_entry *stub_entry;
4771   const insn_sequence *template_sequence;
4772   int template_size, size;
4773 
4774   /* Massage our args to the form they really have.  */
4775   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4776 
4777   BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4778 	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4779 
4780   size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4781 				      &template_size);
4782 
4783   stub_entry->stub_size = size;
4784   stub_entry->stub_template = template_sequence;
4785   stub_entry->stub_template_size = template_size;
4786 
4787   size = (size + 7) & ~7;
4788   stub_entry->stub_sec->size += size;
4789 
4790   return TRUE;
4791 }
4792 
4793 /* External entry points for sizing and building linker stubs.  */
4794 
4795 /* Set up various things so that we can make a list of input sections
4796    for each output section included in the link.  Returns -1 on error,
4797    0 when no stubs will be needed, and 1 on success.  */
4798 
4799 int
4800 elf32_arm_setup_section_lists (bfd *output_bfd,
4801 			       struct bfd_link_info *info)
4802 {
4803   bfd *input_bfd;
4804   unsigned int bfd_count;
4805   unsigned int top_id, top_index;
4806   asection *section;
4807   asection **input_list, **list;
4808   bfd_size_type amt;
4809   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4810 
4811   if (htab == NULL)
4812     return 0;
4813   if (! is_elf_hash_table (htab))
4814     return 0;
4815 
4816   /* Count the number of input BFDs and find the top input section id.  */
4817   for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4818        input_bfd != NULL;
4819        input_bfd = input_bfd->link.next)
4820     {
4821       bfd_count += 1;
4822       for (section = input_bfd->sections;
4823 	   section != NULL;
4824 	   section = section->next)
4825 	{
4826 	  if (top_id < section->id)
4827 	    top_id = section->id;
4828 	}
4829     }
4830   htab->bfd_count = bfd_count;
4831 
4832   amt = sizeof (struct map_stub) * (top_id + 1);
4833   htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4834   if (htab->stub_group == NULL)
4835     return -1;
4836   htab->top_id = top_id;
4837 
4838   /* We can't use output_bfd->section_count here to find the top output
4839      section index as some sections may have been removed, and
4840      _bfd_strip_section_from_output doesn't renumber the indices.  */
4841   for (section = output_bfd->sections, top_index = 0;
4842        section != NULL;
4843        section = section->next)
4844     {
4845       if (top_index < section->index)
4846 	top_index = section->index;
4847     }
4848 
4849   htab->top_index = top_index;
4850   amt = sizeof (asection *) * (top_index + 1);
4851   input_list = (asection **) bfd_malloc (amt);
4852   htab->input_list = input_list;
4853   if (input_list == NULL)
4854     return -1;
4855 
4856   /* For sections we aren't interested in, mark their entries with a
4857      value we can check later.  */
4858   list = input_list + top_index;
4859   do
4860     *list = bfd_abs_section_ptr;
4861   while (list-- != input_list);
4862 
4863   for (section = output_bfd->sections;
4864        section != NULL;
4865        section = section->next)
4866     {
4867       if ((section->flags & SEC_CODE) != 0)
4868 	input_list[section->index] = NULL;
4869     }
4870 
4871   return 1;
4872 }
4873 
4874 /* The linker repeatedly calls this function for each input section,
4875    in the order that input sections are linked into output sections.
4876    Build lists of input sections to determine groupings between which
4877    we may insert linker stubs.  */
4878 
4879 void
4880 elf32_arm_next_input_section (struct bfd_link_info *info,
4881 			      asection *isec)
4882 {
4883   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4884 
4885   if (htab == NULL)
4886     return;
4887 
4888   if (isec->output_section->index <= htab->top_index)
4889     {
4890       asection **list = htab->input_list + isec->output_section->index;
4891 
4892       if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4893 	{
4894 	  /* Steal the link_sec pointer for our list.  */
4895 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4896 	  /* This happens to make the list in reverse order,
4897 	     which we reverse later.  */
4898 	  PREV_SEC (isec) = *list;
4899 	  *list = isec;
4900 	}
4901     }
4902 }
4903 
4904 /* See whether we can group stub sections together.  Grouping stub
4905    sections may result in fewer stubs.  More importantly, we need to
4906    put all .init* and .fini* stubs at the end of the .init or
4907    .fini output sections respectively, because glibc splits the
4908    _init and _fini functions into multiple parts.  Putting a stub in
4909    the middle of a function is not a good idea.  */
4910 
4911 static void
4912 group_sections (struct elf32_arm_link_hash_table *htab,
4913 		bfd_size_type stub_group_size,
4914 		bfd_boolean stubs_always_after_branch)
4915 {
4916   asection **list = htab->input_list;
4917 
4918   do
4919     {
4920       asection *tail = *list;
4921       asection *head;
4922 
4923       if (tail == bfd_abs_section_ptr)
4924 	continue;
4925 
4926       /* Reverse the list: we must avoid placing stubs at the
4927 	 beginning of the section because the beginning of the text
4928 	 section may be required for an interrupt vector in bare metal
4929 	 code.  */
4930 #define NEXT_SEC PREV_SEC
4931       head = NULL;
4932       while (tail != NULL)
4933 	{
4934 	  /* Pop from tail.  */
4935 	  asection *item = tail;
4936 	  tail = PREV_SEC (item);
4937 
4938 	  /* Push on head.  */
4939 	  NEXT_SEC (item) = head;
4940 	  head = item;
4941 	}
4942 
4943       while (head != NULL)
4944 	{
4945 	  asection *curr;
4946 	  asection *next;
4947 	  bfd_vma stub_group_start = head->output_offset;
4948 	  bfd_vma end_of_next;
4949 
4950 	  curr = head;
4951 	  while (NEXT_SEC (curr) != NULL)
4952 	    {
4953 	      next = NEXT_SEC (curr);
4954 	      end_of_next = next->output_offset + next->size;
4955 	      if (end_of_next - stub_group_start >= stub_group_size)
4956 		/* End of NEXT is too far from start, so stop.  */
4957 		break;
4958 	      /* Add NEXT to the group.  */
4959 	      curr = next;
4960 	    }
4961 
4962 	  /* OK, the size from the start to the start of CURR is less
4963 	     than stub_group_size and thus can be handled by one stub
4964 	     section.  (Or the head section is itself larger than
4965 	     stub_group_size, in which case we may be toast.)
4966 	     We should really be keeping track of the total size of
4967 	     stubs added here, as stubs contribute to the final output
4968 	     section size.  */
4969 	  do
4970 	    {
4971 	      next = NEXT_SEC (head);
4972 	      /* Set up this stub group.  */
4973 	      htab->stub_group[head->id].link_sec = curr;
4974 	    }
4975 	  while (head != curr && (head = next) != NULL);
4976 
4977 	  /* But wait, there's more!  Input sections up to stub_group_size
4978 	     bytes after the stub section can be handled by it too.  */
4979 	  if (!stubs_always_after_branch)
4980 	    {
4981 	      stub_group_start = curr->output_offset + curr->size;
4982 
4983 	      while (next != NULL)
4984 		{
4985 		  end_of_next = next->output_offset + next->size;
4986 		  if (end_of_next - stub_group_start >= stub_group_size)
4987 		    /* End of NEXT is too far from stubs, so stop.  */
4988 		    break;
4989 		  /* Add NEXT to the stub group.  */
4990 		  head = next;
4991 		  next = NEXT_SEC (head);
4992 		  htab->stub_group[head->id].link_sec = curr;
4993 		}
4994 	    }
4995 	  head = next;
4996 	}
4997     }
4998   while (list++ != htab->input_list + htab->top_index);
4999 
5000   free (htab->input_list);
5001 #undef PREV_SEC
5002 #undef NEXT_SEC
5003 }
5004 
5005 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5006    erratum fix.  */
5007 
5008 static int
5009 a8_reloc_compare (const void *a, const void *b)
5010 {
5011   const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5012   const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5013 
5014   if (ra->from < rb->from)
5015     return -1;
5016   else if (ra->from > rb->from)
5017     return 1;
5018   else
5019     return 0;
5020 }
5021 
5022 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5023 						    const char *, char **);
5024 
5025 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5026    branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
5027    NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
5028    otherwise.  */
5029 
5030 static bfd_boolean
5031 cortex_a8_erratum_scan (bfd *input_bfd,
5032 			struct bfd_link_info *info,
5033 			struct a8_erratum_fix **a8_fixes_p,
5034 			unsigned int *num_a8_fixes_p,
5035 			unsigned int *a8_fix_table_size_p,
5036 			struct a8_erratum_reloc *a8_relocs,
5037 			unsigned int num_a8_relocs,
5038 			unsigned prev_num_a8_fixes,
5039 			bfd_boolean *stub_changed_p)
5040 {
5041   asection *section;
5042   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5043   struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5044   unsigned int num_a8_fixes = *num_a8_fixes_p;
5045   unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5046 
5047   if (htab == NULL)
5048     return FALSE;
5049 
5050   for (section = input_bfd->sections;
5051        section != NULL;
5052        section = section->next)
5053     {
5054       bfd_byte *contents = NULL;
5055       struct _arm_elf_section_data *sec_data;
5056       unsigned int span;
5057       bfd_vma base_vma;
5058 
5059       if (elf_section_type (section) != SHT_PROGBITS
5060 	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5061 	  || (section->flags & SEC_EXCLUDE) != 0
5062 	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5063 	  || (section->output_section == bfd_abs_section_ptr))
5064 	continue;
5065 
5066       base_vma = section->output_section->vma + section->output_offset;
5067 
5068       if (elf_section_data (section)->this_hdr.contents != NULL)
5069 	contents = elf_section_data (section)->this_hdr.contents;
5070       else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5071 	return TRUE;
5072 
5073       sec_data = elf32_arm_section_data (section);
5074 
5075       for (span = 0; span < sec_data->mapcount; span++)
5076 	{
5077 	  unsigned int span_start = sec_data->map[span].vma;
5078 	  unsigned int span_end = (span == sec_data->mapcount - 1)
5079 	    ? section->size : sec_data->map[span + 1].vma;
5080 	  unsigned int i;
5081 	  char span_type = sec_data->map[span].type;
5082 	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5083 
5084 	  if (span_type != 't')
5085 	    continue;
5086 
5087 	  /* Span is entirely within a single 4KB region: skip scanning.  */
5088 	  if (((base_vma + span_start) & ~0xfff)
5089 	      == ((base_vma + span_end) & ~0xfff))
5090 	    continue;
5091 
5092 	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5093 
5094 	       * The opcode is BLX.W, BL.W, B.W, Bcc.W
5095 	       * The branch target is in the same 4KB region as the
5096 		 first half of the branch.
5097 	       * The instruction before the branch is a 32-bit
5098 		 length non-branch instruction.  */
5099 	  for (i = span_start; i < span_end;)
5100 	    {
5101 	      unsigned int insn = bfd_getl16 (&contents[i]);
5102 	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5103 	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5104 
5105 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5106 		insn_32bit = TRUE;
5107 
5108 	      if (insn_32bit)
5109 		{
5110 		  /* Load the rest of the insn (in manual-friendly order).  */
5111 		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5112 
5113 		  /* Encoding T4: B<c>.W.  */
5114 		  is_b = (insn & 0xf800d000) == 0xf0009000;
5115 		  /* Encoding T1: BL<c>.W.  */
5116 		  is_bl = (insn & 0xf800d000) == 0xf000d000;
5117 		  /* Encoding T2: BLX<c>.W.  */
5118 		  is_blx = (insn & 0xf800d000) == 0xf000c000;
5119 		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
5120 		  is_bcc = (insn & 0xf800d000) == 0xf0008000
5121 			   && (insn & 0x07f00000) != 0x03800000;
5122 		}
5123 
5124 	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5125 
5126 	      if (((base_vma + i) & 0xfff) == 0xffe
5127 		  && insn_32bit
5128 		  && is_32bit_branch
5129 		  && last_was_32bit
5130 		  && ! last_was_branch)
5131 		{
5132 		  bfd_signed_vma offset = 0;
5133 		  bfd_boolean force_target_arm = FALSE;
5134 		  bfd_boolean force_target_thumb = FALSE;
5135 		  bfd_vma target;
5136 		  enum elf32_arm_stub_type stub_type = arm_stub_none;
5137 		  struct a8_erratum_reloc key, *found;
5138 		  bfd_boolean use_plt = FALSE;
5139 
5140 		  key.from = base_vma + i;
5141 		  found = (struct a8_erratum_reloc *)
5142 		      bsearch (&key, a8_relocs, num_a8_relocs,
5143 			       sizeof (struct a8_erratum_reloc),
5144 			       &a8_reloc_compare);
5145 
5146 		  if (found)
5147 		    {
5148 		      char *error_message = NULL;
5149 		      struct elf_link_hash_entry *entry;
5150 
5151 		      /* We don't care about the error returned from this
5152 			 function, only if there is glue or not.  */
5153 		      entry = find_thumb_glue (info, found->sym_name,
5154 					       &error_message);
5155 
5156 		      if (entry)
5157 			found->non_a8_stub = TRUE;
5158 
5159 		      /* Keep a simpler condition, for the sake of clarity.  */
5160 		      if (htab->root.splt != NULL && found->hash != NULL
5161 			  && found->hash->root.plt.offset != (bfd_vma) -1)
5162 			use_plt = TRUE;
5163 
5164 		      if (found->r_type == R_ARM_THM_CALL)
5165 			{
5166 			  if (found->branch_type == ST_BRANCH_TO_ARM
5167 			      || use_plt)
5168 			    force_target_arm = TRUE;
5169 			  else
5170 			    force_target_thumb = TRUE;
5171 			}
5172 		    }
5173 
5174 		  /* Check if we have an offending branch instruction.  */
5175 
5176 		  if (found && found->non_a8_stub)
5177 		    /* We've already made a stub for this instruction, e.g.
5178 		       it's a long branch or a Thumb->ARM stub.  Assume that
5179 		       stub will suffice to work around the A8 erratum (see
5180 		       setting of always_after_branch above).  */
5181 		    ;
5182 		  else if (is_bcc)
5183 		    {
5184 		      offset = (insn & 0x7ff) << 1;
5185 		      offset |= (insn & 0x3f0000) >> 4;
5186 		      offset |= (insn & 0x2000) ? 0x40000 : 0;
5187 		      offset |= (insn & 0x800) ? 0x80000 : 0;
5188 		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
5189 		      if (offset & 0x100000)
5190 			offset |= ~ ((bfd_signed_vma) 0xfffff);
5191 		      stub_type = arm_stub_a8_veneer_b_cond;
5192 		    }
5193 		  else if (is_b || is_bl || is_blx)
5194 		    {
5195 		      int s = (insn & 0x4000000) != 0;
5196 		      int j1 = (insn & 0x2000) != 0;
5197 		      int j2 = (insn & 0x800) != 0;
5198 		      int i1 = !(j1 ^ s);
5199 		      int i2 = !(j2 ^ s);
5200 
5201 		      offset = (insn & 0x7ff) << 1;
5202 		      offset |= (insn & 0x3ff0000) >> 4;
5203 		      offset |= i2 << 22;
5204 		      offset |= i1 << 23;
5205 		      offset |= s << 24;
5206 		      if (offset & 0x1000000)
5207 			offset |= ~ ((bfd_signed_vma) 0xffffff);
5208 
5209 		      if (is_blx)
5210 			offset &= ~ ((bfd_signed_vma) 3);
5211 
5212 		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
5213 			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5214 		    }
5215 
5216 		  if (stub_type != arm_stub_none)
5217 		    {
5218 		      bfd_vma pc_for_insn = base_vma + i + 4;
5219 
5220 		      /* The original instruction is a BL, but the target is
5221 			 an ARM instruction.  If we were not making a stub,
5222 			 the BL would have been converted to a BLX.  Use the
5223 			 BLX stub instead in that case.  */
5224 		      if (htab->use_blx && force_target_arm
5225 			  && stub_type == arm_stub_a8_veneer_bl)
5226 			{
5227 			  stub_type = arm_stub_a8_veneer_blx;
5228 			  is_blx = TRUE;
5229 			  is_bl = FALSE;
5230 			}
5231 		      /* Conversely, if the original instruction was
5232 			 BLX but the target is Thumb mode, use the BL
5233 			 stub.  */
5234 		      else if (force_target_thumb
5235 			       && stub_type == arm_stub_a8_veneer_blx)
5236 			{
5237 			  stub_type = arm_stub_a8_veneer_bl;
5238 			  is_blx = FALSE;
5239 			  is_bl = TRUE;
5240 			}
5241 
5242 		      if (is_blx)
5243 			pc_for_insn &= ~ ((bfd_vma) 3);
5244 
5245 		      /* If we found a relocation, use the proper destination,
5246 			 not the offset in the (unrelocated) instruction.
5247 			 Note this is always done if we switched the stub type
5248 			 above.  */
5249 		      if (found)
5250 			offset =
5251 			  (bfd_signed_vma) (found->destination - pc_for_insn);
5252 
5253 		      /* If the stub will use a Thumb-mode branch to a
5254 			 PLT target, redirect it to the preceding Thumb
5255 			 entry point.  */
5256 		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5257 			offset -= PLT_THUMB_STUB_SIZE;
5258 
5259 		      target = pc_for_insn + offset;
5260 
5261 		      /* The BLX stub is ARM-mode code.  Adjust the offset to
5262 			 take the different PC value (+8 instead of +4) into
5263 			 account.  */
5264 		      if (stub_type == arm_stub_a8_veneer_blx)
5265 			offset += 4;
5266 
5267 		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5268 			{
5269 			  char *stub_name = NULL;
5270 
5271 			  if (num_a8_fixes == a8_fix_table_size)
5272 			    {
5273 			      a8_fix_table_size *= 2;
5274 			      a8_fixes = (struct a8_erratum_fix *)
5275 				  bfd_realloc (a8_fixes,
5276 					       sizeof (struct a8_erratum_fix)
5277 					       * a8_fix_table_size);
5278 			    }
5279 
5280 			  if (num_a8_fixes < prev_num_a8_fixes)
5281 			    {
5282 			      /* If we're doing a subsequent scan,
5283 				 check if we've found the same fix as
5284 				 before, and try and reuse the stub
5285 				 name.  */
5286 			      stub_name = a8_fixes[num_a8_fixes].stub_name;
5287 			      if ((a8_fixes[num_a8_fixes].section != section)
5288 				  || (a8_fixes[num_a8_fixes].offset != i))
5289 				{
5290 				  free (stub_name);
5291 				  stub_name = NULL;
5292 				  *stub_changed_p = TRUE;
5293 				}
5294 			    }
5295 
5296 			  if (!stub_name)
5297 			    {
5298 			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5299 			      if (stub_name != NULL)
5300 				sprintf (stub_name, "%x:%x", section->id, i);
5301 			    }
5302 
5303 			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5304 			  a8_fixes[num_a8_fixes].section = section;
5305 			  a8_fixes[num_a8_fixes].offset = i;
5306 			  a8_fixes[num_a8_fixes].target_offset =
5307 			    target - base_vma;
5308 			  a8_fixes[num_a8_fixes].orig_insn = insn;
5309 			  a8_fixes[num_a8_fixes].stub_name = stub_name;
5310 			  a8_fixes[num_a8_fixes].stub_type = stub_type;
5311 			  a8_fixes[num_a8_fixes].branch_type =
5312 			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5313 
5314 			  num_a8_fixes++;
5315 			}
5316 		    }
5317 		}
5318 
5319 	      i += insn_32bit ? 4 : 2;
5320 	      last_was_32bit = insn_32bit;
5321 	      last_was_branch = is_32bit_branch;
5322 	    }
5323 	}
5324 
5325       if (elf_section_data (section)->this_hdr.contents == NULL)
5326 	free (contents);
5327     }
5328 
5329   *a8_fixes_p = a8_fixes;
5330   *num_a8_fixes_p = num_a8_fixes;
5331   *a8_fix_table_size_p = a8_fix_table_size;
5332 
5333   return FALSE;
5334 }
5335 
5336 /* Create or update a stub entry depending on whether the stub can already be
5337    found in HTAB.  The stub is identified by:
5338    - its type STUB_TYPE
5339    - its source branch (note that several can share the same stub) whose
5340      section and relocation (if any) are given by SECTION and IRELA
5341      respectively
5342    - its target symbol whose input section, hash, name, value and branch type
5343      are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5344      respectively
5345 
5346    If found, the value of the stub's target symbol is updated from SYM_VALUE
5347    and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
5348    TRUE and the stub entry is initialized.
5349 
5350    Returns whether the stub could be successfully created or updated, or FALSE
5351    if an error occured.  */
5352 
5353 static bfd_boolean
5354 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5355 		       enum elf32_arm_stub_type stub_type, asection *section,
5356 		       Elf_Internal_Rela *irela, asection *sym_sec,
5357 		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
5358 		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
5359 		       bfd_boolean *new_stub)
5360 {
5361   const asection *id_sec;
5362   char *stub_name;
5363   struct elf32_arm_stub_hash_entry *stub_entry;
5364   unsigned int r_type;
5365   bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5366 
5367   BFD_ASSERT (stub_type != arm_stub_none);
5368   *new_stub = FALSE;
5369 
5370   if (sym_claimed)
5371     stub_name = sym_name;
5372   else
5373     {
5374       BFD_ASSERT (irela);
5375       BFD_ASSERT (section);
5376 
5377       /* Support for grouping stub sections.  */
5378       id_sec = htab->stub_group[section->id].link_sec;
5379 
5380       /* Get the name of this stub.  */
5381       stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5382 				       stub_type);
5383       if (!stub_name)
5384 	return FALSE;
5385     }
5386 
5387   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5388 				     FALSE);
5389   /* The proper stub has already been created, just update its value.  */
5390   if (stub_entry != NULL)
5391     {
5392       if (!sym_claimed)
5393 	free (stub_name);
5394       stub_entry->target_value = sym_value;
5395       return TRUE;
5396     }
5397 
5398   stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5399   if (stub_entry == NULL)
5400     {
5401       if (!sym_claimed)
5402 	free (stub_name);
5403       return FALSE;
5404     }
5405 
5406   stub_entry->target_value = sym_value;
5407   stub_entry->target_section = sym_sec;
5408   stub_entry->stub_type = stub_type;
5409   stub_entry->h = hash;
5410   stub_entry->branch_type = branch_type;
5411 
5412   if (sym_claimed)
5413     stub_entry->output_name = sym_name;
5414   else
5415     {
5416       if (sym_name == NULL)
5417 	sym_name = "unnamed";
5418       stub_entry->output_name = (char *)
5419 	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5420 				   + strlen (sym_name));
5421       if (stub_entry->output_name == NULL)
5422 	{
5423 	  free (stub_name);
5424 	  return FALSE;
5425 	}
5426 
5427       /* For historical reasons, use the existing names for ARM-to-Thumb and
5428 	 Thumb-to-ARM stubs.  */
5429       r_type = ELF32_R_TYPE (irela->r_info);
5430       if ((r_type == (unsigned int) R_ARM_THM_CALL
5431 	   || r_type == (unsigned int) R_ARM_THM_JUMP24
5432 	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
5433 	  && branch_type == ST_BRANCH_TO_ARM)
5434 	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5435       else if ((r_type == (unsigned int) R_ARM_CALL
5436 		|| r_type == (unsigned int) R_ARM_JUMP24)
5437 	       && branch_type == ST_BRANCH_TO_THUMB)
5438 	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5439       else
5440 	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5441     }
5442 
5443   *new_stub = TRUE;
5444   return TRUE;
5445 }
5446 
5447 /* Determine and set the size of the stub section for a final link.
5448 
5449    The basic idea here is to examine all the relocations looking for
5450    PC-relative calls to a target that is unreachable with a "bl"
5451    instruction.  */
5452 
5453 bfd_boolean
5454 elf32_arm_size_stubs (bfd *output_bfd,
5455 		      bfd *stub_bfd,
5456 		      struct bfd_link_info *info,
5457 		      bfd_signed_vma group_size,
5458 		      asection * (*add_stub_section) (const char *, asection *,
5459 						      asection *,
5460 						      unsigned int),
5461 		      void (*layout_sections_again) (void))
5462 {
5463   bfd_size_type stub_group_size;
5464   bfd_boolean stubs_always_after_branch;
5465   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5466   struct a8_erratum_fix *a8_fixes = NULL;
5467   unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
5468   struct a8_erratum_reloc *a8_relocs = NULL;
5469   unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
5470 
5471   if (htab == NULL)
5472     return FALSE;
5473 
5474   if (htab->fix_cortex_a8)
5475     {
5476       a8_fixes = (struct a8_erratum_fix *)
5477 	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
5478       a8_relocs = (struct a8_erratum_reloc *)
5479 	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
5480     }
5481 
5482   /* Propagate mach to stub bfd, because it may not have been
5483      finalized when we created stub_bfd.  */
5484   bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5485 		     bfd_get_mach (output_bfd));
5486 
5487   /* Stash our params away.  */
5488   htab->stub_bfd = stub_bfd;
5489   htab->add_stub_section = add_stub_section;
5490   htab->layout_sections_again = layout_sections_again;
5491   stubs_always_after_branch = group_size < 0;
5492 
5493   /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5494      as the first half of a 32-bit branch straddling two 4K pages.  This is a
5495      crude way of enforcing that.  */
5496   if (htab->fix_cortex_a8)
5497     stubs_always_after_branch = 1;
5498 
5499   if (group_size < 0)
5500     stub_group_size = -group_size;
5501   else
5502     stub_group_size = group_size;
5503 
5504   if (stub_group_size == 1)
5505     {
5506       /* Default values.  */
5507       /* Thumb branch range is +-4MB has to be used as the default
5508 	 maximum size (a given section can contain both ARM and Thumb
5509 	 code, so the worst case has to be taken into account).
5510 
5511 	 This value is 24K less than that, which allows for 2025
5512 	 12-byte stubs.  If we exceed that, then we will fail to link.
5513 	 The user will have to relink with an explicit group size
5514 	 option.  */
5515       stub_group_size = 4170000;
5516     }
5517 
5518   group_sections (htab, stub_group_size, stubs_always_after_branch);
5519 
5520   /* If we're applying the cortex A8 fix, we need to determine the
5521      program header size now, because we cannot change it later --
5522      that could alter section placements.  Notice the A8 erratum fix
5523      ends up requiring the section addresses to remain unchanged
5524      modulo the page size.  That's something we cannot represent
5525      inside BFD, and we don't want to force the section alignment to
5526      be the page size.  */
5527   if (htab->fix_cortex_a8)
5528     (*htab->layout_sections_again) ();
5529 
5530   while (1)
5531     {
5532       bfd *input_bfd;
5533       unsigned int bfd_indx;
5534       asection *stub_sec;
5535       enum elf32_arm_stub_type stub_type;
5536       bfd_boolean stub_changed = FALSE;
5537       unsigned prev_num_a8_fixes = num_a8_fixes;
5538 
5539       num_a8_fixes = 0;
5540       for (input_bfd = info->input_bfds, bfd_indx = 0;
5541 	   input_bfd != NULL;
5542 	   input_bfd = input_bfd->link.next, bfd_indx++)
5543 	{
5544 	  Elf_Internal_Shdr *symtab_hdr;
5545 	  asection *section;
5546 	  Elf_Internal_Sym *local_syms = NULL;
5547 
5548 	  if (!is_arm_elf (input_bfd))
5549 	    continue;
5550 
5551 	  num_a8_relocs = 0;
5552 
5553 	  /* We'll need the symbol table in a second.  */
5554 	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5555 	  if (symtab_hdr->sh_info == 0)
5556 	    continue;
5557 
5558 	  /* Walk over each section attached to the input bfd.  */
5559 	  for (section = input_bfd->sections;
5560 	       section != NULL;
5561 	       section = section->next)
5562 	    {
5563 	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5564 
5565 	      /* If there aren't any relocs, then there's nothing more
5566 		 to do.  */
5567 	      if ((section->flags & SEC_RELOC) == 0
5568 		  || section->reloc_count == 0
5569 		  || (section->flags & SEC_CODE) == 0)
5570 		continue;
5571 
5572 	      /* If this section is a link-once section that will be
5573 		 discarded, then don't create any stubs.  */
5574 	      if (section->output_section == NULL
5575 		  || section->output_section->owner != output_bfd)
5576 		continue;
5577 
5578 	      /* Get the relocs.  */
5579 	      internal_relocs
5580 		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5581 					     NULL, info->keep_memory);
5582 	      if (internal_relocs == NULL)
5583 		goto error_ret_free_local;
5584 
5585 	      /* Now examine each relocation.  */
5586 	      irela = internal_relocs;
5587 	      irelaend = irela + section->reloc_count;
5588 	      for (; irela < irelaend; irela++)
5589 		{
5590 		  unsigned int r_type, r_indx;
5591 		  asection *sym_sec;
5592 		  bfd_vma sym_value;
5593 		  bfd_vma destination;
5594 		  struct elf32_arm_link_hash_entry *hash;
5595 		  const char *sym_name;
5596 		  unsigned char st_type;
5597 		  enum arm_st_branch_type branch_type;
5598 		  bfd_boolean created_stub = FALSE;
5599 
5600 		  r_type = ELF32_R_TYPE (irela->r_info);
5601 		  r_indx = ELF32_R_SYM (irela->r_info);
5602 
5603 		  if (r_type >= (unsigned int) R_ARM_max)
5604 		    {
5605 		      bfd_set_error (bfd_error_bad_value);
5606 		    error_ret_free_internal:
5607 		      if (elf_section_data (section)->relocs == NULL)
5608 			free (internal_relocs);
5609 		    /* Fall through.  */
5610 		    error_ret_free_local:
5611 		      if (local_syms != NULL
5612 			  && (symtab_hdr->contents
5613 			      != (unsigned char *) local_syms))
5614 			free (local_syms);
5615 		      return FALSE;
5616 		    }
5617 
5618 		  hash = NULL;
5619 		  if (r_indx >= symtab_hdr->sh_info)
5620 		    hash = elf32_arm_hash_entry
5621 		      (elf_sym_hashes (input_bfd)
5622 		       [r_indx - symtab_hdr->sh_info]);
5623 
5624 		  /* Only look for stubs on branch instructions, or
5625 		     non-relaxed TLSCALL  */
5626 		  if ((r_type != (unsigned int) R_ARM_CALL)
5627 		      && (r_type != (unsigned int) R_ARM_THM_CALL)
5628 		      && (r_type != (unsigned int) R_ARM_JUMP24)
5629 		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5630 		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
5631 		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5632 		      && (r_type != (unsigned int) R_ARM_PLT32)
5633 		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
5634 			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5635 			   && r_type == elf32_arm_tls_transition
5636 			       (info, r_type, &hash->root)
5637 			   && ((hash ? hash->tls_type
5638 				: (elf32_arm_local_got_tls_type
5639 				   (input_bfd)[r_indx]))
5640 			       & GOT_TLS_GDESC) != 0))
5641 		    continue;
5642 
5643 		  /* Now determine the call target, its name, value,
5644 		     section.  */
5645 		  sym_sec = NULL;
5646 		  sym_value = 0;
5647 		  destination = 0;
5648 		  sym_name = NULL;
5649 
5650 		  if (r_type == (unsigned int) R_ARM_TLS_CALL
5651 		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5652 		    {
5653 		      /* A non-relaxed TLS call.  The target is the
5654 			 plt-resident trampoline and nothing to do
5655 			 with the symbol.  */
5656 		      BFD_ASSERT (htab->tls_trampoline > 0);
5657 		      sym_sec = htab->root.splt;
5658 		      sym_value = htab->tls_trampoline;
5659 		      hash = 0;
5660 		      st_type = STT_FUNC;
5661 		      branch_type = ST_BRANCH_TO_ARM;
5662 		    }
5663 		  else if (!hash)
5664 		    {
5665 		      /* It's a local symbol.  */
5666 		      Elf_Internal_Sym *sym;
5667 
5668 		      if (local_syms == NULL)
5669 			{
5670 			  local_syms
5671 			    = (Elf_Internal_Sym *) symtab_hdr->contents;
5672 			  if (local_syms == NULL)
5673 			    local_syms
5674 			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5675 						      symtab_hdr->sh_info, 0,
5676 						      NULL, NULL, NULL);
5677 			  if (local_syms == NULL)
5678 			    goto error_ret_free_internal;
5679 			}
5680 
5681 		      sym = local_syms + r_indx;
5682 		      if (sym->st_shndx == SHN_UNDEF)
5683 			sym_sec = bfd_und_section_ptr;
5684 		      else if (sym->st_shndx == SHN_ABS)
5685 			sym_sec = bfd_abs_section_ptr;
5686 		      else if (sym->st_shndx == SHN_COMMON)
5687 			sym_sec = bfd_com_section_ptr;
5688 		      else
5689 			sym_sec =
5690 			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5691 
5692 		      if (!sym_sec)
5693 			/* This is an undefined symbol.  It can never
5694 			   be resolved.  */
5695 			continue;
5696 
5697 		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5698 			sym_value = sym->st_value;
5699 		      destination = (sym_value + irela->r_addend
5700 				     + sym_sec->output_offset
5701 				     + sym_sec->output_section->vma);
5702 		      st_type = ELF_ST_TYPE (sym->st_info);
5703 		      branch_type =
5704 			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
5705 		      sym_name
5706 			= bfd_elf_string_from_elf_section (input_bfd,
5707 							   symtab_hdr->sh_link,
5708 							   sym->st_name);
5709 		    }
5710 		  else
5711 		    {
5712 		      /* It's an external symbol.  */
5713 		      while (hash->root.root.type == bfd_link_hash_indirect
5714 			     || hash->root.root.type == bfd_link_hash_warning)
5715 			hash = ((struct elf32_arm_link_hash_entry *)
5716 				hash->root.root.u.i.link);
5717 
5718 		      if (hash->root.root.type == bfd_link_hash_defined
5719 			  || hash->root.root.type == bfd_link_hash_defweak)
5720 			{
5721 			  sym_sec = hash->root.root.u.def.section;
5722 			  sym_value = hash->root.root.u.def.value;
5723 
5724 			  struct elf32_arm_link_hash_table *globals =
5725 						  elf32_arm_hash_table (info);
5726 
5727 			  /* For a destination in a shared library,
5728 			     use the PLT stub as target address to
5729 			     decide whether a branch stub is
5730 			     needed.  */
5731 			  if (globals != NULL
5732 			      && globals->root.splt != NULL
5733 			      && hash != NULL
5734 			      && hash->root.plt.offset != (bfd_vma) -1)
5735 			    {
5736 			      sym_sec = globals->root.splt;
5737 			      sym_value = hash->root.plt.offset;
5738 			      if (sym_sec->output_section != NULL)
5739 				destination = (sym_value
5740 					       + sym_sec->output_offset
5741 					       + sym_sec->output_section->vma);
5742 			    }
5743 			  else if (sym_sec->output_section != NULL)
5744 			    destination = (sym_value + irela->r_addend
5745 					   + sym_sec->output_offset
5746 					   + sym_sec->output_section->vma);
5747 			}
5748 		      else if ((hash->root.root.type == bfd_link_hash_undefined)
5749 			       || (hash->root.root.type == bfd_link_hash_undefweak))
5750 			{
5751 			  /* For a shared library, use the PLT stub as
5752 			     target address to decide whether a long
5753 			     branch stub is needed.
5754 			     For absolute code, they cannot be handled.  */
5755 			  struct elf32_arm_link_hash_table *globals =
5756 			    elf32_arm_hash_table (info);
5757 
5758 			  if (globals != NULL
5759 			      && globals->root.splt != NULL
5760 			      && hash != NULL
5761 			      && hash->root.plt.offset != (bfd_vma) -1)
5762 			    {
5763 			      sym_sec = globals->root.splt;
5764 			      sym_value = hash->root.plt.offset;
5765 			      if (sym_sec->output_section != NULL)
5766 				destination = (sym_value
5767 					       + sym_sec->output_offset
5768 					       + sym_sec->output_section->vma);
5769 			    }
5770 			  else
5771 			    continue;
5772 			}
5773 		      else
5774 			{
5775 			  bfd_set_error (bfd_error_bad_value);
5776 			  goto error_ret_free_internal;
5777 			}
5778 		      st_type = hash->root.type;
5779 		      branch_type =
5780 			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5781 		      sym_name = hash->root.root.root.string;
5782 		    }
5783 
5784 		  do
5785 		    {
5786 		      bfd_boolean new_stub;
5787 
5788 		      /* Determine what (if any) linker stub is needed.  */
5789 		      stub_type = arm_type_of_stub (info, section, irela,
5790 						    st_type, &branch_type,
5791 						    hash, destination, sym_sec,
5792 						    input_bfd, sym_name);
5793 		      if (stub_type == arm_stub_none)
5794 			break;
5795 
5796 		      /* We've either created a stub for this reloc already,
5797 			 or we are about to.  */
5798 		      created_stub =
5799 			elf32_arm_create_stub (htab, stub_type, section, irela,
5800 					       sym_sec, hash,
5801 					       (char *) sym_name, sym_value,
5802 					       branch_type, &new_stub);
5803 
5804 		      if (!created_stub)
5805 			goto error_ret_free_internal;
5806 		      else if (!new_stub)
5807 			break;
5808 		      else
5809 			stub_changed = TRUE;
5810 		    }
5811 		  while (0);
5812 
5813 		  /* Look for relocations which might trigger Cortex-A8
5814 		     erratum.  */
5815 		  if (htab->fix_cortex_a8
5816 		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
5817 			  || r_type == (unsigned int) R_ARM_THM_JUMP19
5818 			  || r_type == (unsigned int) R_ARM_THM_CALL
5819 			  || r_type == (unsigned int) R_ARM_THM_XPC22))
5820 		    {
5821 		      bfd_vma from = section->output_section->vma
5822 				     + section->output_offset
5823 				     + irela->r_offset;
5824 
5825 		      if ((from & 0xfff) == 0xffe)
5826 			{
5827 			  /* Found a candidate.  Note we haven't checked the
5828 			     destination is within 4K here: if we do so (and
5829 			     don't create an entry in a8_relocs) we can't tell
5830 			     that a branch should have been relocated when
5831 			     scanning later.  */
5832 			  if (num_a8_relocs == a8_reloc_table_size)
5833 			    {
5834 			      a8_reloc_table_size *= 2;
5835 			      a8_relocs = (struct a8_erratum_reloc *)
5836 				  bfd_realloc (a8_relocs,
5837 					       sizeof (struct a8_erratum_reloc)
5838 					       * a8_reloc_table_size);
5839 			    }
5840 
5841 			  a8_relocs[num_a8_relocs].from = from;
5842 			  a8_relocs[num_a8_relocs].destination = destination;
5843 			  a8_relocs[num_a8_relocs].r_type = r_type;
5844 			  a8_relocs[num_a8_relocs].branch_type = branch_type;
5845 			  a8_relocs[num_a8_relocs].sym_name = sym_name;
5846 			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5847 			  a8_relocs[num_a8_relocs].hash = hash;
5848 
5849 			  num_a8_relocs++;
5850 			}
5851 		    }
5852 		}
5853 
5854 	      /* We're done with the internal relocs, free them.  */
5855 	      if (elf_section_data (section)->relocs == NULL)
5856 		free (internal_relocs);
5857 	    }
5858 
5859 	  if (htab->fix_cortex_a8)
5860 	    {
5861 	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
5862 	      qsort (a8_relocs, num_a8_relocs,
5863 		     sizeof (struct a8_erratum_reloc),
5864 		     &a8_reloc_compare);
5865 
5866 	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
5867 	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5868 					  &num_a8_fixes, &a8_fix_table_size,
5869 					  a8_relocs, num_a8_relocs,
5870 					  prev_num_a8_fixes, &stub_changed)
5871 		  != 0)
5872 		goto error_ret_free_local;
5873 	    }
5874 
5875 	  if (local_syms != NULL
5876 	      && symtab_hdr->contents != (unsigned char *) local_syms)
5877 	    {
5878 	      if (!info->keep_memory)
5879 		free (local_syms);
5880 	      else
5881 		symtab_hdr->contents = (unsigned char *) local_syms;
5882 	    }
5883 	}
5884 
5885       if (prev_num_a8_fixes != num_a8_fixes)
5886 	stub_changed = TRUE;
5887 
5888       if (!stub_changed)
5889 	break;
5890 
5891       /* OK, we've added some stubs.  Find out the new size of the
5892 	 stub sections.  */
5893       for (stub_sec = htab->stub_bfd->sections;
5894 	   stub_sec != NULL;
5895 	   stub_sec = stub_sec->next)
5896 	{
5897 	  /* Ignore non-stub sections.  */
5898 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
5899 	    continue;
5900 
5901 	  stub_sec->size = 0;
5902 	}
5903 
5904       /* Compute stub section size, considering padding.  */
5905       bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5906       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
5907 	   stub_type++)
5908 	{
5909 	  int size, padding;
5910 	  asection **stub_sec_p;
5911 
5912 	  padding = arm_dedicated_stub_section_padding (stub_type);
5913 	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
5914 	  /* Skip if no stub input section or no stub section padding
5915 	     required.  */
5916 	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
5917 	    continue;
5918 	  /* Stub section padding required but no dedicated section.  */
5919 	  BFD_ASSERT (stub_sec_p);
5920 
5921 	  size = (*stub_sec_p)->size;
5922 	  size = (size + padding - 1) & ~(padding - 1);
5923 	  (*stub_sec_p)->size = size;
5924 	}
5925 
5926       /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
5927       if (htab->fix_cortex_a8)
5928 	for (i = 0; i < num_a8_fixes; i++)
5929 	  {
5930 	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5931 			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
5932 
5933 	    if (stub_sec == NULL)
5934 	      return FALSE;
5935 
5936 	    stub_sec->size
5937 	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5938 					      NULL);
5939 	  }
5940 
5941 
5942       /* Ask the linker to do its stuff.  */
5943       (*htab->layout_sections_again) ();
5944     }
5945 
5946   /* Add stubs for Cortex-A8 erratum fixes now.  */
5947   if (htab->fix_cortex_a8)
5948     {
5949       for (i = 0; i < num_a8_fixes; i++)
5950 	{
5951 	  struct elf32_arm_stub_hash_entry *stub_entry;
5952 	  char *stub_name = a8_fixes[i].stub_name;
5953 	  asection *section = a8_fixes[i].section;
5954 	  unsigned int section_id = a8_fixes[i].section->id;
5955 	  asection *link_sec = htab->stub_group[section_id].link_sec;
5956 	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
5957 	  const insn_sequence *template_sequence;
5958 	  int template_size, size = 0;
5959 
5960 	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5961 					     TRUE, FALSE);
5962 	  if (stub_entry == NULL)
5963 	    {
5964 	      (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5965 				     section->owner,
5966 				     stub_name);
5967 	      return FALSE;
5968 	    }
5969 
5970 	  stub_entry->stub_sec = stub_sec;
5971 	  stub_entry->stub_offset = 0;
5972 	  stub_entry->id_sec = link_sec;
5973 	  stub_entry->stub_type = a8_fixes[i].stub_type;
5974 	  stub_entry->source_value = a8_fixes[i].offset;
5975 	  stub_entry->target_section = a8_fixes[i].section;
5976 	  stub_entry->target_value = a8_fixes[i].target_offset;
5977 	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
5978 	  stub_entry->branch_type = a8_fixes[i].branch_type;
5979 
5980 	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
5981 					      &template_sequence,
5982 					      &template_size);
5983 
5984 	  stub_entry->stub_size = size;
5985 	  stub_entry->stub_template = template_sequence;
5986 	  stub_entry->stub_template_size = template_size;
5987 	}
5988 
5989       /* Stash the Cortex-A8 erratum fix array for use later in
5990 	 elf32_arm_write_section().  */
5991       htab->a8_erratum_fixes = a8_fixes;
5992       htab->num_a8_erratum_fixes = num_a8_fixes;
5993     }
5994   else
5995     {
5996       htab->a8_erratum_fixes = NULL;
5997       htab->num_a8_erratum_fixes = 0;
5998     }
5999   return TRUE;
6000 }
6001 
6002 /* Build all the stubs associated with the current output file.  The
6003    stubs are kept in a hash table attached to the main linker hash
6004    table.  We also set up the .plt entries for statically linked PIC
6005    functions here.  This function is called via arm_elf_finish in the
6006    linker.  */
6007 
6008 bfd_boolean
6009 elf32_arm_build_stubs (struct bfd_link_info *info)
6010 {
6011   asection *stub_sec;
6012   struct bfd_hash_table *table;
6013   struct elf32_arm_link_hash_table *htab;
6014 
6015   htab = elf32_arm_hash_table (info);
6016   if (htab == NULL)
6017     return FALSE;
6018 
6019   for (stub_sec = htab->stub_bfd->sections;
6020        stub_sec != NULL;
6021        stub_sec = stub_sec->next)
6022     {
6023       bfd_size_type size;
6024 
6025       /* Ignore non-stub sections.  */
6026       if (!strstr (stub_sec->name, STUB_SUFFIX))
6027 	continue;
6028 
6029       /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
6030 	 must at least be done for stub section requiring padding.  */
6031       size = stub_sec->size;
6032       stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
6033       if (stub_sec->contents == NULL && size != 0)
6034 	return FALSE;
6035       stub_sec->size = 0;
6036     }
6037 
6038   /* Build the stubs as directed by the stub hash table.  */
6039   table = &htab->stub_hash_table;
6040   bfd_hash_traverse (table, arm_build_one_stub, info);
6041   if (htab->fix_cortex_a8)
6042     {
6043       /* Place the cortex a8 stubs last.  */
6044       htab->fix_cortex_a8 = -1;
6045       bfd_hash_traverse (table, arm_build_one_stub, info);
6046     }
6047 
6048   return TRUE;
6049 }
6050 
6051 /* Locate the Thumb encoded calling stub for NAME.  */
6052 
6053 static struct elf_link_hash_entry *
6054 find_thumb_glue (struct bfd_link_info *link_info,
6055 		 const char *name,
6056 		 char **error_message)
6057 {
6058   char *tmp_name;
6059   struct elf_link_hash_entry *hash;
6060   struct elf32_arm_link_hash_table *hash_table;
6061 
6062   /* We need a pointer to the armelf specific hash table.  */
6063   hash_table = elf32_arm_hash_table (link_info);
6064   if (hash_table == NULL)
6065     return NULL;
6066 
6067   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6068 				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
6069 
6070   BFD_ASSERT (tmp_name);
6071 
6072   sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
6073 
6074   hash = elf_link_hash_lookup
6075     (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6076 
6077   if (hash == NULL
6078       && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
6079 		   tmp_name, name) == -1)
6080     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6081 
6082   free (tmp_name);
6083 
6084   return hash;
6085 }
6086 
6087 /* Locate the ARM encoded calling stub for NAME.  */
6088 
6089 static struct elf_link_hash_entry *
6090 find_arm_glue (struct bfd_link_info *link_info,
6091 	       const char *name,
6092 	       char **error_message)
6093 {
6094   char *tmp_name;
6095   struct elf_link_hash_entry *myh;
6096   struct elf32_arm_link_hash_table *hash_table;
6097 
6098   /* We need a pointer to the elfarm specific hash table.  */
6099   hash_table = elf32_arm_hash_table (link_info);
6100   if (hash_table == NULL)
6101     return NULL;
6102 
6103   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6104 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6105 
6106   BFD_ASSERT (tmp_name);
6107 
6108   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6109 
6110   myh = elf_link_hash_lookup
6111     (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6112 
6113   if (myh == NULL
6114       && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
6115 		   tmp_name, name) == -1)
6116     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6117 
6118   free (tmp_name);
6119 
6120   return myh;
6121 }
6122 
6123 /* ARM->Thumb glue (static images):
6124 
6125    .arm
6126    __func_from_arm:
6127    ldr r12, __func_addr
6128    bx  r12
6129    __func_addr:
6130    .word func    @ behave as if you saw a ARM_32 reloc.
6131 
6132    (v5t static images)
6133    .arm
6134    __func_from_arm:
6135    ldr pc, __func_addr
6136    __func_addr:
6137    .word func    @ behave as if you saw a ARM_32 reloc.
6138 
6139    (relocatable images)
6140    .arm
6141    __func_from_arm:
6142    ldr r12, __func_offset
6143    add r12, r12, pc
6144    bx  r12
6145    __func_offset:
6146    .word func - .   */
6147 
6148 #define ARM2THUMB_STATIC_GLUE_SIZE 12
6149 static const insn32 a2t1_ldr_insn = 0xe59fc000;
6150 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
6151 static const insn32 a2t3_func_addr_insn = 0x00000001;
6152 
6153 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
6154 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
6155 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
6156 
6157 #define ARM2THUMB_PIC_GLUE_SIZE 16
6158 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
6159 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
6160 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
6161 
6162 /* Thumb->ARM:                          Thumb->(non-interworking aware) ARM
6163 
6164      .thumb                             .thumb
6165      .align 2                           .align 2
6166  __func_from_thumb:                 __func_from_thumb:
6167      bx pc                              push {r6, lr}
6168      nop                                ldr  r6, __func_addr
6169      .arm                               mov  lr, pc
6170      b func                             bx   r6
6171 					.arm
6172 				    ;; back_to_thumb
6173 					ldmia r13! {r6, lr}
6174 					bx    lr
6175 				    __func_addr:
6176 					.word        func  */
6177 
6178 #define THUMB2ARM_GLUE_SIZE 8
6179 static const insn16 t2a1_bx_pc_insn = 0x4778;
6180 static const insn16 t2a2_noop_insn = 0x46c0;
6181 static const insn32 t2a3_b_insn = 0xea000000;
6182 
6183 #define VFP11_ERRATUM_VENEER_SIZE 8
6184 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
6185 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
6186 
6187 #define ARM_BX_VENEER_SIZE 12
6188 static const insn32 armbx1_tst_insn = 0xe3100001;
6189 static const insn32 armbx2_moveq_insn = 0x01a0f000;
6190 static const insn32 armbx3_bx_insn = 0xe12fff10;
6191 
6192 #ifndef ELFARM_NABI_C_INCLUDED
6193 static void
6194 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
6195 {
6196   asection * s;
6197   bfd_byte * contents;
6198 
6199   if (size == 0)
6200     {
6201       /* Do not include empty glue sections in the output.  */
6202       if (abfd != NULL)
6203 	{
6204 	  s = bfd_get_linker_section (abfd, name);
6205 	  if (s != NULL)
6206 	    s->flags |= SEC_EXCLUDE;
6207 	}
6208       return;
6209     }
6210 
6211   BFD_ASSERT (abfd != NULL);
6212 
6213   s = bfd_get_linker_section (abfd, name);
6214   BFD_ASSERT (s != NULL);
6215 
6216   contents = (bfd_byte *) bfd_alloc (abfd, size);
6217 
6218   BFD_ASSERT (s->size == size);
6219   s->contents = contents;
6220 }
6221 
6222 bfd_boolean
6223 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
6224 {
6225   struct elf32_arm_link_hash_table * globals;
6226 
6227   globals = elf32_arm_hash_table (info);
6228   BFD_ASSERT (globals != NULL);
6229 
6230   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6231 				   globals->arm_glue_size,
6232 				   ARM2THUMB_GLUE_SECTION_NAME);
6233 
6234   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6235 				   globals->thumb_glue_size,
6236 				   THUMB2ARM_GLUE_SECTION_NAME);
6237 
6238   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6239 				   globals->vfp11_erratum_glue_size,
6240 				   VFP11_ERRATUM_VENEER_SECTION_NAME);
6241 
6242   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6243 				   globals->stm32l4xx_erratum_glue_size,
6244 				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6245 
6246   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6247 				   globals->bx_glue_size,
6248 				   ARM_BX_GLUE_SECTION_NAME);
6249 
6250   return TRUE;
6251 }
6252 
6253 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6254    returns the symbol identifying the stub.  */
6255 
6256 static struct elf_link_hash_entry *
6257 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
6258 			  struct elf_link_hash_entry * h)
6259 {
6260   const char * name = h->root.root.string;
6261   asection * s;
6262   char * tmp_name;
6263   struct elf_link_hash_entry * myh;
6264   struct bfd_link_hash_entry * bh;
6265   struct elf32_arm_link_hash_table * globals;
6266   bfd_vma val;
6267   bfd_size_type size;
6268 
6269   globals = elf32_arm_hash_table (link_info);
6270   BFD_ASSERT (globals != NULL);
6271   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6272 
6273   s = bfd_get_linker_section
6274     (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
6275 
6276   BFD_ASSERT (s != NULL);
6277 
6278   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6279 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6280 
6281   BFD_ASSERT (tmp_name);
6282 
6283   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6284 
6285   myh = elf_link_hash_lookup
6286     (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6287 
6288   if (myh != NULL)
6289     {
6290       /* We've already seen this guy.  */
6291       free (tmp_name);
6292       return myh;
6293     }
6294 
6295   /* The only trick here is using hash_table->arm_glue_size as the value.
6296      Even though the section isn't allocated yet, this is where we will be
6297      putting it.  The +1 on the value marks that the stub has not been
6298      output yet - not that it is a Thumb function.  */
6299   bh = NULL;
6300   val = globals->arm_glue_size + 1;
6301   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
6302 				    tmp_name, BSF_GLOBAL, s, val,
6303 				    NULL, TRUE, FALSE, &bh);
6304 
6305   myh = (struct elf_link_hash_entry *) bh;
6306   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6307   myh->forced_local = 1;
6308 
6309   free (tmp_name);
6310 
6311   if (bfd_link_pic (link_info)
6312       || globals->root.is_relocatable_executable
6313       || globals->pic_veneer)
6314     size = ARM2THUMB_PIC_GLUE_SIZE;
6315   else if (globals->use_blx)
6316     size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
6317   else
6318     size = ARM2THUMB_STATIC_GLUE_SIZE;
6319 
6320   s->size += size;
6321   globals->arm_glue_size += size;
6322 
6323   return myh;
6324 }
6325 
6326 /* Allocate space for ARMv4 BX veneers.  */
6327 
6328 static void
6329 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
6330 {
6331   asection * s;
6332   struct elf32_arm_link_hash_table *globals;
6333   char *tmp_name;
6334   struct elf_link_hash_entry *myh;
6335   struct bfd_link_hash_entry *bh;
6336   bfd_vma val;
6337 
6338   /* BX PC does not need a veneer.  */
6339   if (reg == 15)
6340     return;
6341 
6342   globals = elf32_arm_hash_table (link_info);
6343   BFD_ASSERT (globals != NULL);
6344   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6345 
6346   /* Check if this veneer has already been allocated.  */
6347   if (globals->bx_glue_offset[reg])
6348     return;
6349 
6350   s = bfd_get_linker_section
6351     (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
6352 
6353   BFD_ASSERT (s != NULL);
6354 
6355   /* Add symbol for veneer.  */
6356   tmp_name = (char *)
6357       bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
6358 
6359   BFD_ASSERT (tmp_name);
6360 
6361   sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
6362 
6363   myh = elf_link_hash_lookup
6364     (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
6365 
6366   BFD_ASSERT (myh == NULL);
6367 
6368   bh = NULL;
6369   val = globals->bx_glue_size;
6370   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
6371 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6372 				    NULL, TRUE, FALSE, &bh);
6373 
6374   myh = (struct elf_link_hash_entry *) bh;
6375   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6376   myh->forced_local = 1;
6377 
6378   s->size += ARM_BX_VENEER_SIZE;
6379   globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
6380   globals->bx_glue_size += ARM_BX_VENEER_SIZE;
6381 }
6382 
6383 
6384 /* Add an entry to the code/data map for section SEC.  */
6385 
6386 static void
6387 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
6388 {
6389   struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6390   unsigned int newidx;
6391 
6392   if (sec_data->map == NULL)
6393     {
6394       sec_data->map = (elf32_arm_section_map *)
6395 	  bfd_malloc (sizeof (elf32_arm_section_map));
6396       sec_data->mapcount = 0;
6397       sec_data->mapsize = 1;
6398     }
6399 
6400   newidx = sec_data->mapcount++;
6401 
6402   if (sec_data->mapcount > sec_data->mapsize)
6403     {
6404       sec_data->mapsize *= 2;
6405       sec_data->map = (elf32_arm_section_map *)
6406 	  bfd_realloc_or_free (sec_data->map, sec_data->mapsize
6407 			       * sizeof (elf32_arm_section_map));
6408     }
6409 
6410   if (sec_data->map)
6411     {
6412       sec_data->map[newidx].vma = vma;
6413       sec_data->map[newidx].type = type;
6414     }
6415 }
6416 
6417 
6418 /* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
6419    veneers are handled for now.  */
6420 
6421 static bfd_vma
6422 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6423 			     elf32_vfp11_erratum_list *branch,
6424 			     bfd *branch_bfd,
6425 			     asection *branch_sec,
6426 			     unsigned int offset)
6427 {
6428   asection *s;
6429   struct elf32_arm_link_hash_table *hash_table;
6430   char *tmp_name;
6431   struct elf_link_hash_entry *myh;
6432   struct bfd_link_hash_entry *bh;
6433   bfd_vma val;
6434   struct _arm_elf_section_data *sec_data;
6435   elf32_vfp11_erratum_list *newerr;
6436 
6437   hash_table = elf32_arm_hash_table (link_info);
6438   BFD_ASSERT (hash_table != NULL);
6439   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6440 
6441   s = bfd_get_linker_section
6442     (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6443 
6444   sec_data = elf32_arm_section_data (s);
6445 
6446   BFD_ASSERT (s != NULL);
6447 
6448   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6449 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6450 
6451   BFD_ASSERT (tmp_name);
6452 
6453   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6454 	   hash_table->num_vfp11_fixes);
6455 
6456   myh = elf_link_hash_lookup
6457     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6458 
6459   BFD_ASSERT (myh == NULL);
6460 
6461   bh = NULL;
6462   val = hash_table->vfp11_erratum_glue_size;
6463   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6464 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6465 				    NULL, TRUE, FALSE, &bh);
6466 
6467   myh = (struct elf_link_hash_entry *) bh;
6468   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6469   myh->forced_local = 1;
6470 
6471   /* Link veneer back to calling location.  */
6472   sec_data->erratumcount += 1;
6473   newerr = (elf32_vfp11_erratum_list *)
6474       bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6475 
6476   newerr->type = VFP11_ERRATUM_ARM_VENEER;
6477   newerr->vma = -1;
6478   newerr->u.v.branch = branch;
6479   newerr->u.v.id = hash_table->num_vfp11_fixes;
6480   branch->u.b.veneer = newerr;
6481 
6482   newerr->next = sec_data->erratumlist;
6483   sec_data->erratumlist = newerr;
6484 
6485   /* A symbol for the return from the veneer.  */
6486   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6487 	   hash_table->num_vfp11_fixes);
6488 
6489   myh = elf_link_hash_lookup
6490     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6491 
6492   if (myh != NULL)
6493     abort ();
6494 
6495   bh = NULL;
6496   val = offset + 4;
6497   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6498 				    branch_sec, val, NULL, TRUE, FALSE, &bh);
6499 
6500   myh = (struct elf_link_hash_entry *) bh;
6501   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6502   myh->forced_local = 1;
6503 
6504   free (tmp_name);
6505 
6506   /* Generate a mapping symbol for the veneer section, and explicitly add an
6507      entry for that symbol to the code/data map for the section.  */
6508   if (hash_table->vfp11_erratum_glue_size == 0)
6509     {
6510       bh = NULL;
6511       /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
6512 	 ever requires this erratum fix.  */
6513       _bfd_generic_link_add_one_symbol (link_info,
6514 					hash_table->bfd_of_glue_owner, "$a",
6515 					BSF_LOCAL, s, 0, NULL,
6516 					TRUE, FALSE, &bh);
6517 
6518       myh = (struct elf_link_hash_entry *) bh;
6519       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6520       myh->forced_local = 1;
6521 
6522       /* The elf32_arm_init_maps function only cares about symbols from input
6523 	 BFDs.  We must make a note of this generated mapping symbol
6524 	 ourselves so that code byteswapping works properly in
6525 	 elf32_arm_write_section.  */
6526       elf32_arm_section_map_add (s, 'a', 0);
6527     }
6528 
6529   s->size += VFP11_ERRATUM_VENEER_SIZE;
6530   hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6531   hash_table->num_vfp11_fixes++;
6532 
6533   /* The offset of the veneer.  */
6534   return val;
6535 }
6536 
6537 /* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
6538    veneers need to be handled because used only in Cortex-M.  */
6539 
6540 static bfd_vma
6541 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
6542 				 elf32_stm32l4xx_erratum_list *branch,
6543 				 bfd *branch_bfd,
6544 				 asection *branch_sec,
6545 				 unsigned int offset,
6546 				 bfd_size_type veneer_size)
6547 {
6548   asection *s;
6549   struct elf32_arm_link_hash_table *hash_table;
6550   char *tmp_name;
6551   struct elf_link_hash_entry *myh;
6552   struct bfd_link_hash_entry *bh;
6553   bfd_vma val;
6554   struct _arm_elf_section_data *sec_data;
6555   elf32_stm32l4xx_erratum_list *newerr;
6556 
6557   hash_table = elf32_arm_hash_table (link_info);
6558   BFD_ASSERT (hash_table != NULL);
6559   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6560 
6561   s = bfd_get_linker_section
6562     (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6563 
6564   BFD_ASSERT (s != NULL);
6565 
6566   sec_data = elf32_arm_section_data (s);
6567 
6568   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6569 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
6570 
6571   BFD_ASSERT (tmp_name);
6572 
6573   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
6574 	   hash_table->num_stm32l4xx_fixes);
6575 
6576   myh = elf_link_hash_lookup
6577     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6578 
6579   BFD_ASSERT (myh == NULL);
6580 
6581   bh = NULL;
6582   val = hash_table->stm32l4xx_erratum_glue_size;
6583   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6584 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6585 				    NULL, TRUE, FALSE, &bh);
6586 
6587   myh = (struct elf_link_hash_entry *) bh;
6588   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6589   myh->forced_local = 1;
6590 
6591   /* Link veneer back to calling location.  */
6592   sec_data->stm32l4xx_erratumcount += 1;
6593   newerr = (elf32_stm32l4xx_erratum_list *)
6594       bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
6595 
6596   newerr->type = STM32L4XX_ERRATUM_VENEER;
6597   newerr->vma = -1;
6598   newerr->u.v.branch = branch;
6599   newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
6600   branch->u.b.veneer = newerr;
6601 
6602   newerr->next = sec_data->stm32l4xx_erratumlist;
6603   sec_data->stm32l4xx_erratumlist = newerr;
6604 
6605   /* A symbol for the return from the veneer.  */
6606   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
6607 	   hash_table->num_stm32l4xx_fixes);
6608 
6609   myh = elf_link_hash_lookup
6610     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6611 
6612   if (myh != NULL)
6613     abort ();
6614 
6615   bh = NULL;
6616   val = offset + 4;
6617   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6618 				    branch_sec, val, NULL, TRUE, FALSE, &bh);
6619 
6620   myh = (struct elf_link_hash_entry *) bh;
6621   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6622   myh->forced_local = 1;
6623 
6624   free (tmp_name);
6625 
6626   /* Generate a mapping symbol for the veneer section, and explicitly add an
6627      entry for that symbol to the code/data map for the section.  */
6628   if (hash_table->stm32l4xx_erratum_glue_size == 0)
6629     {
6630       bh = NULL;
6631       /* Creates a THUMB symbol since there is no other choice.  */
6632       _bfd_generic_link_add_one_symbol (link_info,
6633 					hash_table->bfd_of_glue_owner, "$t",
6634 					BSF_LOCAL, s, 0, NULL,
6635 					TRUE, FALSE, &bh);
6636 
6637       myh = (struct elf_link_hash_entry *) bh;
6638       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6639       myh->forced_local = 1;
6640 
6641       /* The elf32_arm_init_maps function only cares about symbols from input
6642 	 BFDs.  We must make a note of this generated mapping symbol
6643 	 ourselves so that code byteswapping works properly in
6644 	 elf32_arm_write_section.  */
6645       elf32_arm_section_map_add (s, 't', 0);
6646     }
6647 
6648   s->size += veneer_size;
6649   hash_table->stm32l4xx_erratum_glue_size += veneer_size;
6650   hash_table->num_stm32l4xx_fixes++;
6651 
6652   /* The offset of the veneer.  */
6653   return val;
6654 }
6655 
6656 #define ARM_GLUE_SECTION_FLAGS \
6657   (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6658    | SEC_READONLY | SEC_LINKER_CREATED)
6659 
6660 /* Create a fake section for use by the ARM backend of the linker.  */
6661 
6662 static bfd_boolean
6663 arm_make_glue_section (bfd * abfd, const char * name)
6664 {
6665   asection * sec;
6666 
6667   sec = bfd_get_linker_section (abfd, name);
6668   if (sec != NULL)
6669     /* Already made.  */
6670     return TRUE;
6671 
6672   sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6673 
6674   if (sec == NULL
6675       || !bfd_set_section_alignment (abfd, sec, 2))
6676     return FALSE;
6677 
6678   /* Set the gc mark to prevent the section from being removed by garbage
6679      collection, despite the fact that no relocs refer to this section.  */
6680   sec->gc_mark = 1;
6681 
6682   return TRUE;
6683 }
6684 
6685 /* Set size of .plt entries.  This function is called from the
6686    linker scripts in ld/emultempl/{armelf}.em.  */
6687 
6688 void
6689 bfd_elf32_arm_use_long_plt (void)
6690 {
6691   elf32_arm_use_long_plt_entry = TRUE;
6692 }
6693 
6694 /* Add the glue sections to ABFD.  This function is called from the
6695    linker scripts in ld/emultempl/{armelf}.em.  */
6696 
6697 bfd_boolean
6698 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6699 					struct bfd_link_info *info)
6700 {
6701   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6702   bfd_boolean dostm32l4xx = globals
6703     && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6704   bfd_boolean addglue;
6705 
6706   /* If we are only performing a partial
6707      link do not bother adding the glue.  */
6708   if (bfd_link_relocatable (info))
6709     return TRUE;
6710 
6711   addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6712     && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6713     && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6714     && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6715 
6716   if (!dostm32l4xx)
6717     return addglue;
6718 
6719   return addglue
6720     && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6721 }
6722 
6723 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP.  This
6724    ensures they are not marked for deletion by
6725    strip_excluded_output_sections () when veneers are going to be created
6726    later.  Not doing so would trigger assert on empty section size in
6727    lang_size_sections_1 ().  */
6728 
6729 void
6730 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
6731 {
6732   enum elf32_arm_stub_type stub_type;
6733 
6734   /* If we are only performing a partial
6735      link do not bother adding the glue.  */
6736   if (bfd_link_relocatable (info))
6737     return;
6738 
6739   for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6740     {
6741       asection *out_sec;
6742       const char *out_sec_name;
6743 
6744       if (!arm_dedicated_stub_output_section_required (stub_type))
6745 	continue;
6746 
6747      out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
6748      out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
6749      if (out_sec != NULL)
6750 	out_sec->flags |= SEC_KEEP;
6751     }
6752 }
6753 
6754 /* Select a BFD to be used to hold the sections used by the glue code.
6755    This function is called from the linker scripts in ld/emultempl/
6756    {armelf/pe}.em.  */
6757 
6758 bfd_boolean
6759 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6760 {
6761   struct elf32_arm_link_hash_table *globals;
6762 
6763   /* If we are only performing a partial link
6764      do not bother getting a bfd to hold the glue.  */
6765   if (bfd_link_relocatable (info))
6766     return TRUE;
6767 
6768   /* Make sure we don't attach the glue sections to a dynamic object.  */
6769   BFD_ASSERT (!(abfd->flags & DYNAMIC));
6770 
6771   globals = elf32_arm_hash_table (info);
6772   BFD_ASSERT (globals != NULL);
6773 
6774   if (globals->bfd_of_glue_owner != NULL)
6775     return TRUE;
6776 
6777   /* Save the bfd for later use.  */
6778   globals->bfd_of_glue_owner = abfd;
6779 
6780   return TRUE;
6781 }
6782 
6783 static void
6784 check_use_blx (struct elf32_arm_link_hash_table *globals)
6785 {
6786   int cpu_arch;
6787 
6788   cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6789 				       Tag_CPU_arch);
6790 
6791   if (globals->fix_arm1176)
6792     {
6793       if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6794 	globals->use_blx = 1;
6795     }
6796   else
6797     {
6798       if (cpu_arch > TAG_CPU_ARCH_V4T)
6799 	globals->use_blx = 1;
6800     }
6801 }
6802 
6803 bfd_boolean
6804 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6805 					 struct bfd_link_info *link_info)
6806 {
6807   Elf_Internal_Shdr *symtab_hdr;
6808   Elf_Internal_Rela *internal_relocs = NULL;
6809   Elf_Internal_Rela *irel, *irelend;
6810   bfd_byte *contents = NULL;
6811 
6812   asection *sec;
6813   struct elf32_arm_link_hash_table *globals;
6814 
6815   /* If we are only performing a partial link do not bother
6816      to construct any glue.  */
6817   if (bfd_link_relocatable (link_info))
6818     return TRUE;
6819 
6820   /* Here we have a bfd that is to be included on the link.  We have a
6821      hook to do reloc rummaging, before section sizes are nailed down.  */
6822   globals = elf32_arm_hash_table (link_info);
6823   BFD_ASSERT (globals != NULL);
6824 
6825   check_use_blx (globals);
6826 
6827   if (globals->byteswap_code && !bfd_big_endian (abfd))
6828     {
6829       _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6830 			  abfd);
6831       return FALSE;
6832     }
6833 
6834   /* PR 5398: If we have not decided to include any loadable sections in
6835      the output then we will not have a glue owner bfd.  This is OK, it
6836      just means that there is nothing else for us to do here.  */
6837   if (globals->bfd_of_glue_owner == NULL)
6838     return TRUE;
6839 
6840   /* Rummage around all the relocs and map the glue vectors.  */
6841   sec = abfd->sections;
6842 
6843   if (sec == NULL)
6844     return TRUE;
6845 
6846   for (; sec != NULL; sec = sec->next)
6847     {
6848       if (sec->reloc_count == 0)
6849 	continue;
6850 
6851       if ((sec->flags & SEC_EXCLUDE) != 0)
6852 	continue;
6853 
6854       symtab_hdr = & elf_symtab_hdr (abfd);
6855 
6856       /* Load the relocs.  */
6857       internal_relocs
6858 	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6859 
6860       if (internal_relocs == NULL)
6861 	goto error_return;
6862 
6863       irelend = internal_relocs + sec->reloc_count;
6864       for (irel = internal_relocs; irel < irelend; irel++)
6865 	{
6866 	  long r_type;
6867 	  unsigned long r_index;
6868 
6869 	  struct elf_link_hash_entry *h;
6870 
6871 	  r_type = ELF32_R_TYPE (irel->r_info);
6872 	  r_index = ELF32_R_SYM (irel->r_info);
6873 
6874 	  /* These are the only relocation types we care about.  */
6875 	  if (   r_type != R_ARM_PC24
6876 	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6877 	    continue;
6878 
6879 	  /* Get the section contents if we haven't done so already.  */
6880 	  if (contents == NULL)
6881 	    {
6882 	      /* Get cached copy if it exists.  */
6883 	      if (elf_section_data (sec)->this_hdr.contents != NULL)
6884 		contents = elf_section_data (sec)->this_hdr.contents;
6885 	      else
6886 		{
6887 		  /* Go get them off disk.  */
6888 		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6889 		    goto error_return;
6890 		}
6891 	    }
6892 
6893 	  if (r_type == R_ARM_V4BX)
6894 	    {
6895 	      int reg;
6896 
6897 	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6898 	      record_arm_bx_glue (link_info, reg);
6899 	      continue;
6900 	    }
6901 
6902 	  /* If the relocation is not against a symbol it cannot concern us.  */
6903 	  h = NULL;
6904 
6905 	  /* We don't care about local symbols.  */
6906 	  if (r_index < symtab_hdr->sh_info)
6907 	    continue;
6908 
6909 	  /* This is an external symbol.  */
6910 	  r_index -= symtab_hdr->sh_info;
6911 	  h = (struct elf_link_hash_entry *)
6912 	    elf_sym_hashes (abfd)[r_index];
6913 
6914 	  /* If the relocation is against a static symbol it must be within
6915 	     the current section and so cannot be a cross ARM/Thumb relocation.  */
6916 	  if (h == NULL)
6917 	    continue;
6918 
6919 	  /* If the call will go through a PLT entry then we do not need
6920 	     glue.  */
6921 	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6922 	    continue;
6923 
6924 	  switch (r_type)
6925 	    {
6926 	    case R_ARM_PC24:
6927 	      /* This one is a call from arm code.  We need to look up
6928 		 the target of the call.  If it is a thumb target, we
6929 		 insert glue.  */
6930 	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
6931 		  == ST_BRANCH_TO_THUMB)
6932 		record_arm_to_thumb_glue (link_info, h);
6933 	      break;
6934 
6935 	    default:
6936 	      abort ();
6937 	    }
6938 	}
6939 
6940       if (contents != NULL
6941 	  && elf_section_data (sec)->this_hdr.contents != contents)
6942 	free (contents);
6943       contents = NULL;
6944 
6945       if (internal_relocs != NULL
6946 	  && elf_section_data (sec)->relocs != internal_relocs)
6947 	free (internal_relocs);
6948       internal_relocs = NULL;
6949     }
6950 
6951   return TRUE;
6952 
6953 error_return:
6954   if (contents != NULL
6955       && elf_section_data (sec)->this_hdr.contents != contents)
6956     free (contents);
6957   if (internal_relocs != NULL
6958       && elf_section_data (sec)->relocs != internal_relocs)
6959     free (internal_relocs);
6960 
6961   return FALSE;
6962 }
6963 #endif
6964 
6965 
6966 /* Initialise maps of ARM/Thumb/data for input BFDs.  */
6967 
6968 void
6969 bfd_elf32_arm_init_maps (bfd *abfd)
6970 {
6971   Elf_Internal_Sym *isymbuf;
6972   Elf_Internal_Shdr *hdr;
6973   unsigned int i, localsyms;
6974 
6975   /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
6976   if (! is_arm_elf (abfd))
6977     return;
6978 
6979   if ((abfd->flags & DYNAMIC) != 0)
6980     return;
6981 
6982   hdr = & elf_symtab_hdr (abfd);
6983   localsyms = hdr->sh_info;
6984 
6985   /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6986      should contain the number of local symbols, which should come before any
6987      global symbols.  Mapping symbols are always local.  */
6988   isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6989 				  NULL);
6990 
6991   /* No internal symbols read?  Skip this BFD.  */
6992   if (isymbuf == NULL)
6993     return;
6994 
6995   for (i = 0; i < localsyms; i++)
6996     {
6997       Elf_Internal_Sym *isym = &isymbuf[i];
6998       asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6999       const char *name;
7000 
7001       if (sec != NULL
7002 	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7003 	{
7004 	  name = bfd_elf_string_from_elf_section (abfd,
7005 	    hdr->sh_link, isym->st_name);
7006 
7007 	  if (bfd_is_arm_special_symbol_name (name,
7008 					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7009 	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
7010 	}
7011     }
7012 }
7013 
7014 
7015 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7016    say what they wanted.  */
7017 
7018 void
7019 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
7020 {
7021   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7022   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7023 
7024   if (globals == NULL)
7025     return;
7026 
7027   if (globals->fix_cortex_a8 == -1)
7028     {
7029       /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
7030       if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
7031 	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
7032 	      || out_attr[Tag_CPU_arch_profile].i == 0))
7033 	globals->fix_cortex_a8 = 1;
7034       else
7035 	globals->fix_cortex_a8 = 0;
7036     }
7037 }
7038 
7039 
7040 void
7041 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
7042 {
7043   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7044   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7045 
7046   if (globals == NULL)
7047     return;
7048   /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
7049   if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
7050     {
7051       switch (globals->vfp11_fix)
7052 	{
7053 	case BFD_ARM_VFP11_FIX_DEFAULT:
7054 	case BFD_ARM_VFP11_FIX_NONE:
7055 	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7056 	  break;
7057 
7058 	default:
7059 	  /* Give a warning, but do as the user requests anyway.  */
7060 	  (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
7061 	    "workaround is not necessary for target architecture"), obfd);
7062 	}
7063     }
7064   else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
7065     /* For earlier architectures, we might need the workaround, but do not
7066        enable it by default.  If users is running with broken hardware, they
7067        must enable the erratum fix explicitly.  */
7068     globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
7069 }
7070 
7071 void
7072 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
7073 {
7074   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7075   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7076 
7077   if (globals == NULL)
7078     return;
7079 
7080   /* We assume only Cortex-M4 may require the fix.  */
7081   if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
7082       || out_attr[Tag_CPU_arch_profile].i != 'M')
7083     {
7084       if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
7085 	/* Give a warning, but do as the user requests anyway.  */
7086 	(*_bfd_error_handler)
7087 	  (_("%B: warning: selected STM32L4XX erratum "
7088 	     "workaround is not necessary for target architecture"), obfd);
7089     }
7090 }
7091 
7092 enum bfd_arm_vfp11_pipe
7093 {
7094   VFP11_FMAC,
7095   VFP11_LS,
7096   VFP11_DS,
7097   VFP11_BAD
7098 };
7099 
7100 /* Return a VFP register number.  This is encoded as RX:X for single-precision
7101    registers, or X:RX for double-precision registers, where RX is the group of
7102    four bits in the instruction encoding and X is the single extension bit.
7103    RX and X fields are specified using their lowest (starting) bit.  The return
7104    value is:
7105 
7106      0...31: single-precision registers s0...s31
7107      32...63: double-precision registers d0...d31.
7108 
7109    Although X should be zero for VFP11 (encoding d0...d15 only), we might
7110    encounter VFP3 instructions, so we allow the full range for DP registers.  */
7111 
7112 static unsigned int
7113 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
7114 		     unsigned int x)
7115 {
7116   if (is_double)
7117     return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
7118   else
7119     return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
7120 }
7121 
7122 /* Set bits in *WMASK according to a register number REG as encoded by
7123    bfd_arm_vfp11_regno().  Ignore d16-d31.  */
7124 
7125 static void
7126 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
7127 {
7128   if (reg < 32)
7129     *wmask |= 1 << reg;
7130   else if (reg < 48)
7131     *wmask |= 3 << ((reg - 32) * 2);
7132 }
7133 
7134 /* Return TRUE if WMASK overwrites anything in REGS.  */
7135 
7136 static bfd_boolean
7137 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
7138 {
7139   int i;
7140 
7141   for (i = 0; i < numregs; i++)
7142     {
7143       unsigned int reg = regs[i];
7144 
7145       if (reg < 32 && (wmask & (1 << reg)) != 0)
7146 	return TRUE;
7147 
7148       reg -= 32;
7149 
7150       if (reg >= 16)
7151 	continue;
7152 
7153       if ((wmask & (3 << (reg * 2))) != 0)
7154 	return TRUE;
7155     }
7156 
7157   return FALSE;
7158 }
7159 
7160 /* In this function, we're interested in two things: finding input registers
7161    for VFP data-processing instructions, and finding the set of registers which
7162    arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
7163    hold the written set, so FLDM etc. are easy to deal with (we're only
7164    interested in 32 SP registers or 16 dp registers, due to the VFP version
7165    implemented by the chip in question).  DP registers are marked by setting
7166    both SP registers in the write mask).  */
7167 
7168 static enum bfd_arm_vfp11_pipe
7169 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
7170 			   int *numregs)
7171 {
7172   enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
7173   bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
7174 
7175   if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
7176     {
7177       unsigned int pqrs;
7178       unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
7179       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
7180 
7181       pqrs = ((insn & 0x00800000) >> 20)
7182 	   | ((insn & 0x00300000) >> 19)
7183 	   | ((insn & 0x00000040) >> 6);
7184 
7185       switch (pqrs)
7186 	{
7187 	case 0: /* fmac[sd].  */
7188 	case 1: /* fnmac[sd].  */
7189 	case 2: /* fmsc[sd].  */
7190 	case 3: /* fnmsc[sd].  */
7191 	  vpipe = VFP11_FMAC;
7192 	  bfd_arm_vfp11_write_mask (destmask, fd);
7193 	  regs[0] = fd;
7194 	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
7195 	  regs[2] = fm;
7196 	  *numregs = 3;
7197 	  break;
7198 
7199 	case 4: /* fmul[sd].  */
7200 	case 5: /* fnmul[sd].  */
7201 	case 6: /* fadd[sd].  */
7202 	case 7: /* fsub[sd].  */
7203 	  vpipe = VFP11_FMAC;
7204 	  goto vfp_binop;
7205 
7206 	case 8: /* fdiv[sd].  */
7207 	  vpipe = VFP11_DS;
7208 	  vfp_binop:
7209 	  bfd_arm_vfp11_write_mask (destmask, fd);
7210 	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
7211 	  regs[1] = fm;
7212 	  *numregs = 2;
7213 	  break;
7214 
7215 	case 15: /* extended opcode.  */
7216 	  {
7217 	    unsigned int extn = ((insn >> 15) & 0x1e)
7218 			      | ((insn >> 7) & 1);
7219 
7220 	    switch (extn)
7221 	      {
7222 	      case 0: /* fcpy[sd].  */
7223 	      case 1: /* fabs[sd].  */
7224 	      case 2: /* fneg[sd].  */
7225 	      case 8: /* fcmp[sd].  */
7226 	      case 9: /* fcmpe[sd].  */
7227 	      case 10: /* fcmpz[sd].  */
7228 	      case 11: /* fcmpez[sd].  */
7229 	      case 16: /* fuito[sd].  */
7230 	      case 17: /* fsito[sd].  */
7231 	      case 24: /* ftoui[sd].  */
7232 	      case 25: /* ftouiz[sd].  */
7233 	      case 26: /* ftosi[sd].  */
7234 	      case 27: /* ftosiz[sd].  */
7235 		/* These instructions will not bounce due to underflow.  */
7236 		*numregs = 0;
7237 		vpipe = VFP11_FMAC;
7238 		break;
7239 
7240 	      case 3: /* fsqrt[sd].  */
7241 		/* fsqrt cannot underflow, but it can (perhaps) overwrite
7242 		   registers to cause the erratum in previous instructions.  */
7243 		bfd_arm_vfp11_write_mask (destmask, fd);
7244 		vpipe = VFP11_DS;
7245 		break;
7246 
7247 	      case 15: /* fcvt{ds,sd}.  */
7248 		{
7249 		  int rnum = 0;
7250 
7251 		  bfd_arm_vfp11_write_mask (destmask, fd);
7252 
7253 		  /* Only FCVTSD can underflow.  */
7254 		  if ((insn & 0x100) != 0)
7255 		    regs[rnum++] = fm;
7256 
7257 		  *numregs = rnum;
7258 
7259 		  vpipe = VFP11_FMAC;
7260 		}
7261 		break;
7262 
7263 	      default:
7264 		return VFP11_BAD;
7265 	      }
7266 	  }
7267 	  break;
7268 
7269 	default:
7270 	  return VFP11_BAD;
7271 	}
7272     }
7273   /* Two-register transfer.  */
7274   else if ((insn & 0x0fe00ed0) == 0x0c400a10)
7275     {
7276       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
7277 
7278       if ((insn & 0x100000) == 0)
7279 	{
7280 	  if (is_double)
7281 	    bfd_arm_vfp11_write_mask (destmask, fm);
7282 	  else
7283 	    {
7284 	      bfd_arm_vfp11_write_mask (destmask, fm);
7285 	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
7286 	    }
7287 	}
7288 
7289       vpipe = VFP11_LS;
7290     }
7291   else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
7292     {
7293       int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
7294       unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
7295 
7296       switch (puw)
7297 	{
7298 	case 0: /* Two-reg transfer.  We should catch these above.  */
7299 	  abort ();
7300 
7301 	case 2: /* fldm[sdx].  */
7302 	case 3:
7303 	case 5:
7304 	  {
7305 	    unsigned int i, offset = insn & 0xff;
7306 
7307 	    if (is_double)
7308 	      offset >>= 1;
7309 
7310 	    for (i = fd; i < fd + offset; i++)
7311 	      bfd_arm_vfp11_write_mask (destmask, i);
7312 	  }
7313 	  break;
7314 
7315 	case 4: /* fld[sd].  */
7316 	case 6:
7317 	  bfd_arm_vfp11_write_mask (destmask, fd);
7318 	  break;
7319 
7320 	default:
7321 	  return VFP11_BAD;
7322 	}
7323 
7324       vpipe = VFP11_LS;
7325     }
7326   /* Single-register transfer. Note L==0.  */
7327   else if ((insn & 0x0f100e10) == 0x0e000a10)
7328     {
7329       unsigned int opcode = (insn >> 21) & 7;
7330       unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
7331 
7332       switch (opcode)
7333 	{
7334 	case 0: /* fmsr/fmdlr.  */
7335 	case 1: /* fmdhr.  */
7336 	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
7337 	     destination register.  I don't know if this is exactly right,
7338 	     but it is the conservative choice.  */
7339 	  bfd_arm_vfp11_write_mask (destmask, fn);
7340 	  break;
7341 
7342 	case 7: /* fmxr.  */
7343 	  break;
7344 	}
7345 
7346       vpipe = VFP11_LS;
7347     }
7348 
7349   return vpipe;
7350 }
7351 
7352 
7353 static int elf32_arm_compare_mapping (const void * a, const void * b);
7354 
7355 
7356 /* Look for potentially-troublesome code sequences which might trigger the
7357    VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
7358    (available from ARM) for details of the erratum.  A short version is
7359    described in ld.texinfo.  */
7360 
7361 bfd_boolean
7362 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7363 {
7364   asection *sec;
7365   bfd_byte *contents = NULL;
7366   int state = 0;
7367   int regs[3], numregs = 0;
7368   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7369   int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7370 
7371   if (globals == NULL)
7372     return FALSE;
7373 
7374   /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7375      The states transition as follows:
7376 
7377        0 -> 1 (vector) or 0 -> 2 (scalar)
7378 	   A VFP FMAC-pipeline instruction has been seen. Fill
7379 	   regs[0]..regs[numregs-1] with its input operands. Remember this
7380 	   instruction in 'first_fmac'.
7381 
7382        1 -> 2
7383 	   Any instruction, except for a VFP instruction which overwrites
7384 	   regs[*].
7385 
7386        1 -> 3 [ -> 0 ]  or
7387        2 -> 3 [ -> 0 ]
7388 	   A VFP instruction has been seen which overwrites any of regs[*].
7389 	   We must make a veneer!  Reset state to 0 before examining next
7390 	   instruction.
7391 
7392        2 -> 0
7393 	   If we fail to match anything in state 2, reset to state 0 and reset
7394 	   the instruction pointer to the instruction after 'first_fmac'.
7395 
7396      If the VFP11 vector mode is in use, there must be at least two unrelated
7397      instructions between anti-dependent VFP11 instructions to properly avoid
7398      triggering the erratum, hence the use of the extra state 1.  */
7399 
7400   /* If we are only performing a partial link do not bother
7401      to construct any glue.  */
7402   if (bfd_link_relocatable (link_info))
7403     return TRUE;
7404 
7405   /* Skip if this bfd does not correspond to an ELF image.  */
7406   if (! is_arm_elf (abfd))
7407     return TRUE;
7408 
7409   /* We should have chosen a fix type by the time we get here.  */
7410   BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7411 
7412   if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7413     return TRUE;
7414 
7415   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
7416   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7417     return TRUE;
7418 
7419   for (sec = abfd->sections; sec != NULL; sec = sec->next)
7420     {
7421       unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7422       struct _arm_elf_section_data *sec_data;
7423 
7424       /* If we don't have executable progbits, we're not interested in this
7425 	 section.  Also skip if section is to be excluded.  */
7426       if (elf_section_type (sec) != SHT_PROGBITS
7427 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7428 	  || (sec->flags & SEC_EXCLUDE) != 0
7429 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7430 	  || sec->output_section == bfd_abs_section_ptr
7431 	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7432 	continue;
7433 
7434       sec_data = elf32_arm_section_data (sec);
7435 
7436       if (sec_data->mapcount == 0)
7437 	continue;
7438 
7439       if (elf_section_data (sec)->this_hdr.contents != NULL)
7440 	contents = elf_section_data (sec)->this_hdr.contents;
7441       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7442 	goto error_return;
7443 
7444       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7445 	     elf32_arm_compare_mapping);
7446 
7447       for (span = 0; span < sec_data->mapcount; span++)
7448 	{
7449 	  unsigned int span_start = sec_data->map[span].vma;
7450 	  unsigned int span_end = (span == sec_data->mapcount - 1)
7451 				  ? sec->size : sec_data->map[span + 1].vma;
7452 	  char span_type = sec_data->map[span].type;
7453 
7454 	  /* FIXME: Only ARM mode is supported at present.  We may need to
7455 	     support Thumb-2 mode also at some point.  */
7456 	  if (span_type != 'a')
7457 	    continue;
7458 
7459 	  for (i = span_start; i < span_end;)
7460 	    {
7461 	      unsigned int next_i = i + 4;
7462 	      unsigned int insn = bfd_big_endian (abfd)
7463 		? (contents[i] << 24)
7464 		  | (contents[i + 1] << 16)
7465 		  | (contents[i + 2] << 8)
7466 		  | contents[i + 3]
7467 		: (contents[i + 3] << 24)
7468 		  | (contents[i + 2] << 16)
7469 		  | (contents[i + 1] << 8)
7470 		  | contents[i];
7471 	      unsigned int writemask = 0;
7472 	      enum bfd_arm_vfp11_pipe vpipe;
7473 
7474 	      switch (state)
7475 		{
7476 		case 0:
7477 		  vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7478 						    &numregs);
7479 		  /* I'm assuming the VFP11 erratum can trigger with denorm
7480 		     operands on either the FMAC or the DS pipeline. This might
7481 		     lead to slightly overenthusiastic veneer insertion.  */
7482 		  if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7483 		    {
7484 		      state = use_vector ? 1 : 2;
7485 		      first_fmac = i;
7486 		      veneer_of_insn = insn;
7487 		    }
7488 		  break;
7489 
7490 		case 1:
7491 		  {
7492 		    int other_regs[3], other_numregs;
7493 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7494 						      other_regs,
7495 						      &other_numregs);
7496 		    if (vpipe != VFP11_BAD
7497 			&& bfd_arm_vfp11_antidependency (writemask, regs,
7498 							 numregs))
7499 		      state = 3;
7500 		    else
7501 		      state = 2;
7502 		  }
7503 		  break;
7504 
7505 		case 2:
7506 		  {
7507 		    int other_regs[3], other_numregs;
7508 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7509 						      other_regs,
7510 						      &other_numregs);
7511 		    if (vpipe != VFP11_BAD
7512 			&& bfd_arm_vfp11_antidependency (writemask, regs,
7513 							 numregs))
7514 		      state = 3;
7515 		    else
7516 		      {
7517 			state = 0;
7518 			next_i = first_fmac + 4;
7519 		      }
7520 		  }
7521 		  break;
7522 
7523 		case 3:
7524 		  abort ();  /* Should be unreachable.  */
7525 		}
7526 
7527 	      if (state == 3)
7528 		{
7529 		  elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7530 		      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7531 
7532 		  elf32_arm_section_data (sec)->erratumcount += 1;
7533 
7534 		  newerr->u.b.vfp_insn = veneer_of_insn;
7535 
7536 		  switch (span_type)
7537 		    {
7538 		    case 'a':
7539 		      newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7540 		      break;
7541 
7542 		    default:
7543 		      abort ();
7544 		    }
7545 
7546 		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7547 					       first_fmac);
7548 
7549 		  newerr->vma = -1;
7550 
7551 		  newerr->next = sec_data->erratumlist;
7552 		  sec_data->erratumlist = newerr;
7553 
7554 		  state = 0;
7555 		}
7556 
7557 	      i = next_i;
7558 	    }
7559 	}
7560 
7561       if (contents != NULL
7562 	  && elf_section_data (sec)->this_hdr.contents != contents)
7563 	free (contents);
7564       contents = NULL;
7565     }
7566 
7567   return TRUE;
7568 
7569 error_return:
7570   if (contents != NULL
7571       && elf_section_data (sec)->this_hdr.contents != contents)
7572     free (contents);
7573 
7574   return FALSE;
7575 }
7576 
7577 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7578    after sections have been laid out, using specially-named symbols.  */
7579 
7580 void
7581 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7582 					  struct bfd_link_info *link_info)
7583 {
7584   asection *sec;
7585   struct elf32_arm_link_hash_table *globals;
7586   char *tmp_name;
7587 
7588   if (bfd_link_relocatable (link_info))
7589     return;
7590 
7591   /* Skip if this bfd does not correspond to an ELF image.  */
7592   if (! is_arm_elf (abfd))
7593     return;
7594 
7595   globals = elf32_arm_hash_table (link_info);
7596   if (globals == NULL)
7597     return;
7598 
7599   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7600 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7601 
7602   for (sec = abfd->sections; sec != NULL; sec = sec->next)
7603     {
7604       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7605       elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7606 
7607       for (; errnode != NULL; errnode = errnode->next)
7608 	{
7609 	  struct elf_link_hash_entry *myh;
7610 	  bfd_vma vma;
7611 
7612 	  switch (errnode->type)
7613 	    {
7614 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7615 	    case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7616 	      /* Find veneer symbol.  */
7617 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7618 		       errnode->u.b.veneer->u.v.id);
7619 
7620 	      myh = elf_link_hash_lookup
7621 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7622 
7623 	      if (myh == NULL)
7624 		(*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7625 					 "`%s'"), abfd, tmp_name);
7626 
7627 	      vma = myh->root.u.def.section->output_section->vma
7628 		    + myh->root.u.def.section->output_offset
7629 		    + myh->root.u.def.value;
7630 
7631 	      errnode->u.b.veneer->vma = vma;
7632 	      break;
7633 
7634 	    case VFP11_ERRATUM_ARM_VENEER:
7635 	    case VFP11_ERRATUM_THUMB_VENEER:
7636 	      /* Find return location.  */
7637 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7638 		       errnode->u.v.id);
7639 
7640 	      myh = elf_link_hash_lookup
7641 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7642 
7643 	      if (myh == NULL)
7644 		(*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7645 					 "`%s'"), abfd, tmp_name);
7646 
7647 	      vma = myh->root.u.def.section->output_section->vma
7648 		    + myh->root.u.def.section->output_offset
7649 		    + myh->root.u.def.value;
7650 
7651 	      errnode->u.v.branch->vma = vma;
7652 	      break;
7653 
7654 	    default:
7655 	      abort ();
7656 	    }
7657 	}
7658     }
7659 
7660   free (tmp_name);
7661 }
7662 
7663 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7664    return locations after sections have been laid out, using
7665    specially-named symbols.  */
7666 
7667 void
7668 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7669 					      struct bfd_link_info *link_info)
7670 {
7671   asection *sec;
7672   struct elf32_arm_link_hash_table *globals;
7673   char *tmp_name;
7674 
7675   if (bfd_link_relocatable (link_info))
7676     return;
7677 
7678   /* Skip if this bfd does not correspond to an ELF image.  */
7679   if (! is_arm_elf (abfd))
7680     return;
7681 
7682   globals = elf32_arm_hash_table (link_info);
7683   if (globals == NULL)
7684     return;
7685 
7686   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7687 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7688 
7689   for (sec = abfd->sections; sec != NULL; sec = sec->next)
7690     {
7691       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7692       elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7693 
7694       for (; errnode != NULL; errnode = errnode->next)
7695 	{
7696 	  struct elf_link_hash_entry *myh;
7697 	  bfd_vma vma;
7698 
7699 	  switch (errnode->type)
7700 	    {
7701 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7702 	      /* Find veneer symbol.  */
7703 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7704 		       errnode->u.b.veneer->u.v.id);
7705 
7706 	      myh = elf_link_hash_lookup
7707 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7708 
7709 	      if (myh == NULL)
7710 		(*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7711 					 "`%s'"), abfd, tmp_name);
7712 
7713 	      vma = myh->root.u.def.section->output_section->vma
7714 		+ myh->root.u.def.section->output_offset
7715 		+ myh->root.u.def.value;
7716 
7717 	      errnode->u.b.veneer->vma = vma;
7718 	      break;
7719 
7720 	    case STM32L4XX_ERRATUM_VENEER:
7721 	      /* Find return location.  */
7722 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7723 		       errnode->u.v.id);
7724 
7725 	      myh = elf_link_hash_lookup
7726 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7727 
7728 	      if (myh == NULL)
7729 		(*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7730 					 "`%s'"), abfd, tmp_name);
7731 
7732 	      vma = myh->root.u.def.section->output_section->vma
7733 		+ myh->root.u.def.section->output_offset
7734 		+ myh->root.u.def.value;
7735 
7736 	      errnode->u.v.branch->vma = vma;
7737 	      break;
7738 
7739 	    default:
7740 	      abort ();
7741 	    }
7742 	}
7743     }
7744 
7745   free (tmp_name);
7746 }
7747 
7748 static inline bfd_boolean
7749 is_thumb2_ldmia (const insn32 insn)
7750 {
7751   /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7752      1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
7753   return (insn & 0xffd02000) == 0xe8900000;
7754 }
7755 
7756 static inline bfd_boolean
7757 is_thumb2_ldmdb (const insn32 insn)
7758 {
7759   /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7760      1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
7761   return (insn & 0xffd02000) == 0xe9100000;
7762 }
7763 
7764 static inline bfd_boolean
7765 is_thumb2_vldm (const insn32 insn)
7766 {
7767   /* A6.5 Extension register load or store instruction
7768      A7.7.229
7769      We look for SP 32-bit and DP 64-bit registers.
7770      Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7771      <list> is consecutive 64-bit registers
7772      1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7773      Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7774      <list> is consecutive 32-bit registers
7775      1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7776      if P==0 && U==1 && W==1 && Rn=1101 VPOP
7777      if PUW=010 || PUW=011 || PUW=101 VLDM.  */
7778   return
7779     (((insn & 0xfe100f00) == 0xec100b00) ||
7780      ((insn & 0xfe100f00) == 0xec100a00))
7781     && /* (IA without !).  */
7782     (((((insn << 7) >> 28) & 0xd) == 0x4)
7783      /* (IA with !), includes VPOP (when reg number is SP).  */
7784      || ((((insn << 7) >> 28) & 0xd) == 0x5)
7785      /* (DB with !).  */
7786      || ((((insn << 7) >> 28) & 0xd) == 0x9));
7787 }
7788 
7789 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7790    VLDM opcode and:
7791  - computes the number and the mode of memory accesses
7792  - decides if the replacement should be done:
7793    . replaces only if > 8-word accesses
7794    . or (testing purposes only) replaces all accesses.  */
7795 
7796 static bfd_boolean
7797 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7798 				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7799 {
7800   int nb_words = 0;
7801 
7802   /* The field encoding the register list is the same for both LDMIA
7803      and LDMDB encodings.  */
7804   if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7805     nb_words = popcount (insn & 0x0000ffff);
7806   else if (is_thumb2_vldm (insn))
7807    nb_words = (insn & 0xff);
7808 
7809   /* DEFAULT mode accounts for the real bug condition situation,
7810      ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
7811   return
7812     (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7813     (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7814 }
7815 
7816 /* Look for potentially-troublesome code sequences which might trigger
7817    the STM STM32L4XX erratum.  */
7818 
7819 bfd_boolean
7820 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
7821 				      struct bfd_link_info *link_info)
7822 {
7823   asection *sec;
7824   bfd_byte *contents = NULL;
7825   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7826 
7827   if (globals == NULL)
7828     return FALSE;
7829 
7830   /* If we are only performing a partial link do not bother
7831      to construct any glue.  */
7832   if (bfd_link_relocatable (link_info))
7833     return TRUE;
7834 
7835   /* Skip if this bfd does not correspond to an ELF image.  */
7836   if (! is_arm_elf (abfd))
7837     return TRUE;
7838 
7839   if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
7840     return TRUE;
7841 
7842   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
7843   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7844     return TRUE;
7845 
7846   for (sec = abfd->sections; sec != NULL; sec = sec->next)
7847     {
7848       unsigned int i, span;
7849       struct _arm_elf_section_data *sec_data;
7850 
7851       /* If we don't have executable progbits, we're not interested in this
7852 	 section.  Also skip if section is to be excluded.  */
7853       if (elf_section_type (sec) != SHT_PROGBITS
7854 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7855 	  || (sec->flags & SEC_EXCLUDE) != 0
7856 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7857 	  || sec->output_section == bfd_abs_section_ptr
7858 	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
7859 	continue;
7860 
7861       sec_data = elf32_arm_section_data (sec);
7862 
7863       if (sec_data->mapcount == 0)
7864 	continue;
7865 
7866       if (elf_section_data (sec)->this_hdr.contents != NULL)
7867 	contents = elf_section_data (sec)->this_hdr.contents;
7868       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7869 	goto error_return;
7870 
7871       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7872 	     elf32_arm_compare_mapping);
7873 
7874       for (span = 0; span < sec_data->mapcount; span++)
7875 	{
7876 	  unsigned int span_start = sec_data->map[span].vma;
7877 	  unsigned int span_end = (span == sec_data->mapcount - 1)
7878 	    ? sec->size : sec_data->map[span + 1].vma;
7879 	  char span_type = sec_data->map[span].type;
7880 	  int itblock_current_pos = 0;
7881 
7882 	  /* Only Thumb2 mode need be supported with this CM4 specific
7883 	     code, we should not encounter any arm mode eg span_type
7884 	     != 'a'.  */
7885 	  if (span_type != 't')
7886 	    continue;
7887 
7888 	  for (i = span_start; i < span_end;)
7889 	    {
7890 	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
7891 	      bfd_boolean insn_32bit = FALSE;
7892 	      bfd_boolean is_ldm = FALSE;
7893 	      bfd_boolean is_vldm = FALSE;
7894 	      bfd_boolean is_not_last_in_it_block = FALSE;
7895 
7896 	      /* The first 16-bits of all 32-bit thumb2 instructions start
7897 		 with opcode[15..13]=0b111 and the encoded op1 can be anything
7898 		 except opcode[12..11]!=0b00.
7899 		 See 32-bit Thumb instruction encoding.  */
7900 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
7901 		insn_32bit = TRUE;
7902 
7903 	      /* Compute the predicate that tells if the instruction
7904 		 is concerned by the IT block
7905 		 - Creates an error if there is a ldm that is not
7906 		   last in the IT block thus cannot be replaced
7907 		 - Otherwise we can create a branch at the end of the
7908 		   IT block, it will be controlled naturally by IT
7909 		   with the proper pseudo-predicate
7910 		 - So the only interesting predicate is the one that
7911 		   tells that we are not on the last item of an IT
7912 		   block.  */
7913 	      if (itblock_current_pos != 0)
7914 		  is_not_last_in_it_block = !!--itblock_current_pos;
7915 
7916 	      if (insn_32bit)
7917 		{
7918 		  /* Load the rest of the insn (in manual-friendly order).  */
7919 		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
7920 		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
7921 		  is_vldm = is_thumb2_vldm (insn);
7922 
7923 		  /* Veneers are created for (v)ldm depending on
7924 		     option flags and memory accesses conditions; but
7925 		     if the instruction is not the last instruction of
7926 		     an IT block, we cannot create a jump there, so we
7927 		     bail out.  */
7928 		    if ((is_ldm || is_vldm) &&
7929 			stm32l4xx_need_create_replacing_stub
7930 			(insn, globals->stm32l4xx_fix))
7931 		      {
7932 			if (is_not_last_in_it_block)
7933 			  {
7934 			    (*_bfd_error_handler)
7935 			      /* Note - overlong line used here to allow for translation.  */
7936 			      (_("\
7937 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7938 				 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7939 			       abfd, sec, (long)i);
7940 			  }
7941 			else
7942 			  {
7943 			    elf32_stm32l4xx_erratum_list *newerr =
7944 			      (elf32_stm32l4xx_erratum_list *)
7945 			      bfd_zmalloc
7946 			      (sizeof (elf32_stm32l4xx_erratum_list));
7947 
7948 			    elf32_arm_section_data (sec)
7949 			      ->stm32l4xx_erratumcount += 1;
7950 			    newerr->u.b.insn = insn;
7951 			    /* We create only thumb branches.  */
7952 			    newerr->type =
7953 			      STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
7954 			    record_stm32l4xx_erratum_veneer
7955 			      (link_info, newerr, abfd, sec,
7956 			       i,
7957 			       is_ldm ?
7958 			       STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
7959 			       STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
7960 			    newerr->vma = -1;
7961 			    newerr->next = sec_data->stm32l4xx_erratumlist;
7962 			    sec_data->stm32l4xx_erratumlist = newerr;
7963 			  }
7964 		      }
7965 		}
7966 	      else
7967 		{
7968 		  /* A7.7.37 IT p208
7969 		     IT blocks are only encoded in T1
7970 		     Encoding T1: IT{x{y{z}}} <firstcond>
7971 		     1 0 1 1 - 1 1 1 1 - firstcond - mask
7972 		     if mask = '0000' then see 'related encodings'
7973 		     We don't deal with UNPREDICTABLE, just ignore these.
7974 		     There can be no nested IT blocks so an IT block
7975 		     is naturally a new one for which it is worth
7976 		     computing its size.  */
7977 		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
7978 		    ((insn & 0x000f) != 0x0000);
7979 		  /* If we have a new IT block we compute its size.  */
7980 		  if (is_newitblock)
7981 		    {
7982 		      /* Compute the number of instructions controlled
7983 			 by the IT block, it will be used to decide
7984 			 whether we are inside an IT block or not.  */
7985 		      unsigned int mask = insn & 0x000f;
7986 		      itblock_current_pos = 4 - ctz (mask);
7987 		    }
7988 		}
7989 
7990 	      i += insn_32bit ? 4 : 2;
7991 	    }
7992 	}
7993 
7994       if (contents != NULL
7995 	  && elf_section_data (sec)->this_hdr.contents != contents)
7996 	free (contents);
7997       contents = NULL;
7998     }
7999 
8000   return TRUE;
8001 
8002 error_return:
8003   if (contents != NULL
8004       && elf_section_data (sec)->this_hdr.contents != contents)
8005     free (contents);
8006 
8007   return FALSE;
8008 }
8009 
8010 /* Set target relocation values needed during linking.  */
8011 
8012 void
8013 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
8014 				 struct bfd_link_info *link_info,
8015 				 int target1_is_rel,
8016 				 char * target2_type,
8017 				 int fix_v4bx,
8018 				 int use_blx,
8019 				 bfd_arm_vfp11_fix vfp11_fix,
8020 				 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
8021 				 int no_enum_warn, int no_wchar_warn,
8022 				 int pic_veneer, int fix_cortex_a8,
8023 				 int fix_arm1176)
8024 {
8025   struct elf32_arm_link_hash_table *globals;
8026 
8027   globals = elf32_arm_hash_table (link_info);
8028   if (globals == NULL)
8029     return;
8030 
8031   globals->target1_is_rel = target1_is_rel;
8032   if (strcmp (target2_type, "rel") == 0)
8033     globals->target2_reloc = R_ARM_REL32;
8034   else if (strcmp (target2_type, "abs") == 0)
8035     globals->target2_reloc = R_ARM_ABS32;
8036   else if (strcmp (target2_type, "got-rel") == 0)
8037     globals->target2_reloc = R_ARM_GOT_PREL;
8038   else
8039     {
8040       _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
8041 			  target2_type);
8042     }
8043   globals->fix_v4bx = fix_v4bx;
8044   globals->use_blx |= use_blx;
8045   globals->vfp11_fix = vfp11_fix;
8046   globals->stm32l4xx_fix = stm32l4xx_fix;
8047   globals->pic_veneer = pic_veneer;
8048   globals->fix_cortex_a8 = fix_cortex_a8;
8049   globals->fix_arm1176 = fix_arm1176;
8050 
8051   BFD_ASSERT (is_arm_elf (output_bfd));
8052   elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
8053   elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
8054 }
8055 
8056 /* Replace the target offset of a Thumb bl or b.w instruction.  */
8057 
8058 static void
8059 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
8060 {
8061   bfd_vma upper;
8062   bfd_vma lower;
8063   int reloc_sign;
8064 
8065   BFD_ASSERT ((offset & 1) == 0);
8066 
8067   upper = bfd_get_16 (abfd, insn);
8068   lower = bfd_get_16 (abfd, insn + 2);
8069   reloc_sign = (offset < 0) ? 1 : 0;
8070   upper = (upper & ~(bfd_vma) 0x7ff)
8071 	  | ((offset >> 12) & 0x3ff)
8072 	  | (reloc_sign << 10);
8073   lower = (lower & ~(bfd_vma) 0x2fff)
8074 	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
8075 	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
8076 	  | ((offset >> 1) & 0x7ff);
8077   bfd_put_16 (abfd, upper, insn);
8078   bfd_put_16 (abfd, lower, insn + 2);
8079 }
8080 
8081 /* Thumb code calling an ARM function.  */
8082 
8083 static int
8084 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
8085 			 const char *           name,
8086 			 bfd *                  input_bfd,
8087 			 bfd *                  output_bfd,
8088 			 asection *             input_section,
8089 			 bfd_byte *             hit_data,
8090 			 asection *             sym_sec,
8091 			 bfd_vma                offset,
8092 			 bfd_signed_vma         addend,
8093 			 bfd_vma                val,
8094 			 char **error_message)
8095 {
8096   asection * s = 0;
8097   bfd_vma my_offset;
8098   long int ret_offset;
8099   struct elf_link_hash_entry * myh;
8100   struct elf32_arm_link_hash_table * globals;
8101 
8102   myh = find_thumb_glue (info, name, error_message);
8103   if (myh == NULL)
8104     return FALSE;
8105 
8106   globals = elf32_arm_hash_table (info);
8107   BFD_ASSERT (globals != NULL);
8108   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8109 
8110   my_offset = myh->root.u.def.value;
8111 
8112   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8113 			      THUMB2ARM_GLUE_SECTION_NAME);
8114 
8115   BFD_ASSERT (s != NULL);
8116   BFD_ASSERT (s->contents != NULL);
8117   BFD_ASSERT (s->output_section != NULL);
8118 
8119   if ((my_offset & 0x01) == 0x01)
8120     {
8121       if (sym_sec != NULL
8122 	  && sym_sec->owner != NULL
8123 	  && !INTERWORK_FLAG (sym_sec->owner))
8124 	{
8125 	  (*_bfd_error_handler)
8126 	    (_("%B(%s): warning: interworking not enabled.\n"
8127 	       "  first occurrence: %B: Thumb call to ARM"),
8128 	     sym_sec->owner, input_bfd, name);
8129 
8130 	  return FALSE;
8131 	}
8132 
8133       --my_offset;
8134       myh->root.u.def.value = my_offset;
8135 
8136       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
8137 		      s->contents + my_offset);
8138 
8139       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
8140 		      s->contents + my_offset + 2);
8141 
8142       ret_offset =
8143 	/* Address of destination of the stub.  */
8144 	((bfd_signed_vma) val)
8145 	- ((bfd_signed_vma)
8146 	   /* Offset from the start of the current section
8147 	      to the start of the stubs.  */
8148 	   (s->output_offset
8149 	    /* Offset of the start of this stub from the start of the stubs.  */
8150 	    + my_offset
8151 	    /* Address of the start of the current section.  */
8152 	    + s->output_section->vma)
8153 	   /* The branch instruction is 4 bytes into the stub.  */
8154 	   + 4
8155 	   /* ARM branches work from the pc of the instruction + 8.  */
8156 	   + 8);
8157 
8158       put_arm_insn (globals, output_bfd,
8159 		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
8160 		    s->contents + my_offset + 4);
8161     }
8162 
8163   BFD_ASSERT (my_offset <= globals->thumb_glue_size);
8164 
8165   /* Now go back and fix up the original BL insn to point to here.  */
8166   ret_offset =
8167     /* Address of where the stub is located.  */
8168     (s->output_section->vma + s->output_offset + my_offset)
8169      /* Address of where the BL is located.  */
8170     - (input_section->output_section->vma + input_section->output_offset
8171        + offset)
8172     /* Addend in the relocation.  */
8173     - addend
8174     /* Biassing for PC-relative addressing.  */
8175     - 8;
8176 
8177   insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
8178 
8179   return TRUE;
8180 }
8181 
8182 /* Populate an Arm to Thumb stub.  Returns the stub symbol.  */
8183 
8184 static struct elf_link_hash_entry *
8185 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
8186 			     const char *           name,
8187 			     bfd *                  input_bfd,
8188 			     bfd *                  output_bfd,
8189 			     asection *             sym_sec,
8190 			     bfd_vma                val,
8191 			     asection *             s,
8192 			     char **                error_message)
8193 {
8194   bfd_vma my_offset;
8195   long int ret_offset;
8196   struct elf_link_hash_entry * myh;
8197   struct elf32_arm_link_hash_table * globals;
8198 
8199   myh = find_arm_glue (info, name, error_message);
8200   if (myh == NULL)
8201     return NULL;
8202 
8203   globals = elf32_arm_hash_table (info);
8204   BFD_ASSERT (globals != NULL);
8205   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8206 
8207   my_offset = myh->root.u.def.value;
8208 
8209   if ((my_offset & 0x01) == 0x01)
8210     {
8211       if (sym_sec != NULL
8212 	  && sym_sec->owner != NULL
8213 	  && !INTERWORK_FLAG (sym_sec->owner))
8214 	{
8215 	  (*_bfd_error_handler)
8216 	    (_("%B(%s): warning: interworking not enabled.\n"
8217 	       "  first occurrence: %B: arm call to thumb"),
8218 	     sym_sec->owner, input_bfd, name);
8219 	}
8220 
8221       --my_offset;
8222       myh->root.u.def.value = my_offset;
8223 
8224       if (bfd_link_pic (info)
8225 	  || globals->root.is_relocatable_executable
8226 	  || globals->pic_veneer)
8227 	{
8228 	  /* For relocatable objects we can't use absolute addresses,
8229 	     so construct the address from a relative offset.  */
8230 	  /* TODO: If the offset is small it's probably worth
8231 	     constructing the address with adds.  */
8232 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
8233 			s->contents + my_offset);
8234 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
8235 			s->contents + my_offset + 4);
8236 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
8237 			s->contents + my_offset + 8);
8238 	  /* Adjust the offset by 4 for the position of the add,
8239 	     and 8 for the pipeline offset.  */
8240 	  ret_offset = (val - (s->output_offset
8241 			       + s->output_section->vma
8242 			       + my_offset + 12))
8243 		       | 1;
8244 	  bfd_put_32 (output_bfd, ret_offset,
8245 		      s->contents + my_offset + 12);
8246 	}
8247       else if (globals->use_blx)
8248 	{
8249 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
8250 			s->contents + my_offset);
8251 
8252 	  /* It's a thumb address.  Add the low order bit.  */
8253 	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
8254 		      s->contents + my_offset + 4);
8255 	}
8256       else
8257 	{
8258 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
8259 			s->contents + my_offset);
8260 
8261 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
8262 			s->contents + my_offset + 4);
8263 
8264 	  /* It's a thumb address.  Add the low order bit.  */
8265 	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
8266 		      s->contents + my_offset + 8);
8267 
8268 	  my_offset += 12;
8269 	}
8270     }
8271 
8272   BFD_ASSERT (my_offset <= globals->arm_glue_size);
8273 
8274   return myh;
8275 }
8276 
8277 /* Arm code calling a Thumb function.  */
8278 
8279 static int
8280 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
8281 			 const char *           name,
8282 			 bfd *                  input_bfd,
8283 			 bfd *                  output_bfd,
8284 			 asection *             input_section,
8285 			 bfd_byte *             hit_data,
8286 			 asection *             sym_sec,
8287 			 bfd_vma                offset,
8288 			 bfd_signed_vma         addend,
8289 			 bfd_vma                val,
8290 			 char **error_message)
8291 {
8292   unsigned long int tmp;
8293   bfd_vma my_offset;
8294   asection * s;
8295   long int ret_offset;
8296   struct elf_link_hash_entry * myh;
8297   struct elf32_arm_link_hash_table * globals;
8298 
8299   globals = elf32_arm_hash_table (info);
8300   BFD_ASSERT (globals != NULL);
8301   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8302 
8303   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8304 			      ARM2THUMB_GLUE_SECTION_NAME);
8305   BFD_ASSERT (s != NULL);
8306   BFD_ASSERT (s->contents != NULL);
8307   BFD_ASSERT (s->output_section != NULL);
8308 
8309   myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
8310 				     sym_sec, val, s, error_message);
8311   if (!myh)
8312     return FALSE;
8313 
8314   my_offset = myh->root.u.def.value;
8315   tmp = bfd_get_32 (input_bfd, hit_data);
8316   tmp = tmp & 0xFF000000;
8317 
8318   /* Somehow these are both 4 too far, so subtract 8.  */
8319   ret_offset = (s->output_offset
8320 		+ my_offset
8321 		+ s->output_section->vma
8322 		- (input_section->output_offset
8323 		   + input_section->output_section->vma
8324 		   + offset + addend)
8325 		- 8);
8326 
8327   tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
8328 
8329   bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
8330 
8331   return TRUE;
8332 }
8333 
8334 /* Populate Arm stub for an exported Thumb function.  */
8335 
8336 static bfd_boolean
8337 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
8338 {
8339   struct bfd_link_info * info = (struct bfd_link_info *) inf;
8340   asection * s;
8341   struct elf_link_hash_entry * myh;
8342   struct elf32_arm_link_hash_entry *eh;
8343   struct elf32_arm_link_hash_table * globals;
8344   asection *sec;
8345   bfd_vma val;
8346   char *error_message;
8347 
8348   eh = elf32_arm_hash_entry (h);
8349   /* Allocate stubs for exported Thumb functions on v4t.  */
8350   if (eh->export_glue == NULL)
8351     return TRUE;
8352 
8353   globals = elf32_arm_hash_table (info);
8354   BFD_ASSERT (globals != NULL);
8355   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8356 
8357   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8358 			      ARM2THUMB_GLUE_SECTION_NAME);
8359   BFD_ASSERT (s != NULL);
8360   BFD_ASSERT (s->contents != NULL);
8361   BFD_ASSERT (s->output_section != NULL);
8362 
8363   sec = eh->export_glue->root.u.def.section;
8364 
8365   BFD_ASSERT (sec->output_section != NULL);
8366 
8367   val = eh->export_glue->root.u.def.value + sec->output_offset
8368 	+ sec->output_section->vma;
8369 
8370   myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
8371 				     h->root.u.def.section->owner,
8372 				     globals->obfd, sec, val, s,
8373 				     &error_message);
8374   BFD_ASSERT (myh);
8375   return TRUE;
8376 }
8377 
8378 /* Populate ARMv4 BX veneers.  Returns the absolute adress of the veneer.  */
8379 
8380 static bfd_vma
8381 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
8382 {
8383   bfd_byte *p;
8384   bfd_vma glue_addr;
8385   asection *s;
8386   struct elf32_arm_link_hash_table *globals;
8387 
8388   globals = elf32_arm_hash_table (info);
8389   BFD_ASSERT (globals != NULL);
8390   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8391 
8392   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8393 			      ARM_BX_GLUE_SECTION_NAME);
8394   BFD_ASSERT (s != NULL);
8395   BFD_ASSERT (s->contents != NULL);
8396   BFD_ASSERT (s->output_section != NULL);
8397 
8398   BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
8399 
8400   glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
8401 
8402   if ((globals->bx_glue_offset[reg] & 1) == 0)
8403     {
8404       p = s->contents + glue_addr;
8405       bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
8406       bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
8407       bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
8408       globals->bx_glue_offset[reg] |= 1;
8409     }
8410 
8411   return glue_addr + s->output_section->vma + s->output_offset;
8412 }
8413 
8414 /* Generate Arm stubs for exported Thumb symbols.  */
8415 static void
8416 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8417 				  struct bfd_link_info *link_info)
8418 {
8419   struct elf32_arm_link_hash_table * globals;
8420 
8421   if (link_info == NULL)
8422     /* Ignore this if we are not called by the ELF backend linker.  */
8423     return;
8424 
8425   globals = elf32_arm_hash_table (link_info);
8426   if (globals == NULL)
8427     return;
8428 
8429   /* If blx is available then exported Thumb symbols are OK and there is
8430      nothing to do.  */
8431   if (globals->use_blx)
8432     return;
8433 
8434   elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8435 			  link_info);
8436 }
8437 
8438 /* Reserve space for COUNT dynamic relocations in relocation selection
8439    SRELOC.  */
8440 
8441 static void
8442 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8443 			      bfd_size_type count)
8444 {
8445   struct elf32_arm_link_hash_table *htab;
8446 
8447   htab = elf32_arm_hash_table (info);
8448   BFD_ASSERT (htab->root.dynamic_sections_created);
8449   if (sreloc == NULL)
8450     abort ();
8451   sreloc->size += RELOC_SIZE (htab) * count;
8452 }
8453 
8454 /* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
8455    dynamic, the relocations should go in SRELOC, otherwise they should
8456    go in the special .rel.iplt section.  */
8457 
8458 static void
8459 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8460 			    bfd_size_type count)
8461 {
8462   struct elf32_arm_link_hash_table *htab;
8463 
8464   htab = elf32_arm_hash_table (info);
8465   if (!htab->root.dynamic_sections_created)
8466     htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8467   else
8468     {
8469       BFD_ASSERT (sreloc != NULL);
8470       sreloc->size += RELOC_SIZE (htab) * count;
8471     }
8472 }
8473 
8474 /* Add relocation REL to the end of relocation section SRELOC.  */
8475 
8476 static void
8477 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8478 			asection *sreloc, Elf_Internal_Rela *rel)
8479 {
8480   bfd_byte *loc;
8481   struct elf32_arm_link_hash_table *htab;
8482 
8483   htab = elf32_arm_hash_table (info);
8484   if (!htab->root.dynamic_sections_created
8485       && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8486     sreloc = htab->root.irelplt;
8487   if (sreloc == NULL)
8488     abort ();
8489   loc = sreloc->contents;
8490   loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8491   if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8492     abort ();
8493   SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8494 }
8495 
8496 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8497    IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8498    to .plt.  */
8499 
8500 static void
8501 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
8502 			      bfd_boolean is_iplt_entry,
8503 			      union gotplt_union *root_plt,
8504 			      struct arm_plt_info *arm_plt)
8505 {
8506   struct elf32_arm_link_hash_table *htab;
8507   asection *splt;
8508   asection *sgotplt;
8509 
8510   htab = elf32_arm_hash_table (info);
8511 
8512   if (is_iplt_entry)
8513     {
8514       splt = htab->root.iplt;
8515       sgotplt = htab->root.igotplt;
8516 
8517       /* NaCl uses a special first entry in .iplt too.  */
8518       if (htab->nacl_p && splt->size == 0)
8519 	splt->size += htab->plt_header_size;
8520 
8521       /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
8522       elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
8523     }
8524   else
8525     {
8526       splt = htab->root.splt;
8527       sgotplt = htab->root.sgotplt;
8528 
8529       /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
8530       elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
8531 
8532       /* If this is the first .plt entry, make room for the special
8533 	 first entry.  */
8534       if (splt->size == 0)
8535 	splt->size += htab->plt_header_size;
8536 
8537       htab->next_tls_desc_index++;
8538     }
8539 
8540   /* Allocate the PLT entry itself, including any leading Thumb stub.  */
8541   if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
8542     splt->size += PLT_THUMB_STUB_SIZE;
8543   root_plt->offset = splt->size;
8544   splt->size += htab->plt_entry_size;
8545 
8546   if (!htab->symbian_p)
8547     {
8548       /* We also need to make an entry in the .got.plt section, which
8549 	 will be placed in the .got section by the linker script.  */
8550       if (is_iplt_entry)
8551 	arm_plt->got_offset = sgotplt->size;
8552       else
8553 	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
8554       sgotplt->size += 4;
8555     }
8556 }
8557 
8558 static bfd_vma
8559 arm_movw_immediate (bfd_vma value)
8560 {
8561   return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8562 }
8563 
8564 static bfd_vma
8565 arm_movt_immediate (bfd_vma value)
8566 {
8567   return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8568 }
8569 
8570 /* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
8571    the entry lives in .iplt and resolves to (*SYM_VALUE)().
8572    Otherwise, DYNINDX is the index of the symbol in the dynamic
8573    symbol table and SYM_VALUE is undefined.
8574 
8575    ROOT_PLT points to the offset of the PLT entry from the start of its
8576    section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
8577    bookkeeping information.
8578 
8579    Returns FALSE if there was a problem.  */
8580 
8581 static bfd_boolean
8582 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
8583 			      union gotplt_union *root_plt,
8584 			      struct arm_plt_info *arm_plt,
8585 			      int dynindx, bfd_vma sym_value)
8586 {
8587   struct elf32_arm_link_hash_table *htab;
8588   asection *sgot;
8589   asection *splt;
8590   asection *srel;
8591   bfd_byte *loc;
8592   bfd_vma plt_index;
8593   Elf_Internal_Rela rel;
8594   bfd_vma plt_header_size;
8595   bfd_vma got_header_size;
8596 
8597   htab = elf32_arm_hash_table (info);
8598 
8599   /* Pick the appropriate sections and sizes.  */
8600   if (dynindx == -1)
8601     {
8602       splt = htab->root.iplt;
8603       sgot = htab->root.igotplt;
8604       srel = htab->root.irelplt;
8605 
8606       /* There are no reserved entries in .igot.plt, and no special
8607 	 first entry in .iplt.  */
8608       got_header_size = 0;
8609       plt_header_size = 0;
8610     }
8611   else
8612     {
8613       splt = htab->root.splt;
8614       sgot = htab->root.sgotplt;
8615       srel = htab->root.srelplt;
8616 
8617       got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
8618       plt_header_size = htab->plt_header_size;
8619     }
8620   BFD_ASSERT (splt != NULL && srel != NULL);
8621 
8622   /* Fill in the entry in the procedure linkage table.  */
8623   if (htab->symbian_p)
8624     {
8625       BFD_ASSERT (dynindx >= 0);
8626       put_arm_insn (htab, output_bfd,
8627 		    elf32_arm_symbian_plt_entry[0],
8628 		    splt->contents + root_plt->offset);
8629       bfd_put_32 (output_bfd,
8630 		  elf32_arm_symbian_plt_entry[1],
8631 		  splt->contents + root_plt->offset + 4);
8632 
8633       /* Fill in the entry in the .rel.plt section.  */
8634       rel.r_offset = (splt->output_section->vma
8635 		      + splt->output_offset
8636 		      + root_plt->offset + 4);
8637       rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
8638 
8639       /* Get the index in the procedure linkage table which
8640 	 corresponds to this symbol.  This is the index of this symbol
8641 	 in all the symbols for which we are making plt entries.  The
8642 	 first entry in the procedure linkage table is reserved.  */
8643       plt_index = ((root_plt->offset - plt_header_size)
8644 		   / htab->plt_entry_size);
8645     }
8646   else
8647     {
8648       bfd_vma got_offset, got_address, plt_address;
8649       bfd_vma got_displacement, initial_got_entry;
8650       bfd_byte * ptr;
8651 
8652       BFD_ASSERT (sgot != NULL);
8653 
8654       /* Get the offset into the .(i)got.plt table of the entry that
8655 	 corresponds to this function.  */
8656       got_offset = (arm_plt->got_offset & -2);
8657 
8658       /* Get the index in the procedure linkage table which
8659 	 corresponds to this symbol.  This is the index of this symbol
8660 	 in all the symbols for which we are making plt entries.
8661 	 After the reserved .got.plt entries, all symbols appear in
8662 	 the same order as in .plt.  */
8663       plt_index = (got_offset - got_header_size) / 4;
8664 
8665       /* Calculate the address of the GOT entry.  */
8666       got_address = (sgot->output_section->vma
8667 		     + sgot->output_offset
8668 		     + got_offset);
8669 
8670       /* ...and the address of the PLT entry.  */
8671       plt_address = (splt->output_section->vma
8672 		     + splt->output_offset
8673 		     + root_plt->offset);
8674 
8675       ptr = splt->contents + root_plt->offset;
8676       if (htab->vxworks_p && bfd_link_pic (info))
8677 	{
8678 	  unsigned int i;
8679 	  bfd_vma val;
8680 
8681 	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
8682 	    {
8683 	      val = elf32_arm_vxworks_shared_plt_entry[i];
8684 	      if (i == 2)
8685 		val |= got_address - sgot->output_section->vma;
8686 	      if (i == 5)
8687 		val |= plt_index * RELOC_SIZE (htab);
8688 	      if (i == 2 || i == 5)
8689 		bfd_put_32 (output_bfd, val, ptr);
8690 	      else
8691 		put_arm_insn (htab, output_bfd, val, ptr);
8692 	    }
8693 	}
8694       else if (htab->vxworks_p)
8695 	{
8696 	  unsigned int i;
8697 	  bfd_vma val;
8698 
8699 	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
8700 	    {
8701 	      val = elf32_arm_vxworks_exec_plt_entry[i];
8702 	      if (i == 2)
8703 		val |= got_address;
8704 	      if (i == 4)
8705 		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
8706 	      if (i == 5)
8707 		val |= plt_index * RELOC_SIZE (htab);
8708 	      if (i == 2 || i == 5)
8709 		bfd_put_32 (output_bfd, val, ptr);
8710 	      else
8711 		put_arm_insn (htab, output_bfd, val, ptr);
8712 	    }
8713 
8714 	  loc = (htab->srelplt2->contents
8715 		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
8716 
8717 	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
8718 	     referencing the GOT for this PLT entry.  */
8719 	  rel.r_offset = plt_address + 8;
8720 	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
8721 	  rel.r_addend = got_offset;
8722 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8723 	  loc += RELOC_SIZE (htab);
8724 
8725 	  /* Create the R_ARM_ABS32 relocation referencing the
8726 	     beginning of the PLT for this GOT entry.  */
8727 	  rel.r_offset = got_address;
8728 	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
8729 	  rel.r_addend = 0;
8730 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8731 	}
8732       else if (htab->nacl_p)
8733 	{
8734 	  /* Calculate the displacement between the PLT slot and the
8735 	     common tail that's part of the special initial PLT slot.  */
8736 	  int32_t tail_displacement
8737 	    = ((splt->output_section->vma + splt->output_offset
8738 		+ ARM_NACL_PLT_TAIL_OFFSET)
8739 	       - (plt_address + htab->plt_entry_size + 4));
8740 	  BFD_ASSERT ((tail_displacement & 3) == 0);
8741 	  tail_displacement >>= 2;
8742 
8743 	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
8744 		      || (-tail_displacement & 0xff000000) == 0);
8745 
8746 	  /* Calculate the displacement between the PLT slot and the entry
8747 	     in the GOT.  The offset accounts for the value produced by
8748 	     adding to pc in the penultimate instruction of the PLT stub.  */
8749 	  got_displacement = (got_address
8750 			      - (plt_address + htab->plt_entry_size));
8751 
8752 	  /* NaCl does not support interworking at all.  */
8753 	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
8754 
8755 	  put_arm_insn (htab, output_bfd,
8756 			elf32_arm_nacl_plt_entry[0]
8757 			| arm_movw_immediate (got_displacement),
8758 			ptr + 0);
8759 	  put_arm_insn (htab, output_bfd,
8760 			elf32_arm_nacl_plt_entry[1]
8761 			| arm_movt_immediate (got_displacement),
8762 			ptr + 4);
8763 	  put_arm_insn (htab, output_bfd,
8764 			elf32_arm_nacl_plt_entry[2],
8765 			ptr + 8);
8766 	  put_arm_insn (htab, output_bfd,
8767 			elf32_arm_nacl_plt_entry[3]
8768 			| (tail_displacement & 0x00ffffff),
8769 			ptr + 12);
8770 	}
8771       else if (using_thumb_only (htab))
8772 	{
8773 	  /* PR ld/16017: Generate thumb only PLT entries.  */
8774 	  if (!using_thumb2 (htab))
8775 	    {
8776 	      /* FIXME: We ought to be able to generate thumb-1 PLT
8777 		 instructions...  */
8778 	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
8779 				  output_bfd);
8780 	      return FALSE;
8781 	    }
8782 
8783 	  /* Calculate the displacement between the PLT slot and the entry in
8784 	     the GOT.  The 12-byte offset accounts for the value produced by
8785 	     adding to pc in the 3rd instruction of the PLT stub.  */
8786 	  got_displacement = got_address - (plt_address + 12);
8787 
8788 	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
8789 	     instead of 'put_thumb_insn'.  */
8790 	  put_arm_insn (htab, output_bfd,
8791 			elf32_thumb2_plt_entry[0]
8792 			| ((got_displacement & 0x000000ff) << 16)
8793 			| ((got_displacement & 0x00000700) << 20)
8794 			| ((got_displacement & 0x00000800) >>  1)
8795 			| ((got_displacement & 0x0000f000) >> 12),
8796 			ptr + 0);
8797 	  put_arm_insn (htab, output_bfd,
8798 			elf32_thumb2_plt_entry[1]
8799 			| ((got_displacement & 0x00ff0000)      )
8800 			| ((got_displacement & 0x07000000) <<  4)
8801 			| ((got_displacement & 0x08000000) >> 17)
8802 			| ((got_displacement & 0xf0000000) >> 28),
8803 			ptr + 4);
8804 	  put_arm_insn (htab, output_bfd,
8805 			elf32_thumb2_plt_entry[2],
8806 			ptr + 8);
8807 	  put_arm_insn (htab, output_bfd,
8808 			elf32_thumb2_plt_entry[3],
8809 			ptr + 12);
8810 	}
8811       else
8812 	{
8813 	  /* Calculate the displacement between the PLT slot and the
8814 	     entry in the GOT.  The eight-byte offset accounts for the
8815 	     value produced by adding to pc in the first instruction
8816 	     of the PLT stub.  */
8817 	  got_displacement = got_address - (plt_address + 8);
8818 
8819 	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
8820 	    {
8821 	      put_thumb_insn (htab, output_bfd,
8822 			      elf32_arm_plt_thumb_stub[0], ptr - 4);
8823 	      put_thumb_insn (htab, output_bfd,
8824 			      elf32_arm_plt_thumb_stub[1], ptr - 2);
8825 	    }
8826 
8827 	  if (!elf32_arm_use_long_plt_entry)
8828 	    {
8829 	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
8830 
8831 	      put_arm_insn (htab, output_bfd,
8832 			    elf32_arm_plt_entry_short[0]
8833 			    | ((got_displacement & 0x0ff00000) >> 20),
8834 			    ptr + 0);
8835 	      put_arm_insn (htab, output_bfd,
8836 			    elf32_arm_plt_entry_short[1]
8837 			    | ((got_displacement & 0x000ff000) >> 12),
8838 			    ptr+ 4);
8839 	      put_arm_insn (htab, output_bfd,
8840 			    elf32_arm_plt_entry_short[2]
8841 			    | (got_displacement & 0x00000fff),
8842 			    ptr + 8);
8843 #ifdef FOUR_WORD_PLT
8844 	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
8845 #endif
8846 	    }
8847 	  else
8848 	    {
8849 	      put_arm_insn (htab, output_bfd,
8850 			    elf32_arm_plt_entry_long[0]
8851 			    | ((got_displacement & 0xf0000000) >> 28),
8852 			    ptr + 0);
8853 	      put_arm_insn (htab, output_bfd,
8854 			    elf32_arm_plt_entry_long[1]
8855 			    | ((got_displacement & 0x0ff00000) >> 20),
8856 			    ptr + 4);
8857 	      put_arm_insn (htab, output_bfd,
8858 			    elf32_arm_plt_entry_long[2]
8859 			    | ((got_displacement & 0x000ff000) >> 12),
8860 			    ptr+ 8);
8861 	      put_arm_insn (htab, output_bfd,
8862 			    elf32_arm_plt_entry_long[3]
8863 			    | (got_displacement & 0x00000fff),
8864 			    ptr + 12);
8865 	    }
8866 	}
8867 
8868       /* Fill in the entry in the .rel(a).(i)plt section.  */
8869       rel.r_offset = got_address;
8870       rel.r_addend = 0;
8871       if (dynindx == -1)
8872 	{
8873 	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
8874 	     The dynamic linker or static executable then calls SYM_VALUE
8875 	     to determine the correct run-time value of the .igot.plt entry.  */
8876 	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
8877 	  initial_got_entry = sym_value;
8878 	}
8879       else
8880 	{
8881 	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
8882 	  initial_got_entry = (splt->output_section->vma
8883 			       + splt->output_offset);
8884 	}
8885 
8886       /* Fill in the entry in the global offset table.  */
8887       bfd_put_32 (output_bfd, initial_got_entry,
8888 		  sgot->contents + got_offset);
8889     }
8890 
8891   if (dynindx == -1)
8892     elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
8893   else
8894     {
8895       loc = srel->contents + plt_index * RELOC_SIZE (htab);
8896       SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8897     }
8898 
8899   return TRUE;
8900 }
8901 
8902 /* Some relocations map to different relocations depending on the
8903    target.  Return the real relocation.  */
8904 
8905 static int
8906 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8907 		     int r_type)
8908 {
8909   switch (r_type)
8910     {
8911     case R_ARM_TARGET1:
8912       if (globals->target1_is_rel)
8913 	return R_ARM_REL32;
8914       else
8915 	return R_ARM_ABS32;
8916 
8917     case R_ARM_TARGET2:
8918       return globals->target2_reloc;
8919 
8920     default:
8921       return r_type;
8922     }
8923 }
8924 
8925 /* Return the base VMA address which should be subtracted from real addresses
8926    when resolving @dtpoff relocation.
8927    This is PT_TLS segment p_vaddr.  */
8928 
8929 static bfd_vma
8930 dtpoff_base (struct bfd_link_info *info)
8931 {
8932   /* If tls_sec is NULL, we should have signalled an error already.  */
8933   if (elf_hash_table (info)->tls_sec == NULL)
8934     return 0;
8935   return elf_hash_table (info)->tls_sec->vma;
8936 }
8937 
8938 /* Return the relocation value for @tpoff relocation
8939    if STT_TLS virtual address is ADDRESS.  */
8940 
8941 static bfd_vma
8942 tpoff (struct bfd_link_info *info, bfd_vma address)
8943 {
8944   struct elf_link_hash_table *htab = elf_hash_table (info);
8945   bfd_vma base;
8946 
8947   /* If tls_sec is NULL, we should have signalled an error already.  */
8948   if (htab->tls_sec == NULL)
8949     return 0;
8950   base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8951   return address - htab->tls_sec->vma + base;
8952 }
8953 
8954 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8955    VALUE is the relocation value.  */
8956 
8957 static bfd_reloc_status_type
8958 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8959 {
8960   if (value > 0xfff)
8961     return bfd_reloc_overflow;
8962 
8963   value |= bfd_get_32 (abfd, data) & 0xfffff000;
8964   bfd_put_32 (abfd, value, data);
8965   return bfd_reloc_ok;
8966 }
8967 
8968 /* Handle TLS relaxations.  Relaxing is possible for symbols that use
8969    R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8970    R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8971 
8972    Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8973    is to then call final_link_relocate.  Return other values in the
8974    case of error.
8975 
8976    FIXME:When --emit-relocs is in effect, we'll emit relocs describing
8977    the pre-relaxed code.  It would be nice if the relocs were updated
8978    to match the optimization.   */
8979 
8980 static bfd_reloc_status_type
8981 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
8982 		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
8983 		     Elf_Internal_Rela *rel, unsigned long is_local)
8984 {
8985   unsigned long insn;
8986 
8987   switch (ELF32_R_TYPE (rel->r_info))
8988     {
8989     default:
8990       return bfd_reloc_notsupported;
8991 
8992     case R_ARM_TLS_GOTDESC:
8993       if (is_local)
8994 	insn = 0;
8995       else
8996 	{
8997 	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
8998 	  if (insn & 1)
8999 	    insn -= 5; /* THUMB */
9000 	  else
9001 	    insn -= 8; /* ARM */
9002 	}
9003       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
9004       return bfd_reloc_continue;
9005 
9006     case R_ARM_THM_TLS_DESCSEQ:
9007       /* Thumb insn.  */
9008       insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
9009       if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
9010 	{
9011 	  if (is_local)
9012 	    /* nop */
9013 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
9014 	}
9015       else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
9016 	{
9017 	  if (is_local)
9018 	    /* nop */
9019 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
9020 	  else
9021 	    /* ldr rx,[ry] */
9022 	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
9023 	}
9024       else if ((insn & 0xff87) == 0x4780)  /* blx rx */
9025 	{
9026 	  if (is_local)
9027 	    /* nop */
9028 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
9029 	  else
9030 	    /* mov r0, rx */
9031 	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
9032 			contents + rel->r_offset);
9033 	}
9034       else
9035 	{
9036 	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
9037 	    /* It's a 32 bit instruction, fetch the rest of it for
9038 	       error generation.  */
9039 	    insn = (insn << 16)
9040 	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
9041 	  (*_bfd_error_handler)
9042 	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
9043 	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
9044 	  return bfd_reloc_notsupported;
9045 	}
9046       break;
9047 
9048     case R_ARM_TLS_DESCSEQ:
9049       /* arm insn.  */
9050       insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
9051       if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
9052 	{
9053 	  if (is_local)
9054 	    /* mov rx, ry */
9055 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
9056 			contents + rel->r_offset);
9057 	}
9058       else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
9059 	{
9060 	  if (is_local)
9061 	    /* nop */
9062 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
9063 	  else
9064 	    /* ldr rx,[ry] */
9065 	    bfd_put_32 (input_bfd, insn & 0xfffff000,
9066 			contents + rel->r_offset);
9067 	}
9068       else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
9069 	{
9070 	  if (is_local)
9071 	    /* nop */
9072 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
9073 	  else
9074 	    /* mov r0, rx */
9075 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
9076 			contents + rel->r_offset);
9077 	}
9078       else
9079 	{
9080 	  (*_bfd_error_handler)
9081 	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
9082 	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
9083 	  return bfd_reloc_notsupported;
9084 	}
9085       break;
9086 
9087     case R_ARM_TLS_CALL:
9088       /* GD->IE relaxation, turn the instruction into 'nop' or
9089 	 'ldr r0, [pc,r0]'  */
9090       insn = is_local ? 0xe1a00000 : 0xe79f0000;
9091       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
9092       break;
9093 
9094     case R_ARM_THM_TLS_CALL:
9095       /* GD->IE relaxation.  */
9096       if (!is_local)
9097 	/* add r0,pc; ldr r0, [r0]  */
9098 	insn = 0x44786800;
9099       else if (using_thumb2 (globals))
9100 	/* nop.w */
9101 	insn = 0xf3af8000;
9102       else
9103 	/* nop; nop */
9104 	insn = 0xbf00bf00;
9105 
9106       bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
9107       bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
9108       break;
9109     }
9110   return bfd_reloc_ok;
9111 }
9112 
9113 /* For a given value of n, calculate the value of G_n as required to
9114    deal with group relocations.  We return it in the form of an
9115    encoded constant-and-rotation, together with the final residual.  If n is
9116    specified as less than zero, then final_residual is filled with the
9117    input value and no further action is performed.  */
9118 
9119 static bfd_vma
9120 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
9121 {
9122   int current_n;
9123   bfd_vma g_n;
9124   bfd_vma encoded_g_n = 0;
9125   bfd_vma residual = value; /* Also known as Y_n.  */
9126 
9127   for (current_n = 0; current_n <= n; current_n++)
9128     {
9129       int shift;
9130 
9131       /* Calculate which part of the value to mask.  */
9132       if (residual == 0)
9133 	shift = 0;
9134       else
9135 	{
9136 	  int msb;
9137 
9138 	  /* Determine the most significant bit in the residual and
9139 	     align the resulting value to a 2-bit boundary.  */
9140 	  for (msb = 30; msb >= 0; msb -= 2)
9141 	    if (residual & (3 << msb))
9142 	      break;
9143 
9144 	  /* The desired shift is now (msb - 6), or zero, whichever
9145 	     is the greater.  */
9146 	  shift = msb - 6;
9147 	  if (shift < 0)
9148 	    shift = 0;
9149 	}
9150 
9151       /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
9152       g_n = residual & (0xff << shift);
9153       encoded_g_n = (g_n >> shift)
9154 		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
9155 
9156       /* Calculate the residual for the next time around.  */
9157       residual &= ~g_n;
9158     }
9159 
9160   *final_residual = residual;
9161 
9162   return encoded_g_n;
9163 }
9164 
9165 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9166    Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */
9167 
9168 static int
9169 identify_add_or_sub (bfd_vma insn)
9170 {
9171   int opcode = insn & 0x1e00000;
9172 
9173   if (opcode == 1 << 23) /* ADD */
9174     return 1;
9175 
9176   if (opcode == 1 << 22) /* SUB */
9177     return -1;
9178 
9179   return 0;
9180 }
9181 
9182 /* Perform a relocation as part of a final link.  */
9183 
9184 static bfd_reloc_status_type
9185 elf32_arm_final_link_relocate (reloc_howto_type *           howto,
9186 			       bfd *                        input_bfd,
9187 			       bfd *                        output_bfd,
9188 			       asection *                   input_section,
9189 			       bfd_byte *                   contents,
9190 			       Elf_Internal_Rela *          rel,
9191 			       bfd_vma                      value,
9192 			       struct bfd_link_info *       info,
9193 			       asection *                   sym_sec,
9194 			       const char *                 sym_name,
9195 			       unsigned char                st_type,
9196 			       enum arm_st_branch_type      branch_type,
9197 			       struct elf_link_hash_entry * h,
9198 			       bfd_boolean *                unresolved_reloc_p,
9199 			       char **                      error_message)
9200 {
9201   unsigned long                 r_type = howto->type;
9202   unsigned long                 r_symndx;
9203   bfd_byte *                    hit_data = contents + rel->r_offset;
9204   bfd_vma *                     local_got_offsets;
9205   bfd_vma *                     local_tlsdesc_gotents;
9206   asection *                    sgot;
9207   asection *                    splt;
9208   asection *                    sreloc = NULL;
9209   asection *                    srelgot;
9210   bfd_vma                       addend;
9211   bfd_signed_vma                signed_addend;
9212   unsigned char                 dynreloc_st_type;
9213   bfd_vma                       dynreloc_value;
9214   struct elf32_arm_link_hash_table * globals;
9215   struct elf32_arm_link_hash_entry *eh;
9216   union gotplt_union           *root_plt;
9217   struct arm_plt_info          *arm_plt;
9218   bfd_vma                       plt_offset;
9219   bfd_vma                       gotplt_offset;
9220   bfd_boolean                   has_iplt_entry;
9221 
9222   globals = elf32_arm_hash_table (info);
9223   if (globals == NULL)
9224     return bfd_reloc_notsupported;
9225 
9226   BFD_ASSERT (is_arm_elf (input_bfd));
9227 
9228   /* Some relocation types map to different relocations depending on the
9229      target.  We pick the right one here.  */
9230   r_type = arm_real_reloc_type (globals, r_type);
9231 
9232   /* It is possible to have linker relaxations on some TLS access
9233      models.  Update our information here.  */
9234   r_type = elf32_arm_tls_transition (info, r_type, h);
9235 
9236   if (r_type != howto->type)
9237     howto = elf32_arm_howto_from_type (r_type);
9238 
9239   eh = (struct elf32_arm_link_hash_entry *) h;
9240   sgot = globals->root.sgot;
9241   local_got_offsets = elf_local_got_offsets (input_bfd);
9242   local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
9243 
9244   if (globals->root.dynamic_sections_created)
9245     srelgot = globals->root.srelgot;
9246   else
9247     srelgot = NULL;
9248 
9249   r_symndx = ELF32_R_SYM (rel->r_info);
9250 
9251   if (globals->use_rel)
9252     {
9253       addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
9254 
9255       if (addend & ((howto->src_mask + 1) >> 1))
9256 	{
9257 	  signed_addend = -1;
9258 	  signed_addend &= ~ howto->src_mask;
9259 	  signed_addend |= addend;
9260 	}
9261       else
9262 	signed_addend = addend;
9263     }
9264   else
9265     addend = signed_addend = rel->r_addend;
9266 
9267   /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9268      are resolving a function call relocation.  */
9269   if (using_thumb_only (globals)
9270       && (r_type == R_ARM_THM_CALL
9271 	  || r_type == R_ARM_THM_JUMP24)
9272       && branch_type == ST_BRANCH_TO_ARM)
9273     branch_type = ST_BRANCH_TO_THUMB;
9274 
9275   /* Record the symbol information that should be used in dynamic
9276      relocations.  */
9277   dynreloc_st_type = st_type;
9278   dynreloc_value = value;
9279   if (branch_type == ST_BRANCH_TO_THUMB)
9280     dynreloc_value |= 1;
9281 
9282   /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
9283      VALUE appropriately for relocations that we resolve at link time.  */
9284   has_iplt_entry = FALSE;
9285   if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
9286       && root_plt->offset != (bfd_vma) -1)
9287     {
9288       plt_offset = root_plt->offset;
9289       gotplt_offset = arm_plt->got_offset;
9290 
9291       if (h == NULL || eh->is_iplt)
9292 	{
9293 	  has_iplt_entry = TRUE;
9294 	  splt = globals->root.iplt;
9295 
9296 	  /* Populate .iplt entries here, because not all of them will
9297 	     be seen by finish_dynamic_symbol.  The lower bit is set if
9298 	     we have already populated the entry.  */
9299 	  if (plt_offset & 1)
9300 	    plt_offset--;
9301 	  else
9302 	    {
9303 	      if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9304 						-1, dynreloc_value))
9305 		root_plt->offset |= 1;
9306 	      else
9307 		return bfd_reloc_notsupported;
9308 	    }
9309 
9310 	  /* Static relocations always resolve to the .iplt entry.  */
9311 	  st_type = STT_FUNC;
9312 	  value = (splt->output_section->vma
9313 		   + splt->output_offset
9314 		   + plt_offset);
9315 	  branch_type = ST_BRANCH_TO_ARM;
9316 
9317 	  /* If there are non-call relocations that resolve to the .iplt
9318 	     entry, then all dynamic ones must too.  */
9319 	  if (arm_plt->noncall_refcount != 0)
9320 	    {
9321 	      dynreloc_st_type = st_type;
9322 	      dynreloc_value = value;
9323 	    }
9324 	}
9325       else
9326 	/* We populate the .plt entry in finish_dynamic_symbol.  */
9327 	splt = globals->root.splt;
9328     }
9329   else
9330     {
9331       splt = NULL;
9332       plt_offset = (bfd_vma) -1;
9333       gotplt_offset = (bfd_vma) -1;
9334     }
9335 
9336   switch (r_type)
9337     {
9338     case R_ARM_NONE:
9339       /* We don't need to find a value for this symbol.  It's just a
9340 	 marker.  */
9341       *unresolved_reloc_p = FALSE;
9342       return bfd_reloc_ok;
9343 
9344     case R_ARM_ABS12:
9345       if (!globals->vxworks_p)
9346 	return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9347 
9348     case R_ARM_PC24:
9349     case R_ARM_ABS32:
9350     case R_ARM_ABS32_NOI:
9351     case R_ARM_REL32:
9352     case R_ARM_REL32_NOI:
9353     case R_ARM_CALL:
9354     case R_ARM_JUMP24:
9355     case R_ARM_XPC25:
9356     case R_ARM_PREL31:
9357     case R_ARM_PLT32:
9358       /* Handle relocations which should use the PLT entry.  ABS32/REL32
9359 	 will use the symbol's value, which may point to a PLT entry, but we
9360 	 don't need to handle that here.  If we created a PLT entry, all
9361 	 branches in this object should go to it, except if the PLT is too
9362 	 far away, in which case a long branch stub should be inserted.  */
9363       if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9364 	   && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9365 	   && r_type != R_ARM_CALL
9366 	   && r_type != R_ARM_JUMP24
9367 	   && r_type != R_ARM_PLT32)
9368 	  && plt_offset != (bfd_vma) -1)
9369 	{
9370 	  /* If we've created a .plt section, and assigned a PLT entry
9371 	     to this function, it must either be a STT_GNU_IFUNC reference
9372 	     or not be known to bind locally.  In other cases, we should
9373 	     have cleared the PLT entry by now.  */
9374 	  BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9375 
9376 	  value = (splt->output_section->vma
9377 		   + splt->output_offset
9378 		   + plt_offset);
9379 	  *unresolved_reloc_p = FALSE;
9380 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
9381 					   contents, rel->r_offset, value,
9382 					   rel->r_addend);
9383 	}
9384 
9385       /* When generating a shared object or relocatable executable, these
9386 	 relocations are copied into the output file to be resolved at
9387 	 run time.  */
9388       if ((bfd_link_pic (info)
9389 	   || globals->root.is_relocatable_executable)
9390 	  && (input_section->flags & SEC_ALLOC)
9391 	  && !(globals->vxworks_p
9392 	       && strcmp (input_section->output_section->name,
9393 			  ".tls_vars") == 0)
9394 	  && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9395 	      || !SYMBOL_CALLS_LOCAL (info, h))
9396 	  && !(input_bfd == globals->stub_bfd
9397 	       && strstr (input_section->name, STUB_SUFFIX))
9398 	  && (h == NULL
9399 	      || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9400 	      || h->root.type != bfd_link_hash_undefweak)
9401 	  && r_type != R_ARM_PC24
9402 	  && r_type != R_ARM_CALL
9403 	  && r_type != R_ARM_JUMP24
9404 	  && r_type != R_ARM_PREL31
9405 	  && r_type != R_ARM_PLT32)
9406 	{
9407 	  Elf_Internal_Rela outrel;
9408 	  bfd_boolean skip, relocate;
9409 
9410 	  if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9411 	      && !h->def_regular)
9412 	    {
9413 	      char *v = _("shared object");
9414 
9415 	      if (bfd_link_executable (info))
9416 		v = _("PIE executable");
9417 
9418 	      (*_bfd_error_handler)
9419 		(_("%B: relocation %s against external or undefined symbol `%s'"
9420 		   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9421 		 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9422 	      return bfd_reloc_notsupported;
9423 	    }
9424 
9425 	  *unresolved_reloc_p = FALSE;
9426 
9427 	  if (sreloc == NULL && globals->root.dynamic_sections_created)
9428 	    {
9429 	      sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9430 							   ! globals->use_rel);
9431 
9432 	      if (sreloc == NULL)
9433 		return bfd_reloc_notsupported;
9434 	    }
9435 
9436 	  skip = FALSE;
9437 	  relocate = FALSE;
9438 
9439 	  outrel.r_addend = addend;
9440 	  outrel.r_offset =
9441 	    _bfd_elf_section_offset (output_bfd, info, input_section,
9442 				     rel->r_offset);
9443 	  if (outrel.r_offset == (bfd_vma) -1)
9444 	    skip = TRUE;
9445 	  else if (outrel.r_offset == (bfd_vma) -2)
9446 	    skip = TRUE, relocate = TRUE;
9447 	  outrel.r_offset += (input_section->output_section->vma
9448 			      + input_section->output_offset);
9449 
9450 	  if (skip)
9451 	    memset (&outrel, 0, sizeof outrel);
9452 	  else if (h != NULL
9453 		   && h->dynindx != -1
9454 		   && (!bfd_link_pic (info)
9455 		       || !SYMBOLIC_BIND (info, h)
9456 		       || !h->def_regular))
9457 	    outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9458 	  else
9459 	    {
9460 	      int symbol;
9461 
9462 	      /* This symbol is local, or marked to become local.  */
9463 	      BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9464 	      if (globals->symbian_p)
9465 		{
9466 		  asection *osec;
9467 
9468 		  /* On Symbian OS, the data segment and text segement
9469 		     can be relocated independently.  Therefore, we
9470 		     must indicate the segment to which this
9471 		     relocation is relative.  The BPABI allows us to
9472 		     use any symbol in the right segment; we just use
9473 		     the section symbol as it is convenient.  (We
9474 		     cannot use the symbol given by "h" directly as it
9475 		     will not appear in the dynamic symbol table.)
9476 
9477 		     Note that the dynamic linker ignores the section
9478 		     symbol value, so we don't subtract osec->vma
9479 		     from the emitted reloc addend.  */
9480 		  if (sym_sec)
9481 		    osec = sym_sec->output_section;
9482 		  else
9483 		    osec = input_section->output_section;
9484 		  symbol = elf_section_data (osec)->dynindx;
9485 		  if (symbol == 0)
9486 		    {
9487 		      struct elf_link_hash_table *htab = elf_hash_table (info);
9488 
9489 		      if ((osec->flags & SEC_READONLY) == 0
9490 			  && htab->data_index_section != NULL)
9491 			osec = htab->data_index_section;
9492 		      else
9493 			osec = htab->text_index_section;
9494 		      symbol = elf_section_data (osec)->dynindx;
9495 		    }
9496 		  BFD_ASSERT (symbol != 0);
9497 		}
9498 	      else
9499 		/* On SVR4-ish systems, the dynamic loader cannot
9500 		   relocate the text and data segments independently,
9501 		   so the symbol does not matter.  */
9502 		symbol = 0;
9503 	      if (dynreloc_st_type == STT_GNU_IFUNC)
9504 		/* We have an STT_GNU_IFUNC symbol that doesn't resolve
9505 		   to the .iplt entry.  Instead, every non-call reference
9506 		   must use an R_ARM_IRELATIVE relocation to obtain the
9507 		   correct run-time address.  */
9508 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9509 	      else
9510 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9511 	      if (globals->use_rel)
9512 		relocate = TRUE;
9513 	      else
9514 		outrel.r_addend += dynreloc_value;
9515 	    }
9516 
9517 	  elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9518 
9519 	  /* If this reloc is against an external symbol, we do not want to
9520 	     fiddle with the addend.  Otherwise, we need to include the symbol
9521 	     value so that it becomes an addend for the dynamic reloc.  */
9522 	  if (! relocate)
9523 	    return bfd_reloc_ok;
9524 
9525 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
9526 					   contents, rel->r_offset,
9527 					   dynreloc_value, (bfd_vma) 0);
9528 	}
9529       else switch (r_type)
9530 	{
9531 	case R_ARM_ABS12:
9532 	  return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9533 
9534 	case R_ARM_XPC25:	  /* Arm BLX instruction.  */
9535 	case R_ARM_CALL:
9536 	case R_ARM_JUMP24:
9537 	case R_ARM_PC24:	  /* Arm B/BL instruction.  */
9538 	case R_ARM_PLT32:
9539 	  {
9540 	  struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9541 
9542 	  if (r_type == R_ARM_XPC25)
9543 	    {
9544 	      /* Check for Arm calling Arm function.  */
9545 	      /* FIXME: Should we translate the instruction into a BL
9546 		 instruction instead ?  */
9547 	      if (branch_type != ST_BRANCH_TO_THUMB)
9548 		(*_bfd_error_handler)
9549 		  (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9550 		   input_bfd,
9551 		   h ? h->root.root.string : "(local)");
9552 	    }
9553 	  else if (r_type == R_ARM_PC24)
9554 	    {
9555 	      /* Check for Arm calling Thumb function.  */
9556 	      if (branch_type == ST_BRANCH_TO_THUMB)
9557 		{
9558 		  if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9559 					       output_bfd, input_section,
9560 					       hit_data, sym_sec, rel->r_offset,
9561 					       signed_addend, value,
9562 					       error_message))
9563 		    return bfd_reloc_ok;
9564 		  else
9565 		    return bfd_reloc_dangerous;
9566 		}
9567 	    }
9568 
9569 	  /* Check if a stub has to be inserted because the
9570 	     destination is too far or we are changing mode.  */
9571 	  if (   r_type == R_ARM_CALL
9572 	      || r_type == R_ARM_JUMP24
9573 	      || r_type == R_ARM_PLT32)
9574 	    {
9575 	      enum elf32_arm_stub_type stub_type = arm_stub_none;
9576 	      struct elf32_arm_link_hash_entry *hash;
9577 
9578 	      hash = (struct elf32_arm_link_hash_entry *) h;
9579 	      stub_type = arm_type_of_stub (info, input_section, rel,
9580 					    st_type, &branch_type,
9581 					    hash, value, sym_sec,
9582 					    input_bfd, sym_name);
9583 
9584 	      if (stub_type != arm_stub_none)
9585 		{
9586 		  /* The target is out of reach, so redirect the
9587 		     branch to the local stub for this function.  */
9588 		  stub_entry = elf32_arm_get_stub_entry (input_section,
9589 							 sym_sec, h,
9590 							 rel, globals,
9591 							 stub_type);
9592 		  {
9593 		    if (stub_entry != NULL)
9594 		      value = (stub_entry->stub_offset
9595 			       + stub_entry->stub_sec->output_offset
9596 			       + stub_entry->stub_sec->output_section->vma);
9597 
9598 		    if (plt_offset != (bfd_vma) -1)
9599 		      *unresolved_reloc_p = FALSE;
9600 		  }
9601 		}
9602 	      else
9603 		{
9604 		  /* If the call goes through a PLT entry, make sure to
9605 		     check distance to the right destination address.  */
9606 		  if (plt_offset != (bfd_vma) -1)
9607 		    {
9608 		      value = (splt->output_section->vma
9609 			       + splt->output_offset
9610 			       + plt_offset);
9611 		      *unresolved_reloc_p = FALSE;
9612 		      /* The PLT entry is in ARM mode, regardless of the
9613 			 target function.  */
9614 		      branch_type = ST_BRANCH_TO_ARM;
9615 		    }
9616 		}
9617 	    }
9618 
9619 	  /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9620 	     where:
9621 	      S is the address of the symbol in the relocation.
9622 	      P is address of the instruction being relocated.
9623 	      A is the addend (extracted from the instruction) in bytes.
9624 
9625 	     S is held in 'value'.
9626 	     P is the base address of the section containing the
9627 	       instruction plus the offset of the reloc into that
9628 	       section, ie:
9629 		 (input_section->output_section->vma +
9630 		  input_section->output_offset +
9631 		  rel->r_offset).
9632 	     A is the addend, converted into bytes, ie:
9633 		 (signed_addend * 4)
9634 
9635 	     Note: None of these operations have knowledge of the pipeline
9636 	     size of the processor, thus it is up to the assembler to
9637 	     encode this information into the addend.  */
9638 	  value -= (input_section->output_section->vma
9639 		    + input_section->output_offset);
9640 	  value -= rel->r_offset;
9641 	  if (globals->use_rel)
9642 	    value += (signed_addend << howto->size);
9643 	  else
9644 	    /* RELA addends do not have to be adjusted by howto->size.  */
9645 	    value += signed_addend;
9646 
9647 	  signed_addend = value;
9648 	  signed_addend >>= howto->rightshift;
9649 
9650 	  /* A branch to an undefined weak symbol is turned into a jump to
9651 	     the next instruction unless a PLT entry will be created.
9652 	     Do the same for local undefined symbols (but not for STN_UNDEF).
9653 	     The jump to the next instruction is optimized as a NOP depending
9654 	     on the architecture.  */
9655 	  if (h ? (h->root.type == bfd_link_hash_undefweak
9656 		   && plt_offset == (bfd_vma) -1)
9657 	      : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9658 	    {
9659 	      value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9660 
9661 	      if (arch_has_arm_nop (globals))
9662 		value |= 0x0320f000;
9663 	      else
9664 		value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
9665 	    }
9666 	  else
9667 	    {
9668 	      /* Perform a signed range check.  */
9669 	      if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
9670 		  || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9671 		return bfd_reloc_overflow;
9672 
9673 	      addend = (value & 2);
9674 
9675 	      value = (signed_addend & howto->dst_mask)
9676 		| (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9677 
9678 	      if (r_type == R_ARM_CALL)
9679 		{
9680 		  /* Set the H bit in the BLX instruction.  */
9681 		  if (branch_type == ST_BRANCH_TO_THUMB)
9682 		    {
9683 		      if (addend)
9684 			value |= (1 << 24);
9685 		      else
9686 			value &= ~(bfd_vma)(1 << 24);
9687 		    }
9688 
9689 		  /* Select the correct instruction (BL or BLX).  */
9690 		  /* Only if we are not handling a BL to a stub. In this
9691 		     case, mode switching is performed by the stub.  */
9692 		  if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9693 		    value |= (1 << 28);
9694 		  else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9695 		    {
9696 		      value &= ~(bfd_vma)(1 << 28);
9697 		      value |= (1 << 24);
9698 		    }
9699 		}
9700 	    }
9701 	  }
9702 	  break;
9703 
9704 	case R_ARM_ABS32:
9705 	  value += addend;
9706 	  if (branch_type == ST_BRANCH_TO_THUMB)
9707 	    value |= 1;
9708 	  break;
9709 
9710 	case R_ARM_ABS32_NOI:
9711 	  value += addend;
9712 	  break;
9713 
9714 	case R_ARM_REL32:
9715 	  value += addend;
9716 	  if (branch_type == ST_BRANCH_TO_THUMB)
9717 	    value |= 1;
9718 	  value -= (input_section->output_section->vma
9719 		    + input_section->output_offset + rel->r_offset);
9720 	  break;
9721 
9722 	case R_ARM_REL32_NOI:
9723 	  value += addend;
9724 	  value -= (input_section->output_section->vma
9725 		    + input_section->output_offset + rel->r_offset);
9726 	  break;
9727 
9728 	case R_ARM_PREL31:
9729 	  value -= (input_section->output_section->vma
9730 		    + input_section->output_offset + rel->r_offset);
9731 	  value += signed_addend;
9732 	  if (! h || h->root.type != bfd_link_hash_undefweak)
9733 	    {
9734 	      /* Check for overflow.  */
9735 	      if ((value ^ (value >> 1)) & (1 << 30))
9736 		return bfd_reloc_overflow;
9737 	    }
9738 	  value &= 0x7fffffff;
9739 	  value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9740 	  if (branch_type == ST_BRANCH_TO_THUMB)
9741 	    value |= 1;
9742 	  break;
9743 	}
9744 
9745       bfd_put_32 (input_bfd, value, hit_data);
9746       return bfd_reloc_ok;
9747 
9748     case R_ARM_ABS8:
9749       /* PR 16202: Refectch the addend using the correct size.  */
9750       if (globals->use_rel)
9751 	addend = bfd_get_8 (input_bfd, hit_data);
9752       value += addend;
9753 
9754       /* There is no way to tell whether the user intended to use a signed or
9755 	 unsigned addend.  When checking for overflow we accept either,
9756 	 as specified by the AAELF.  */
9757       if ((long) value > 0xff || (long) value < -0x80)
9758 	return bfd_reloc_overflow;
9759 
9760       bfd_put_8 (input_bfd, value, hit_data);
9761       return bfd_reloc_ok;
9762 
9763     case R_ARM_ABS16:
9764       /* PR 16202: Refectch the addend using the correct size.  */
9765       if (globals->use_rel)
9766 	addend = bfd_get_16 (input_bfd, hit_data);
9767       value += addend;
9768 
9769       /* See comment for R_ARM_ABS8.  */
9770       if ((long) value > 0xffff || (long) value < -0x8000)
9771 	return bfd_reloc_overflow;
9772 
9773       bfd_put_16 (input_bfd, value, hit_data);
9774       return bfd_reloc_ok;
9775 
9776     case R_ARM_THM_ABS5:
9777       /* Support ldr and str instructions for the thumb.  */
9778       if (globals->use_rel)
9779 	{
9780 	  /* Need to refetch addend.  */
9781 	  addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9782 	  /* ??? Need to determine shift amount from operand size.  */
9783 	  addend >>= howto->rightshift;
9784 	}
9785       value += addend;
9786 
9787       /* ??? Isn't value unsigned?  */
9788       if ((long) value > 0x1f || (long) value < -0x10)
9789 	return bfd_reloc_overflow;
9790 
9791       /* ??? Value needs to be properly shifted into place first.  */
9792       value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9793       bfd_put_16 (input_bfd, value, hit_data);
9794       return bfd_reloc_ok;
9795 
9796     case R_ARM_THM_ALU_PREL_11_0:
9797       /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
9798       {
9799 	bfd_vma insn;
9800 	bfd_signed_vma relocation;
9801 
9802 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9803 	     | bfd_get_16 (input_bfd, hit_data + 2);
9804 
9805 	if (globals->use_rel)
9806 	  {
9807 	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9808 			  | ((insn & (1 << 26)) >> 15);
9809 	    if (insn & 0xf00000)
9810 	      signed_addend = -signed_addend;
9811 	  }
9812 
9813 	relocation = value + signed_addend;
9814 	relocation -= Pa (input_section->output_section->vma
9815 			  + input_section->output_offset
9816 			  + rel->r_offset);
9817 
9818 	value = relocation;
9819 
9820 	if (value >= 0x1000)
9821 	  return bfd_reloc_overflow;
9822 
9823 	insn = (insn & 0xfb0f8f00) | (value & 0xff)
9824 	     | ((value & 0x700) << 4)
9825 	     | ((value & 0x800) << 15);
9826 	if (relocation < 0)
9827 	  insn |= 0xa00000;
9828 
9829 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
9830 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9831 
9832 	return bfd_reloc_ok;
9833       }
9834 
9835     case R_ARM_THM_PC8:
9836       /* PR 10073:  This reloc is not generated by the GNU toolchain,
9837 	 but it is supported for compatibility with third party libraries
9838 	 generated by other compilers, specifically the ARM/IAR.  */
9839       {
9840 	bfd_vma insn;
9841 	bfd_signed_vma relocation;
9842 
9843 	insn = bfd_get_16 (input_bfd, hit_data);
9844 
9845 	if (globals->use_rel)
9846 	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9847 
9848 	relocation = value + addend;
9849 	relocation -= Pa (input_section->output_section->vma
9850 			  + input_section->output_offset
9851 			  + rel->r_offset);
9852 
9853 	value = relocation;
9854 
9855 	/* We do not check for overflow of this reloc.  Although strictly
9856 	   speaking this is incorrect, it appears to be necessary in order
9857 	   to work with IAR generated relocs.  Since GCC and GAS do not
9858 	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9859 	   a problem for them.  */
9860 	value &= 0x3fc;
9861 
9862 	insn = (insn & 0xff00) | (value >> 2);
9863 
9864 	bfd_put_16 (input_bfd, insn, hit_data);
9865 
9866 	return bfd_reloc_ok;
9867       }
9868 
9869     case R_ARM_THM_PC12:
9870       /* Corresponds to: ldr.w reg, [pc, #offset].  */
9871       {
9872 	bfd_vma insn;
9873 	bfd_signed_vma relocation;
9874 
9875 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9876 	     | bfd_get_16 (input_bfd, hit_data + 2);
9877 
9878 	if (globals->use_rel)
9879 	  {
9880 	    signed_addend = insn & 0xfff;
9881 	    if (!(insn & (1 << 23)))
9882 	      signed_addend = -signed_addend;
9883 	  }
9884 
9885 	relocation = value + signed_addend;
9886 	relocation -= Pa (input_section->output_section->vma
9887 			  + input_section->output_offset
9888 			  + rel->r_offset);
9889 
9890 	value = relocation;
9891 
9892 	if (value >= 0x1000)
9893 	  return bfd_reloc_overflow;
9894 
9895 	insn = (insn & 0xff7ff000) | value;
9896 	if (relocation >= 0)
9897 	  insn |= (1 << 23);
9898 
9899 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
9900 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9901 
9902 	return bfd_reloc_ok;
9903       }
9904 
9905     case R_ARM_THM_XPC22:
9906     case R_ARM_THM_CALL:
9907     case R_ARM_THM_JUMP24:
9908       /* Thumb BL (branch long instruction).  */
9909       {
9910 	bfd_vma relocation;
9911 	bfd_vma reloc_sign;
9912 	bfd_boolean overflow = FALSE;
9913 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9914 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9915 	bfd_signed_vma reloc_signed_max;
9916 	bfd_signed_vma reloc_signed_min;
9917 	bfd_vma check;
9918 	bfd_signed_vma signed_check;
9919 	int bitsize;
9920 	const int thumb2 = using_thumb2 (globals);
9921 	const int thumb2_bl = using_thumb2_bl (globals);
9922 
9923 	/* A branch to an undefined weak symbol is turned into a jump to
9924 	   the next instruction unless a PLT entry will be created.
9925 	   The jump to the next instruction is optimized as a NOP.W for
9926 	   Thumb-2 enabled architectures.  */
9927 	if (h && h->root.type == bfd_link_hash_undefweak
9928 	    && plt_offset == (bfd_vma) -1)
9929 	  {
9930 	    if (thumb2)
9931 	      {
9932 		bfd_put_16 (input_bfd, 0xf3af, hit_data);
9933 		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9934 	      }
9935 	    else
9936 	      {
9937 		bfd_put_16 (input_bfd, 0xe000, hit_data);
9938 		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9939 	      }
9940 	    return bfd_reloc_ok;
9941 	  }
9942 
9943 	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
9944 	   with Thumb-1) involving the J1 and J2 bits.  */
9945 	if (globals->use_rel)
9946 	  {
9947 	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9948 	    bfd_vma upper = upper_insn & 0x3ff;
9949 	    bfd_vma lower = lower_insn & 0x7ff;
9950 	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9951 	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9952 	    bfd_vma i1 = j1 ^ s ? 0 : 1;
9953 	    bfd_vma i2 = j2 ^ s ? 0 : 1;
9954 
9955 	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9956 	    /* Sign extend.  */
9957 	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9958 
9959 	    signed_addend = addend;
9960 	  }
9961 
9962 	if (r_type == R_ARM_THM_XPC22)
9963 	  {
9964 	    /* Check for Thumb to Thumb call.  */
9965 	    /* FIXME: Should we translate the instruction into a BL
9966 	       instruction instead ?  */
9967 	    if (branch_type == ST_BRANCH_TO_THUMB)
9968 	      (*_bfd_error_handler)
9969 		(_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9970 		 input_bfd,
9971 		 h ? h->root.root.string : "(local)");
9972 	  }
9973 	else
9974 	  {
9975 	    /* If it is not a call to Thumb, assume call to Arm.
9976 	       If it is a call relative to a section name, then it is not a
9977 	       function call at all, but rather a long jump.  Calls through
9978 	       the PLT do not require stubs.  */
9979 	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9980 	      {
9981 		if (globals->use_blx && r_type == R_ARM_THM_CALL)
9982 		  {
9983 		    /* Convert BL to BLX.  */
9984 		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
9985 		  }
9986 		else if ((   r_type != R_ARM_THM_CALL)
9987 			 && (r_type != R_ARM_THM_JUMP24))
9988 		  {
9989 		    if (elf32_thumb_to_arm_stub
9990 			(info, sym_name, input_bfd, output_bfd, input_section,
9991 			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9992 			 error_message))
9993 		      return bfd_reloc_ok;
9994 		    else
9995 		      return bfd_reloc_dangerous;
9996 		  }
9997 	      }
9998 	    else if (branch_type == ST_BRANCH_TO_THUMB
9999 		     && globals->use_blx
10000 		     && r_type == R_ARM_THM_CALL)
10001 	      {
10002 		/* Make sure this is a BL.  */
10003 		lower_insn |= 0x1800;
10004 	      }
10005 	  }
10006 
10007 	enum elf32_arm_stub_type stub_type = arm_stub_none;
10008 	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
10009 	  {
10010 	    /* Check if a stub has to be inserted because the destination
10011 	       is too far.  */
10012 	    struct elf32_arm_stub_hash_entry *stub_entry;
10013 	    struct elf32_arm_link_hash_entry *hash;
10014 
10015 	    hash = (struct elf32_arm_link_hash_entry *) h;
10016 
10017 	    stub_type = arm_type_of_stub (info, input_section, rel,
10018 					  st_type, &branch_type,
10019 					  hash, value, sym_sec,
10020 					  input_bfd, sym_name);
10021 
10022 	    if (stub_type != arm_stub_none)
10023 	      {
10024 		/* The target is out of reach or we are changing modes, so
10025 		   redirect the branch to the local stub for this
10026 		   function.  */
10027 		stub_entry = elf32_arm_get_stub_entry (input_section,
10028 						       sym_sec, h,
10029 						       rel, globals,
10030 						       stub_type);
10031 		if (stub_entry != NULL)
10032 		  {
10033 		    value = (stub_entry->stub_offset
10034 			     + stub_entry->stub_sec->output_offset
10035 			     + stub_entry->stub_sec->output_section->vma);
10036 
10037 		    if (plt_offset != (bfd_vma) -1)
10038 		      *unresolved_reloc_p = FALSE;
10039 		  }
10040 
10041 		/* If this call becomes a call to Arm, force BLX.  */
10042 		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
10043 		  {
10044 		    if ((stub_entry
10045 			 && !arm_stub_is_thumb (stub_entry->stub_type))
10046 			|| branch_type != ST_BRANCH_TO_THUMB)
10047 		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
10048 		  }
10049 	      }
10050 	  }
10051 
10052 	/* Handle calls via the PLT.  */
10053 	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
10054 	  {
10055 	    value = (splt->output_section->vma
10056 		     + splt->output_offset
10057 		     + plt_offset);
10058 
10059 	    if (globals->use_blx
10060 		&& r_type == R_ARM_THM_CALL
10061 		&& ! using_thumb_only (globals))
10062 	      {
10063 		/* If the Thumb BLX instruction is available, convert
10064 		   the BL to a BLX instruction to call the ARM-mode
10065 		   PLT entry.  */
10066 		lower_insn = (lower_insn & ~0x1000) | 0x0800;
10067 		branch_type = ST_BRANCH_TO_ARM;
10068 	      }
10069 	    else
10070 	      {
10071 		if (! using_thumb_only (globals))
10072 		  /* Target the Thumb stub before the ARM PLT entry.  */
10073 		  value -= PLT_THUMB_STUB_SIZE;
10074 		branch_type = ST_BRANCH_TO_THUMB;
10075 	      }
10076 	    *unresolved_reloc_p = FALSE;
10077 	  }
10078 
10079 	relocation = value + signed_addend;
10080 
10081 	relocation -= (input_section->output_section->vma
10082 		       + input_section->output_offset
10083 		       + rel->r_offset);
10084 
10085 	check = relocation >> howto->rightshift;
10086 
10087 	/* If this is a signed value, the rightshift just dropped
10088 	   leading 1 bits (assuming twos complement).  */
10089 	if ((bfd_signed_vma) relocation >= 0)
10090 	  signed_check = check;
10091 	else
10092 	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
10093 
10094 	/* Calculate the permissable maximum and minimum values for
10095 	   this relocation according to whether we're relocating for
10096 	   Thumb-2 or not.  */
10097 	bitsize = howto->bitsize;
10098 	if (!thumb2_bl)
10099 	  bitsize -= 2;
10100 	reloc_signed_max = (1 << (bitsize - 1)) - 1;
10101 	reloc_signed_min = ~reloc_signed_max;
10102 
10103 	/* Assumes two's complement.  */
10104 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10105 	  overflow = TRUE;
10106 
10107 	if ((lower_insn & 0x5000) == 0x4000)
10108 	  /* For a BLX instruction, make sure that the relocation is rounded up
10109 	     to a word boundary.  This follows the semantics of the instruction
10110 	     which specifies that bit 1 of the target address will come from bit
10111 	     1 of the base address.  */
10112 	  relocation = (relocation + 2) & ~ 3;
10113 
10114 	/* Put RELOCATION back into the insn.  Assumes two's complement.
10115 	   We use the Thumb-2 encoding, which is safe even if dealing with
10116 	   a Thumb-1 instruction by virtue of our overflow check above.  */
10117 	reloc_sign = (signed_check < 0) ? 1 : 0;
10118 	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
10119 		     | ((relocation >> 12) & 0x3ff)
10120 		     | (reloc_sign << 10);
10121 	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
10122 		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
10123 		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
10124 		     | ((relocation >> 1) & 0x7ff);
10125 
10126 	/* Put the relocated value back in the object file:  */
10127 	bfd_put_16 (input_bfd, upper_insn, hit_data);
10128 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10129 
10130 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10131       }
10132       break;
10133 
10134     case R_ARM_THM_JUMP19:
10135       /* Thumb32 conditional branch instruction.  */
10136       {
10137 	bfd_vma relocation;
10138 	bfd_boolean overflow = FALSE;
10139 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10140 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10141 	bfd_signed_vma reloc_signed_max = 0xffffe;
10142 	bfd_signed_vma reloc_signed_min = -0x100000;
10143 	bfd_signed_vma signed_check;
10144         enum elf32_arm_stub_type stub_type = arm_stub_none;
10145 	struct elf32_arm_stub_hash_entry *stub_entry;
10146 	struct elf32_arm_link_hash_entry *hash;
10147 
10148 	/* Need to refetch the addend, reconstruct the top three bits,
10149 	   and squish the two 11 bit pieces together.  */
10150 	if (globals->use_rel)
10151 	  {
10152 	    bfd_vma S     = (upper_insn & 0x0400) >> 10;
10153 	    bfd_vma upper = (upper_insn & 0x003f);
10154 	    bfd_vma J1    = (lower_insn & 0x2000) >> 13;
10155 	    bfd_vma J2    = (lower_insn & 0x0800) >> 11;
10156 	    bfd_vma lower = (lower_insn & 0x07ff);
10157 
10158 	    upper |= J1 << 6;
10159 	    upper |= J2 << 7;
10160 	    upper |= (!S) << 8;
10161 	    upper -= 0x0100; /* Sign extend.  */
10162 
10163 	    addend = (upper << 12) | (lower << 1);
10164 	    signed_addend = addend;
10165 	  }
10166 
10167 	/* Handle calls via the PLT.  */
10168 	if (plt_offset != (bfd_vma) -1)
10169 	  {
10170 	    value = (splt->output_section->vma
10171 		     + splt->output_offset
10172 		     + plt_offset);
10173 	    /* Target the Thumb stub before the ARM PLT entry.  */
10174 	    value -= PLT_THUMB_STUB_SIZE;
10175 	    *unresolved_reloc_p = FALSE;
10176 	  }
10177 
10178 	hash = (struct elf32_arm_link_hash_entry *)h;
10179 
10180 	stub_type = arm_type_of_stub (info, input_section, rel,
10181 		                      st_type, &branch_type,
10182 		                      hash, value, sym_sec,
10183 		                      input_bfd, sym_name);
10184 	if (stub_type != arm_stub_none)
10185 	  {
10186 	    stub_entry = elf32_arm_get_stub_entry (input_section,
10187 				                   sym_sec, h,
10188 				                   rel, globals,
10189 				                   stub_type);
10190 	    if (stub_entry != NULL)
10191 	      {
10192 	        value = (stub_entry->stub_offset
10193                         + stub_entry->stub_sec->output_offset
10194                         + stub_entry->stub_sec->output_section->vma);
10195 	      }
10196 	  }
10197 
10198 	relocation = value + signed_addend;
10199 	relocation -= (input_section->output_section->vma
10200 		       + input_section->output_offset
10201 		       + rel->r_offset);
10202 	signed_check = (bfd_signed_vma) relocation;
10203 
10204 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10205 	  overflow = TRUE;
10206 
10207 	/* Put RELOCATION back into the insn.  */
10208 	{
10209 	  bfd_vma S  = (relocation & 0x00100000) >> 20;
10210 	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
10211 	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
10212 	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
10213 	  bfd_vma lo = (relocation & 0x00000ffe) >>  1;
10214 
10215 	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
10216 	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
10217 	}
10218 
10219 	/* Put the relocated value back in the object file:  */
10220 	bfd_put_16 (input_bfd, upper_insn, hit_data);
10221 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10222 
10223 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10224       }
10225 
10226     case R_ARM_THM_JUMP11:
10227     case R_ARM_THM_JUMP8:
10228     case R_ARM_THM_JUMP6:
10229       /* Thumb B (branch) instruction).  */
10230       {
10231 	bfd_signed_vma relocation;
10232 	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
10233 	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
10234 	bfd_signed_vma signed_check;
10235 
10236 	/* CZB cannot jump backward.  */
10237 	if (r_type == R_ARM_THM_JUMP6)
10238 	  reloc_signed_min = 0;
10239 
10240 	if (globals->use_rel)
10241 	  {
10242 	    /* Need to refetch addend.  */
10243 	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10244 	    if (addend & ((howto->src_mask + 1) >> 1))
10245 	      {
10246 		signed_addend = -1;
10247 		signed_addend &= ~ howto->src_mask;
10248 		signed_addend |= addend;
10249 	      }
10250 	    else
10251 	      signed_addend = addend;
10252 	    /* The value in the insn has been right shifted.  We need to
10253 	       undo this, so that we can perform the address calculation
10254 	       in terms of bytes.  */
10255 	    signed_addend <<= howto->rightshift;
10256 	  }
10257 	relocation = value + signed_addend;
10258 
10259 	relocation -= (input_section->output_section->vma
10260 		       + input_section->output_offset
10261 		       + rel->r_offset);
10262 
10263 	relocation >>= howto->rightshift;
10264 	signed_check = relocation;
10265 
10266 	if (r_type == R_ARM_THM_JUMP6)
10267 	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
10268 	else
10269 	  relocation &= howto->dst_mask;
10270 	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
10271 
10272 	bfd_put_16 (input_bfd, relocation, hit_data);
10273 
10274 	/* Assumes two's complement.  */
10275 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10276 	  return bfd_reloc_overflow;
10277 
10278 	return bfd_reloc_ok;
10279       }
10280 
10281     case R_ARM_ALU_PCREL7_0:
10282     case R_ARM_ALU_PCREL15_8:
10283     case R_ARM_ALU_PCREL23_15:
10284       {
10285 	bfd_vma insn;
10286 	bfd_vma relocation;
10287 
10288 	insn = bfd_get_32 (input_bfd, hit_data);
10289 	if (globals->use_rel)
10290 	  {
10291 	    /* Extract the addend.  */
10292 	    addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
10293 	    signed_addend = addend;
10294 	  }
10295 	relocation = value + signed_addend;
10296 
10297 	relocation -= (input_section->output_section->vma
10298 		       + input_section->output_offset
10299 		       + rel->r_offset);
10300 	insn = (insn & ~0xfff)
10301 	       | ((howto->bitpos << 7) & 0xf00)
10302 	       | ((relocation >> howto->bitpos) & 0xff);
10303 	bfd_put_32 (input_bfd, value, hit_data);
10304       }
10305       return bfd_reloc_ok;
10306 
10307     case R_ARM_GNU_VTINHERIT:
10308     case R_ARM_GNU_VTENTRY:
10309       return bfd_reloc_ok;
10310 
10311     case R_ARM_GOTOFF32:
10312       /* Relocation is relative to the start of the
10313 	 global offset table.  */
10314 
10315       BFD_ASSERT (sgot != NULL);
10316       if (sgot == NULL)
10317 	return bfd_reloc_notsupported;
10318 
10319       /* If we are addressing a Thumb function, we need to adjust the
10320 	 address by one, so that attempts to call the function pointer will
10321 	 correctly interpret it as Thumb code.  */
10322       if (branch_type == ST_BRANCH_TO_THUMB)
10323 	value += 1;
10324 
10325       /* Note that sgot->output_offset is not involved in this
10326 	 calculation.  We always want the start of .got.  If we
10327 	 define _GLOBAL_OFFSET_TABLE in a different way, as is
10328 	 permitted by the ABI, we might have to change this
10329 	 calculation.  */
10330       value -= sgot->output_section->vma;
10331       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10332 				       contents, rel->r_offset, value,
10333 				       rel->r_addend);
10334 
10335     case R_ARM_GOTPC:
10336       /* Use global offset table as symbol value.  */
10337       BFD_ASSERT (sgot != NULL);
10338 
10339       if (sgot == NULL)
10340 	return bfd_reloc_notsupported;
10341 
10342       *unresolved_reloc_p = FALSE;
10343       value = sgot->output_section->vma;
10344       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10345 				       contents, rel->r_offset, value,
10346 				       rel->r_addend);
10347 
10348     case R_ARM_GOT32:
10349     case R_ARM_GOT_PREL:
10350       /* Relocation is to the entry for this symbol in the
10351 	 global offset table.  */
10352       if (sgot == NULL)
10353 	return bfd_reloc_notsupported;
10354 
10355       if (dynreloc_st_type == STT_GNU_IFUNC
10356 	  && plt_offset != (bfd_vma) -1
10357 	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
10358 	{
10359 	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
10360 	     symbol, and the relocation resolves directly to the runtime
10361 	     target rather than to the .iplt entry.  This means that any
10362 	     .got entry would be the same value as the .igot.plt entry,
10363 	     so there's no point creating both.  */
10364 	  sgot = globals->root.igotplt;
10365 	  value = sgot->output_offset + gotplt_offset;
10366 	}
10367       else if (h != NULL)
10368 	{
10369 	  bfd_vma off;
10370 
10371 	  off = h->got.offset;
10372 	  BFD_ASSERT (off != (bfd_vma) -1);
10373 	  if ((off & 1) != 0)
10374 	    {
10375 	      /* We have already processsed one GOT relocation against
10376 		 this symbol.  */
10377 	      off &= ~1;
10378 	      if (globals->root.dynamic_sections_created
10379 		  && !SYMBOL_REFERENCES_LOCAL (info, h))
10380 		*unresolved_reloc_p = FALSE;
10381 	    }
10382 	  else
10383 	    {
10384 	      Elf_Internal_Rela outrel;
10385 
10386 	      if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
10387 		{
10388 		  /* If the symbol doesn't resolve locally in a static
10389 		     object, we have an undefined reference.  If the
10390 		     symbol doesn't resolve locally in a dynamic object,
10391 		     it should be resolved by the dynamic linker.  */
10392 		  if (globals->root.dynamic_sections_created)
10393 		    {
10394 		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
10395 		      *unresolved_reloc_p = FALSE;
10396 		    }
10397 		  else
10398 		    outrel.r_info = 0;
10399 		  outrel.r_addend = 0;
10400 		}
10401 	      else
10402 		{
10403 		  if (dynreloc_st_type == STT_GNU_IFUNC)
10404 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10405 		  else if (bfd_link_pic (info) &&
10406 			   (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10407 			    || h->root.type != bfd_link_hash_undefweak))
10408 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10409 		  else
10410 		    outrel.r_info = 0;
10411 		  outrel.r_addend = dynreloc_value;
10412 		}
10413 
10414 	      /* The GOT entry is initialized to zero by default.
10415 		 See if we should install a different value.  */
10416 	      if (outrel.r_addend != 0
10417 		  && (outrel.r_info == 0 || globals->use_rel))
10418 		{
10419 		  bfd_put_32 (output_bfd, outrel.r_addend,
10420 			      sgot->contents + off);
10421 		  outrel.r_addend = 0;
10422 		}
10423 
10424 	      if (outrel.r_info != 0)
10425 		{
10426 		  outrel.r_offset = (sgot->output_section->vma
10427 				     + sgot->output_offset
10428 				     + off);
10429 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10430 		}
10431 	      h->got.offset |= 1;
10432 	    }
10433 	  value = sgot->output_offset + off;
10434 	}
10435       else
10436 	{
10437 	  bfd_vma off;
10438 
10439 	  BFD_ASSERT (local_got_offsets != NULL &&
10440 		      local_got_offsets[r_symndx] != (bfd_vma) -1);
10441 
10442 	  off = local_got_offsets[r_symndx];
10443 
10444 	  /* The offset must always be a multiple of 4.  We use the
10445 	     least significant bit to record whether we have already
10446 	     generated the necessary reloc.  */
10447 	  if ((off & 1) != 0)
10448 	    off &= ~1;
10449 	  else
10450 	    {
10451 	      if (globals->use_rel)
10452 		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
10453 
10454 	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
10455 		{
10456 		  Elf_Internal_Rela outrel;
10457 
10458 		  outrel.r_addend = addend + dynreloc_value;
10459 		  outrel.r_offset = (sgot->output_section->vma
10460 				     + sgot->output_offset
10461 				     + off);
10462 		  if (dynreloc_st_type == STT_GNU_IFUNC)
10463 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10464 		  else
10465 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10466 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10467 		}
10468 
10469 	      local_got_offsets[r_symndx] |= 1;
10470 	    }
10471 
10472 	  value = sgot->output_offset + off;
10473 	}
10474       if (r_type != R_ARM_GOT32)
10475 	value += sgot->output_section->vma;
10476 
10477       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10478 				       contents, rel->r_offset, value,
10479 				       rel->r_addend);
10480 
10481     case R_ARM_TLS_LDO32:
10482       value = value - dtpoff_base (info);
10483 
10484       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10485 				       contents, rel->r_offset, value,
10486 				       rel->r_addend);
10487 
10488     case R_ARM_TLS_LDM32:
10489       {
10490 	bfd_vma off;
10491 
10492 	if (sgot == NULL)
10493 	  abort ();
10494 
10495 	off = globals->tls_ldm_got.offset;
10496 
10497 	if ((off & 1) != 0)
10498 	  off &= ~1;
10499 	else
10500 	  {
10501 	    /* If we don't know the module number, create a relocation
10502 	       for it.  */
10503 	    if (bfd_link_pic (info))
10504 	      {
10505 		Elf_Internal_Rela outrel;
10506 
10507 		if (srelgot == NULL)
10508 		  abort ();
10509 
10510 		outrel.r_addend = 0;
10511 		outrel.r_offset = (sgot->output_section->vma
10512 				   + sgot->output_offset + off);
10513 		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
10514 
10515 		if (globals->use_rel)
10516 		  bfd_put_32 (output_bfd, outrel.r_addend,
10517 			      sgot->contents + off);
10518 
10519 		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10520 	      }
10521 	    else
10522 	      bfd_put_32 (output_bfd, 1, sgot->contents + off);
10523 
10524 	    globals->tls_ldm_got.offset |= 1;
10525 	  }
10526 
10527 	value = sgot->output_section->vma + sgot->output_offset + off
10528 	  - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
10529 
10530 	return _bfd_final_link_relocate (howto, input_bfd, input_section,
10531 					 contents, rel->r_offset, value,
10532 					 rel->r_addend);
10533       }
10534 
10535     case R_ARM_TLS_CALL:
10536     case R_ARM_THM_TLS_CALL:
10537     case R_ARM_TLS_GD32:
10538     case R_ARM_TLS_IE32:
10539     case R_ARM_TLS_GOTDESC:
10540     case R_ARM_TLS_DESCSEQ:
10541     case R_ARM_THM_TLS_DESCSEQ:
10542       {
10543 	bfd_vma off, offplt;
10544 	int indx = 0;
10545 	char tls_type;
10546 
10547 	BFD_ASSERT (sgot != NULL);
10548 
10549 	if (h != NULL)
10550 	  {
10551 	    bfd_boolean dyn;
10552 	    dyn = globals->root.dynamic_sections_created;
10553 	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
10554 						 bfd_link_pic (info),
10555 						 h)
10556 		&& (!bfd_link_pic (info)
10557 		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
10558 	      {
10559 		*unresolved_reloc_p = FALSE;
10560 		indx = h->dynindx;
10561 	      }
10562 	    off = h->got.offset;
10563 	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
10564 	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
10565 	  }
10566 	else
10567 	  {
10568 	    BFD_ASSERT (local_got_offsets != NULL);
10569 	    off = local_got_offsets[r_symndx];
10570 	    offplt = local_tlsdesc_gotents[r_symndx];
10571 	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
10572 	  }
10573 
10574 	/* Linker relaxations happens from one of the
10575 	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
10576 	if (ELF32_R_TYPE(rel->r_info) != r_type)
10577 	  tls_type = GOT_TLS_IE;
10578 
10579 	BFD_ASSERT (tls_type != GOT_UNKNOWN);
10580 
10581 	if ((off & 1) != 0)
10582 	  off &= ~1;
10583 	else
10584 	  {
10585 	    bfd_boolean need_relocs = FALSE;
10586 	    Elf_Internal_Rela outrel;
10587 	    int cur_off = off;
10588 
10589 	    /* The GOT entries have not been initialized yet.  Do it
10590 	       now, and emit any relocations.  If both an IE GOT and a
10591 	       GD GOT are necessary, we emit the GD first.  */
10592 
10593 	    if ((bfd_link_pic (info) || indx != 0)
10594 		&& (h == NULL
10595 		    || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10596 		    || h->root.type != bfd_link_hash_undefweak))
10597 	      {
10598 		need_relocs = TRUE;
10599 		BFD_ASSERT (srelgot != NULL);
10600 	      }
10601 
10602 	    if (tls_type & GOT_TLS_GDESC)
10603 	      {
10604 		bfd_byte *loc;
10605 
10606 		/* We should have relaxed, unless this is an undefined
10607 		   weak symbol.  */
10608 		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
10609 			    || bfd_link_pic (info));
10610 		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
10611 			    <= globals->root.sgotplt->size);
10612 
10613 		outrel.r_addend = 0;
10614 		outrel.r_offset = (globals->root.sgotplt->output_section->vma
10615 				   + globals->root.sgotplt->output_offset
10616 				   + offplt
10617 				   + globals->sgotplt_jump_table_size);
10618 
10619 		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
10620 		sreloc = globals->root.srelplt;
10621 		loc = sreloc->contents;
10622 		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
10623 		BFD_ASSERT (loc + RELOC_SIZE (globals)
10624 			   <= sreloc->contents + sreloc->size);
10625 
10626 		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
10627 
10628 		/* For globals, the first word in the relocation gets
10629 		   the relocation index and the top bit set, or zero,
10630 		   if we're binding now.  For locals, it gets the
10631 		   symbol's offset in the tls section.  */
10632 		bfd_put_32 (output_bfd,
10633 			    !h ? value - elf_hash_table (info)->tls_sec->vma
10634 			    : info->flags & DF_BIND_NOW ? 0
10635 			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
10636 			    globals->root.sgotplt->contents + offplt
10637 			    + globals->sgotplt_jump_table_size);
10638 
10639 		/* Second word in the relocation is always zero.  */
10640 		bfd_put_32 (output_bfd, 0,
10641 			    globals->root.sgotplt->contents + offplt
10642 			    + globals->sgotplt_jump_table_size + 4);
10643 	      }
10644 	    if (tls_type & GOT_TLS_GD)
10645 	      {
10646 		if (need_relocs)
10647 		  {
10648 		    outrel.r_addend = 0;
10649 		    outrel.r_offset = (sgot->output_section->vma
10650 				       + sgot->output_offset
10651 				       + cur_off);
10652 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
10653 
10654 		    if (globals->use_rel)
10655 		      bfd_put_32 (output_bfd, outrel.r_addend,
10656 				  sgot->contents + cur_off);
10657 
10658 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10659 
10660 		    if (indx == 0)
10661 		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
10662 				  sgot->contents + cur_off + 4);
10663 		    else
10664 		      {
10665 			outrel.r_addend = 0;
10666 			outrel.r_info = ELF32_R_INFO (indx,
10667 						      R_ARM_TLS_DTPOFF32);
10668 			outrel.r_offset += 4;
10669 
10670 			if (globals->use_rel)
10671 			  bfd_put_32 (output_bfd, outrel.r_addend,
10672 				      sgot->contents + cur_off + 4);
10673 
10674 			elf32_arm_add_dynreloc (output_bfd, info,
10675 						srelgot, &outrel);
10676 		      }
10677 		  }
10678 		else
10679 		  {
10680 		    /* If we are not emitting relocations for a
10681 		       general dynamic reference, then we must be in a
10682 		       static link or an executable link with the
10683 		       symbol binding locally.  Mark it as belonging
10684 		       to module 1, the executable.  */
10685 		    bfd_put_32 (output_bfd, 1,
10686 				sgot->contents + cur_off);
10687 		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
10688 				sgot->contents + cur_off + 4);
10689 		  }
10690 
10691 		cur_off += 8;
10692 	      }
10693 
10694 	    if (tls_type & GOT_TLS_IE)
10695 	      {
10696 		if (need_relocs)
10697 		  {
10698 		    if (indx == 0)
10699 		      outrel.r_addend = value - dtpoff_base (info);
10700 		    else
10701 		      outrel.r_addend = 0;
10702 		    outrel.r_offset = (sgot->output_section->vma
10703 				       + sgot->output_offset
10704 				       + cur_off);
10705 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
10706 
10707 		    if (globals->use_rel)
10708 		      bfd_put_32 (output_bfd, outrel.r_addend,
10709 				  sgot->contents + cur_off);
10710 
10711 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10712 		  }
10713 		else
10714 		  bfd_put_32 (output_bfd, tpoff (info, value),
10715 			      sgot->contents + cur_off);
10716 		cur_off += 4;
10717 	      }
10718 
10719 	    if (h != NULL)
10720 	      h->got.offset |= 1;
10721 	    else
10722 	      local_got_offsets[r_symndx] |= 1;
10723 	  }
10724 
10725 	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
10726 	  off += 8;
10727 	else if (tls_type & GOT_TLS_GDESC)
10728 	  off = offplt;
10729 
10730 	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
10731 	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
10732 	  {
10733 	    bfd_signed_vma offset;
10734 	    /* TLS stubs are arm mode.  The original symbol is a
10735 	       data object, so branch_type is bogus.  */
10736 	    branch_type = ST_BRANCH_TO_ARM;
10737 	    enum elf32_arm_stub_type stub_type
10738 	      = arm_type_of_stub (info, input_section, rel,
10739 				  st_type, &branch_type,
10740 				  (struct elf32_arm_link_hash_entry *)h,
10741 				  globals->tls_trampoline, globals->root.splt,
10742 				  input_bfd, sym_name);
10743 
10744 	    if (stub_type != arm_stub_none)
10745 	      {
10746 		struct elf32_arm_stub_hash_entry *stub_entry
10747 		  = elf32_arm_get_stub_entry
10748 		  (input_section, globals->root.splt, 0, rel,
10749 		   globals, stub_type);
10750 		offset = (stub_entry->stub_offset
10751 			  + stub_entry->stub_sec->output_offset
10752 			  + stub_entry->stub_sec->output_section->vma);
10753 	      }
10754 	    else
10755 	      offset = (globals->root.splt->output_section->vma
10756 			+ globals->root.splt->output_offset
10757 			+ globals->tls_trampoline);
10758 
10759 	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
10760 	      {
10761 		unsigned long inst;
10762 
10763 		offset -= (input_section->output_section->vma
10764 			   + input_section->output_offset
10765 			   + rel->r_offset + 8);
10766 
10767 		inst = offset >> 2;
10768 		inst &= 0x00ffffff;
10769 		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
10770 	      }
10771 	    else
10772 	      {
10773 		/* Thumb blx encodes the offset in a complicated
10774 		   fashion.  */
10775 		unsigned upper_insn, lower_insn;
10776 		unsigned neg;
10777 
10778 		offset -= (input_section->output_section->vma
10779 			   + input_section->output_offset
10780 			   + rel->r_offset + 4);
10781 
10782 		if (stub_type != arm_stub_none
10783 		    && arm_stub_is_thumb (stub_type))
10784 		  {
10785 		    lower_insn = 0xd000;
10786 		  }
10787 		else
10788 		  {
10789 		    lower_insn = 0xc000;
10790 		    /* Round up the offset to a word boundary.  */
10791 		    offset = (offset + 2) & ~2;
10792 		  }
10793 
10794 		neg = offset < 0;
10795 		upper_insn = (0xf000
10796 			      | ((offset >> 12) & 0x3ff)
10797 			      | (neg << 10));
10798 		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
10799 			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
10800 			      | ((offset >> 1) & 0x7ff);
10801 		bfd_put_16 (input_bfd, upper_insn, hit_data);
10802 		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10803 		return bfd_reloc_ok;
10804 	      }
10805 	  }
10806 	/* These relocations needs special care, as besides the fact
10807 	   they point somewhere in .gotplt, the addend must be
10808 	   adjusted accordingly depending on the type of instruction
10809 	   we refer to.  */
10810 	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
10811 	  {
10812 	    unsigned long data, insn;
10813 	    unsigned thumb;
10814 
10815 	    data = bfd_get_32 (input_bfd, hit_data);
10816 	    thumb = data & 1;
10817 	    data &= ~1u;
10818 
10819 	    if (thumb)
10820 	      {
10821 		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
10822 		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10823 		  insn = (insn << 16)
10824 		    | bfd_get_16 (input_bfd,
10825 				  contents + rel->r_offset - data + 2);
10826 		if ((insn & 0xf800c000) == 0xf000c000)
10827 		  /* bl/blx */
10828 		  value = -6;
10829 		else if ((insn & 0xffffff00) == 0x4400)
10830 		  /* add */
10831 		  value = -5;
10832 		else
10833 		  {
10834 		    (*_bfd_error_handler)
10835 		      (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10836 		       input_bfd, input_section,
10837 		       (unsigned long)rel->r_offset, insn);
10838 		    return bfd_reloc_notsupported;
10839 		  }
10840 	      }
10841 	    else
10842 	      {
10843 		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
10844 
10845 		switch (insn >> 24)
10846 		  {
10847 		  case 0xeb:  /* bl */
10848 		  case 0xfa:  /* blx */
10849 		    value = -4;
10850 		    break;
10851 
10852 		  case 0xe0:	/* add */
10853 		    value = -8;
10854 		    break;
10855 
10856 		  default:
10857 		    (*_bfd_error_handler)
10858 		      (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10859 		       input_bfd, input_section,
10860 		       (unsigned long)rel->r_offset, insn);
10861 		    return bfd_reloc_notsupported;
10862 		  }
10863 	      }
10864 
10865 	    value += ((globals->root.sgotplt->output_section->vma
10866 		       + globals->root.sgotplt->output_offset + off)
10867 		      - (input_section->output_section->vma
10868 			 + input_section->output_offset
10869 			 + rel->r_offset)
10870 		      + globals->sgotplt_jump_table_size);
10871 	  }
10872 	else
10873 	  value = ((globals->root.sgot->output_section->vma
10874 		    + globals->root.sgot->output_offset + off)
10875 		   - (input_section->output_section->vma
10876 		      + input_section->output_offset + rel->r_offset));
10877 
10878 	return _bfd_final_link_relocate (howto, input_bfd, input_section,
10879 					 contents, rel->r_offset, value,
10880 					 rel->r_addend);
10881       }
10882 
10883     case R_ARM_TLS_LE32:
10884       if (bfd_link_dll (info))
10885 	{
10886 	  (*_bfd_error_handler)
10887 	    (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10888 	     input_bfd, input_section,
10889 	     (long) rel->r_offset, howto->name);
10890 	  return bfd_reloc_notsupported;
10891 	}
10892       else
10893 	value = tpoff (info, value);
10894 
10895       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10896 				       contents, rel->r_offset, value,
10897 				       rel->r_addend);
10898 
10899     case R_ARM_V4BX:
10900       if (globals->fix_v4bx)
10901 	{
10902 	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10903 
10904 	  /* Ensure that we have a BX instruction.  */
10905 	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
10906 
10907 	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
10908 	    {
10909 	      /* Branch to veneer.  */
10910 	      bfd_vma glue_addr;
10911 	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
10912 	      glue_addr -= input_section->output_section->vma
10913 			   + input_section->output_offset
10914 			   + rel->r_offset + 8;
10915 	      insn = (insn & 0xf0000000) | 0x0a000000
10916 		     | ((glue_addr >> 2) & 0x00ffffff);
10917 	    }
10918 	  else
10919 	    {
10920 	      /* Preserve Rm (lowest four bits) and the condition code
10921 		 (highest four bits). Other bits encode MOV PC,Rm.  */
10922 	      insn = (insn & 0xf000000f) | 0x01a0f000;
10923 	    }
10924 
10925 	  bfd_put_32 (input_bfd, insn, hit_data);
10926 	}
10927       return bfd_reloc_ok;
10928 
10929     case R_ARM_MOVW_ABS_NC:
10930     case R_ARM_MOVT_ABS:
10931     case R_ARM_MOVW_PREL_NC:
10932     case R_ARM_MOVT_PREL:
10933     /* Until we properly support segment-base-relative addressing then
10934        we assume the segment base to be zero, as for the group relocations.
10935        Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10936        and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
10937     case R_ARM_MOVW_BREL_NC:
10938     case R_ARM_MOVW_BREL:
10939     case R_ARM_MOVT_BREL:
10940       {
10941 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10942 
10943 	if (globals->use_rel)
10944 	  {
10945 	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
10946 	    signed_addend = (addend ^ 0x8000) - 0x8000;
10947 	  }
10948 
10949 	value += signed_addend;
10950 
10951 	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
10952 	  value -= (input_section->output_section->vma
10953 		    + input_section->output_offset + rel->r_offset);
10954 
10955 	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
10956 	  return bfd_reloc_overflow;
10957 
10958 	if (branch_type == ST_BRANCH_TO_THUMB)
10959 	  value |= 1;
10960 
10961 	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
10962 	    || r_type == R_ARM_MOVT_BREL)
10963 	  value >>= 16;
10964 
10965 	insn &= 0xfff0f000;
10966 	insn |= value & 0xfff;
10967 	insn |= (value & 0xf000) << 4;
10968 	bfd_put_32 (input_bfd, insn, hit_data);
10969       }
10970       return bfd_reloc_ok;
10971 
10972     case R_ARM_THM_MOVW_ABS_NC:
10973     case R_ARM_THM_MOVT_ABS:
10974     case R_ARM_THM_MOVW_PREL_NC:
10975     case R_ARM_THM_MOVT_PREL:
10976     /* Until we properly support segment-base-relative addressing then
10977        we assume the segment base to be zero, as for the above relocations.
10978        Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10979        R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10980        as R_ARM_THM_MOVT_ABS.  */
10981     case R_ARM_THM_MOVW_BREL_NC:
10982     case R_ARM_THM_MOVW_BREL:
10983     case R_ARM_THM_MOVT_BREL:
10984       {
10985 	bfd_vma insn;
10986 
10987 	insn = bfd_get_16 (input_bfd, hit_data) << 16;
10988 	insn |= bfd_get_16 (input_bfd, hit_data + 2);
10989 
10990 	if (globals->use_rel)
10991 	  {
10992 	    addend = ((insn >> 4)  & 0xf000)
10993 		   | ((insn >> 15) & 0x0800)
10994 		   | ((insn >> 4)  & 0x0700)
10995 		   | (insn         & 0x00ff);
10996 	    signed_addend = (addend ^ 0x8000) - 0x8000;
10997 	  }
10998 
10999 	value += signed_addend;
11000 
11001 	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
11002 	  value -= (input_section->output_section->vma
11003 		    + input_section->output_offset + rel->r_offset);
11004 
11005 	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
11006 	  return bfd_reloc_overflow;
11007 
11008 	if (branch_type == ST_BRANCH_TO_THUMB)
11009 	  value |= 1;
11010 
11011 	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
11012 	    || r_type == R_ARM_THM_MOVT_BREL)
11013 	  value >>= 16;
11014 
11015 	insn &= 0xfbf08f00;
11016 	insn |= (value & 0xf000) << 4;
11017 	insn |= (value & 0x0800) << 15;
11018 	insn |= (value & 0x0700) << 4;
11019 	insn |= (value & 0x00ff);
11020 
11021 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
11022 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
11023       }
11024       return bfd_reloc_ok;
11025 
11026     case R_ARM_ALU_PC_G0_NC:
11027     case R_ARM_ALU_PC_G1_NC:
11028     case R_ARM_ALU_PC_G0:
11029     case R_ARM_ALU_PC_G1:
11030     case R_ARM_ALU_PC_G2:
11031     case R_ARM_ALU_SB_G0_NC:
11032     case R_ARM_ALU_SB_G1_NC:
11033     case R_ARM_ALU_SB_G0:
11034     case R_ARM_ALU_SB_G1:
11035     case R_ARM_ALU_SB_G2:
11036       {
11037 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11038 	bfd_vma pc = input_section->output_section->vma
11039 		     + input_section->output_offset + rel->r_offset;
11040 	/* sb is the origin of the *segment* containing the symbol.  */
11041 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11042 	bfd_vma residual;
11043 	bfd_vma g_n;
11044 	bfd_signed_vma signed_value;
11045 	int group = 0;
11046 
11047 	/* Determine which group of bits to select.  */
11048 	switch (r_type)
11049 	  {
11050 	  case R_ARM_ALU_PC_G0_NC:
11051 	  case R_ARM_ALU_PC_G0:
11052 	  case R_ARM_ALU_SB_G0_NC:
11053 	  case R_ARM_ALU_SB_G0:
11054 	    group = 0;
11055 	    break;
11056 
11057 	  case R_ARM_ALU_PC_G1_NC:
11058 	  case R_ARM_ALU_PC_G1:
11059 	  case R_ARM_ALU_SB_G1_NC:
11060 	  case R_ARM_ALU_SB_G1:
11061 	    group = 1;
11062 	    break;
11063 
11064 	  case R_ARM_ALU_PC_G2:
11065 	  case R_ARM_ALU_SB_G2:
11066 	    group = 2;
11067 	    break;
11068 
11069 	  default:
11070 	    abort ();
11071 	  }
11072 
11073 	/* If REL, extract the addend from the insn.  If RELA, it will
11074 	   have already been fetched for us.  */
11075 	if (globals->use_rel)
11076 	  {
11077 	    int negative;
11078 	    bfd_vma constant = insn & 0xff;
11079 	    bfd_vma rotation = (insn & 0xf00) >> 8;
11080 
11081 	    if (rotation == 0)
11082 	      signed_addend = constant;
11083 	    else
11084 	      {
11085 		/* Compensate for the fact that in the instruction, the
11086 		   rotation is stored in multiples of 2 bits.  */
11087 		rotation *= 2;
11088 
11089 		/* Rotate "constant" right by "rotation" bits.  */
11090 		signed_addend = (constant >> rotation) |
11091 				(constant << (8 * sizeof (bfd_vma) - rotation));
11092 	      }
11093 
11094 	    /* Determine if the instruction is an ADD or a SUB.
11095 	       (For REL, this determines the sign of the addend.)  */
11096 	    negative = identify_add_or_sub (insn);
11097 	    if (negative == 0)
11098 	      {
11099 		(*_bfd_error_handler)
11100 		  (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
11101 		  input_bfd, input_section,
11102 		  (long) rel->r_offset, howto->name);
11103 		return bfd_reloc_overflow;
11104 	      }
11105 
11106 	    signed_addend *= negative;
11107 	  }
11108 
11109 	/* Compute the value (X) to go in the place.  */
11110 	if (r_type == R_ARM_ALU_PC_G0_NC
11111 	    || r_type == R_ARM_ALU_PC_G1_NC
11112 	    || r_type == R_ARM_ALU_PC_G0
11113 	    || r_type == R_ARM_ALU_PC_G1
11114 	    || r_type == R_ARM_ALU_PC_G2)
11115 	  /* PC relative.  */
11116 	  signed_value = value - pc + signed_addend;
11117 	else
11118 	  /* Section base relative.  */
11119 	  signed_value = value - sb + signed_addend;
11120 
11121 	/* If the target symbol is a Thumb function, then set the
11122 	   Thumb bit in the address.  */
11123 	if (branch_type == ST_BRANCH_TO_THUMB)
11124 	  signed_value |= 1;
11125 
11126 	/* Calculate the value of the relevant G_n, in encoded
11127 	   constant-with-rotation format.  */
11128 	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11129 					  group, &residual);
11130 
11131 	/* Check for overflow if required.  */
11132 	if ((r_type == R_ARM_ALU_PC_G0
11133 	     || r_type == R_ARM_ALU_PC_G1
11134 	     || r_type == R_ARM_ALU_PC_G2
11135 	     || r_type == R_ARM_ALU_SB_G0
11136 	     || r_type == R_ARM_ALU_SB_G1
11137 	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
11138 	  {
11139 	    (*_bfd_error_handler)
11140 	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11141 	      input_bfd, input_section,
11142 	       (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
11143 	       howto->name);
11144 	    return bfd_reloc_overflow;
11145 	  }
11146 
11147 	/* Mask out the value and the ADD/SUB part of the opcode; take care
11148 	   not to destroy the S bit.  */
11149 	insn &= 0xff1ff000;
11150 
11151 	/* Set the opcode according to whether the value to go in the
11152 	   place is negative.  */
11153 	if (signed_value < 0)
11154 	  insn |= 1 << 22;
11155 	else
11156 	  insn |= 1 << 23;
11157 
11158 	/* Encode the offset.  */
11159 	insn |= g_n;
11160 
11161 	bfd_put_32 (input_bfd, insn, hit_data);
11162       }
11163       return bfd_reloc_ok;
11164 
11165     case R_ARM_LDR_PC_G0:
11166     case R_ARM_LDR_PC_G1:
11167     case R_ARM_LDR_PC_G2:
11168     case R_ARM_LDR_SB_G0:
11169     case R_ARM_LDR_SB_G1:
11170     case R_ARM_LDR_SB_G2:
11171       {
11172 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11173 	bfd_vma pc = input_section->output_section->vma
11174 		     + input_section->output_offset + rel->r_offset;
11175 	/* sb is the origin of the *segment* containing the symbol.  */
11176 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11177 	bfd_vma residual;
11178 	bfd_signed_vma signed_value;
11179 	int group = 0;
11180 
11181 	/* Determine which groups of bits to calculate.  */
11182 	switch (r_type)
11183 	  {
11184 	  case R_ARM_LDR_PC_G0:
11185 	  case R_ARM_LDR_SB_G0:
11186 	    group = 0;
11187 	    break;
11188 
11189 	  case R_ARM_LDR_PC_G1:
11190 	  case R_ARM_LDR_SB_G1:
11191 	    group = 1;
11192 	    break;
11193 
11194 	  case R_ARM_LDR_PC_G2:
11195 	  case R_ARM_LDR_SB_G2:
11196 	    group = 2;
11197 	    break;
11198 
11199 	  default:
11200 	    abort ();
11201 	  }
11202 
11203 	/* If REL, extract the addend from the insn.  If RELA, it will
11204 	   have already been fetched for us.  */
11205 	if (globals->use_rel)
11206 	  {
11207 	    int negative = (insn & (1 << 23)) ? 1 : -1;
11208 	    signed_addend = negative * (insn & 0xfff);
11209 	  }
11210 
11211 	/* Compute the value (X) to go in the place.  */
11212 	if (r_type == R_ARM_LDR_PC_G0
11213 	    || r_type == R_ARM_LDR_PC_G1
11214 	    || r_type == R_ARM_LDR_PC_G2)
11215 	  /* PC relative.  */
11216 	  signed_value = value - pc + signed_addend;
11217 	else
11218 	  /* Section base relative.  */
11219 	  signed_value = value - sb + signed_addend;
11220 
11221 	/* Calculate the value of the relevant G_{n-1} to obtain
11222 	   the residual at that stage.  */
11223 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11224 				    group - 1, &residual);
11225 
11226 	/* Check for overflow.  */
11227 	if (residual >= 0x1000)
11228 	  {
11229 	    (*_bfd_error_handler)
11230 	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11231 	       input_bfd, input_section,
11232 	       (long) rel->r_offset, labs (signed_value), howto->name);
11233 	    return bfd_reloc_overflow;
11234 	  }
11235 
11236 	/* Mask out the value and U bit.  */
11237 	insn &= 0xff7ff000;
11238 
11239 	/* Set the U bit if the value to go in the place is non-negative.  */
11240 	if (signed_value >= 0)
11241 	  insn |= 1 << 23;
11242 
11243 	/* Encode the offset.  */
11244 	insn |= residual;
11245 
11246 	bfd_put_32 (input_bfd, insn, hit_data);
11247       }
11248       return bfd_reloc_ok;
11249 
11250     case R_ARM_LDRS_PC_G0:
11251     case R_ARM_LDRS_PC_G1:
11252     case R_ARM_LDRS_PC_G2:
11253     case R_ARM_LDRS_SB_G0:
11254     case R_ARM_LDRS_SB_G1:
11255     case R_ARM_LDRS_SB_G2:
11256       {
11257 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11258 	bfd_vma pc = input_section->output_section->vma
11259 		     + input_section->output_offset + rel->r_offset;
11260 	/* sb is the origin of the *segment* containing the symbol.  */
11261 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11262 	bfd_vma residual;
11263 	bfd_signed_vma signed_value;
11264 	int group = 0;
11265 
11266 	/* Determine which groups of bits to calculate.  */
11267 	switch (r_type)
11268 	  {
11269 	  case R_ARM_LDRS_PC_G0:
11270 	  case R_ARM_LDRS_SB_G0:
11271 	    group = 0;
11272 	    break;
11273 
11274 	  case R_ARM_LDRS_PC_G1:
11275 	  case R_ARM_LDRS_SB_G1:
11276 	    group = 1;
11277 	    break;
11278 
11279 	  case R_ARM_LDRS_PC_G2:
11280 	  case R_ARM_LDRS_SB_G2:
11281 	    group = 2;
11282 	    break;
11283 
11284 	  default:
11285 	    abort ();
11286 	  }
11287 
11288 	/* If REL, extract the addend from the insn.  If RELA, it will
11289 	   have already been fetched for us.  */
11290 	if (globals->use_rel)
11291 	  {
11292 	    int negative = (insn & (1 << 23)) ? 1 : -1;
11293 	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
11294 	  }
11295 
11296 	/* Compute the value (X) to go in the place.  */
11297 	if (r_type == R_ARM_LDRS_PC_G0
11298 	    || r_type == R_ARM_LDRS_PC_G1
11299 	    || r_type == R_ARM_LDRS_PC_G2)
11300 	  /* PC relative.  */
11301 	  signed_value = value - pc + signed_addend;
11302 	else
11303 	  /* Section base relative.  */
11304 	  signed_value = value - sb + signed_addend;
11305 
11306 	/* Calculate the value of the relevant G_{n-1} to obtain
11307 	   the residual at that stage.  */
11308 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11309 				    group - 1, &residual);
11310 
11311 	/* Check for overflow.  */
11312 	if (residual >= 0x100)
11313 	  {
11314 	    (*_bfd_error_handler)
11315 	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11316 	       input_bfd, input_section,
11317 	       (long) rel->r_offset, labs (signed_value), howto->name);
11318 	    return bfd_reloc_overflow;
11319 	  }
11320 
11321 	/* Mask out the value and U bit.  */
11322 	insn &= 0xff7ff0f0;
11323 
11324 	/* Set the U bit if the value to go in the place is non-negative.  */
11325 	if (signed_value >= 0)
11326 	  insn |= 1 << 23;
11327 
11328 	/* Encode the offset.  */
11329 	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
11330 
11331 	bfd_put_32 (input_bfd, insn, hit_data);
11332       }
11333       return bfd_reloc_ok;
11334 
11335     case R_ARM_LDC_PC_G0:
11336     case R_ARM_LDC_PC_G1:
11337     case R_ARM_LDC_PC_G2:
11338     case R_ARM_LDC_SB_G0:
11339     case R_ARM_LDC_SB_G1:
11340     case R_ARM_LDC_SB_G2:
11341       {
11342 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11343 	bfd_vma pc = input_section->output_section->vma
11344 		     + input_section->output_offset + rel->r_offset;
11345 	/* sb is the origin of the *segment* containing the symbol.  */
11346 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11347 	bfd_vma residual;
11348 	bfd_signed_vma signed_value;
11349 	int group = 0;
11350 
11351 	/* Determine which groups of bits to calculate.  */
11352 	switch (r_type)
11353 	  {
11354 	  case R_ARM_LDC_PC_G0:
11355 	  case R_ARM_LDC_SB_G0:
11356 	    group = 0;
11357 	    break;
11358 
11359 	  case R_ARM_LDC_PC_G1:
11360 	  case R_ARM_LDC_SB_G1:
11361 	    group = 1;
11362 	    break;
11363 
11364 	  case R_ARM_LDC_PC_G2:
11365 	  case R_ARM_LDC_SB_G2:
11366 	    group = 2;
11367 	    break;
11368 
11369 	  default:
11370 	    abort ();
11371 	  }
11372 
11373 	/* If REL, extract the addend from the insn.  If RELA, it will
11374 	   have already been fetched for us.  */
11375 	if (globals->use_rel)
11376 	  {
11377 	    int negative = (insn & (1 << 23)) ? 1 : -1;
11378 	    signed_addend = negative * ((insn & 0xff) << 2);
11379 	  }
11380 
11381 	/* Compute the value (X) to go in the place.  */
11382 	if (r_type == R_ARM_LDC_PC_G0
11383 	    || r_type == R_ARM_LDC_PC_G1
11384 	    || r_type == R_ARM_LDC_PC_G2)
11385 	  /* PC relative.  */
11386 	  signed_value = value - pc + signed_addend;
11387 	else
11388 	  /* Section base relative.  */
11389 	  signed_value = value - sb + signed_addend;
11390 
11391 	/* Calculate the value of the relevant G_{n-1} to obtain
11392 	   the residual at that stage.  */
11393 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11394 				    group - 1, &residual);
11395 
11396 	/* Check for overflow.  (The absolute value to go in the place must be
11397 	   divisible by four and, after having been divided by four, must
11398 	   fit in eight bits.)  */
11399 	if ((residual & 0x3) != 0 || residual >= 0x400)
11400 	  {
11401 	    (*_bfd_error_handler)
11402 	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11403 	      input_bfd, input_section,
11404 	      (long) rel->r_offset, labs (signed_value), howto->name);
11405 	    return bfd_reloc_overflow;
11406 	  }
11407 
11408 	/* Mask out the value and U bit.  */
11409 	insn &= 0xff7fff00;
11410 
11411 	/* Set the U bit if the value to go in the place is non-negative.  */
11412 	if (signed_value >= 0)
11413 	  insn |= 1 << 23;
11414 
11415 	/* Encode the offset.  */
11416 	insn |= residual >> 2;
11417 
11418 	bfd_put_32 (input_bfd, insn, hit_data);
11419       }
11420       return bfd_reloc_ok;
11421 
11422     case R_ARM_THM_ALU_ABS_G0_NC:
11423     case R_ARM_THM_ALU_ABS_G1_NC:
11424     case R_ARM_THM_ALU_ABS_G2_NC:
11425     case R_ARM_THM_ALU_ABS_G3_NC:
11426 	{
11427 	    const int shift_array[4] = {0, 8, 16, 24};
11428 	    bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11429 	    bfd_vma addr = value;
11430 	    int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11431 
11432 	    /* Compute address.  */
11433 	    if (globals->use_rel)
11434 		signed_addend = insn & 0xff;
11435 	    addr += signed_addend;
11436 	    if (branch_type == ST_BRANCH_TO_THUMB)
11437 		addr |= 1;
11438 	    /* Clean imm8 insn.  */
11439 	    insn &= 0xff00;
11440 	    /* And update with correct part of address.  */
11441 	    insn |= (addr >> shift) & 0xff;
11442 	    /* Update insn.  */
11443 	    bfd_put_16 (input_bfd, insn, hit_data);
11444 	}
11445 
11446 	*unresolved_reloc_p = FALSE;
11447 	return bfd_reloc_ok;
11448 
11449     default:
11450       return bfd_reloc_notsupported;
11451     }
11452 }
11453 
11454 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */
11455 static void
11456 arm_add_to_rel (bfd *              abfd,
11457 		bfd_byte *         address,
11458 		reloc_howto_type * howto,
11459 		bfd_signed_vma     increment)
11460 {
11461   bfd_signed_vma addend;
11462 
11463   if (howto->type == R_ARM_THM_CALL
11464       || howto->type == R_ARM_THM_JUMP24)
11465     {
11466       int upper_insn, lower_insn;
11467       int upper, lower;
11468 
11469       upper_insn = bfd_get_16 (abfd, address);
11470       lower_insn = bfd_get_16 (abfd, address + 2);
11471       upper = upper_insn & 0x7ff;
11472       lower = lower_insn & 0x7ff;
11473 
11474       addend = (upper << 12) | (lower << 1);
11475       addend += increment;
11476       addend >>= 1;
11477 
11478       upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
11479       lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
11480 
11481       bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
11482       bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
11483     }
11484   else
11485     {
11486       bfd_vma        contents;
11487 
11488       contents = bfd_get_32 (abfd, address);
11489 
11490       /* Get the (signed) value from the instruction.  */
11491       addend = contents & howto->src_mask;
11492       if (addend & ((howto->src_mask + 1) >> 1))
11493 	{
11494 	  bfd_signed_vma mask;
11495 
11496 	  mask = -1;
11497 	  mask &= ~ howto->src_mask;
11498 	  addend |= mask;
11499 	}
11500 
11501       /* Add in the increment, (which is a byte value).  */
11502       switch (howto->type)
11503 	{
11504 	default:
11505 	  addend += increment;
11506 	  break;
11507 
11508 	case R_ARM_PC24:
11509 	case R_ARM_PLT32:
11510 	case R_ARM_CALL:
11511 	case R_ARM_JUMP24:
11512 	  addend <<= howto->size;
11513 	  addend += increment;
11514 
11515 	  /* Should we check for overflow here ?  */
11516 
11517 	  /* Drop any undesired bits.  */
11518 	  addend >>= howto->rightshift;
11519 	  break;
11520 	}
11521 
11522       contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
11523 
11524       bfd_put_32 (abfd, contents, address);
11525     }
11526 }
11527 
11528 #define IS_ARM_TLS_RELOC(R_TYPE)	\
11529   ((R_TYPE) == R_ARM_TLS_GD32		\
11530    || (R_TYPE) == R_ARM_TLS_LDO32	\
11531    || (R_TYPE) == R_ARM_TLS_LDM32	\
11532    || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
11533    || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
11534    || (R_TYPE) == R_ARM_TLS_TPOFF32	\
11535    || (R_TYPE) == R_ARM_TLS_LE32	\
11536    || (R_TYPE) == R_ARM_TLS_IE32	\
11537    || IS_ARM_TLS_GNU_RELOC (R_TYPE))
11538 
11539 /* Specific set of relocations for the gnu tls dialect.  */
11540 #define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
11541   ((R_TYPE) == R_ARM_TLS_GOTDESC	\
11542    || (R_TYPE) == R_ARM_TLS_CALL	\
11543    || (R_TYPE) == R_ARM_THM_TLS_CALL	\
11544    || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
11545    || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11546 
11547 /* Relocate an ARM ELF section.  */
11548 
11549 static bfd_boolean
11550 elf32_arm_relocate_section (bfd *                  output_bfd,
11551 			    struct bfd_link_info * info,
11552 			    bfd *                  input_bfd,
11553 			    asection *             input_section,
11554 			    bfd_byte *             contents,
11555 			    Elf_Internal_Rela *    relocs,
11556 			    Elf_Internal_Sym *     local_syms,
11557 			    asection **            local_sections)
11558 {
11559   Elf_Internal_Shdr *symtab_hdr;
11560   struct elf_link_hash_entry **sym_hashes;
11561   Elf_Internal_Rela *rel;
11562   Elf_Internal_Rela *relend;
11563   const char *name;
11564   struct elf32_arm_link_hash_table * globals;
11565 
11566   globals = elf32_arm_hash_table (info);
11567   if (globals == NULL)
11568     return FALSE;
11569 
11570   symtab_hdr = & elf_symtab_hdr (input_bfd);
11571   sym_hashes = elf_sym_hashes (input_bfd);
11572 
11573   rel = relocs;
11574   relend = relocs + input_section->reloc_count;
11575   for (; rel < relend; rel++)
11576     {
11577       int                          r_type;
11578       reloc_howto_type *           howto;
11579       unsigned long                r_symndx;
11580       Elf_Internal_Sym *           sym;
11581       asection *                   sec;
11582       struct elf_link_hash_entry * h;
11583       bfd_vma                      relocation;
11584       bfd_reloc_status_type        r;
11585       arelent                      bfd_reloc;
11586       char                         sym_type;
11587       bfd_boolean                  unresolved_reloc = FALSE;
11588       char *error_message = NULL;
11589 
11590       r_symndx = ELF32_R_SYM (rel->r_info);
11591       r_type   = ELF32_R_TYPE (rel->r_info);
11592       r_type   = arm_real_reloc_type (globals, r_type);
11593 
11594       if (   r_type == R_ARM_GNU_VTENTRY
11595 	  || r_type == R_ARM_GNU_VTINHERIT)
11596 	continue;
11597 
11598       bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
11599       howto = bfd_reloc.howto;
11600 
11601       h = NULL;
11602       sym = NULL;
11603       sec = NULL;
11604 
11605       if (r_symndx < symtab_hdr->sh_info)
11606 	{
11607 	  sym = local_syms + r_symndx;
11608 	  sym_type = ELF32_ST_TYPE (sym->st_info);
11609 	  sec = local_sections[r_symndx];
11610 
11611 	  /* An object file might have a reference to a local
11612 	     undefined symbol.  This is a daft object file, but we
11613 	     should at least do something about it.  V4BX & NONE
11614 	     relocations do not use the symbol and are explicitly
11615 	     allowed to use the undefined symbol, so allow those.
11616 	     Likewise for relocations against STN_UNDEF.  */
11617 	  if (r_type != R_ARM_V4BX
11618 	      && r_type != R_ARM_NONE
11619 	      && r_symndx != STN_UNDEF
11620 	      && bfd_is_und_section (sec)
11621 	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
11622 	    (*info->callbacks->undefined_symbol)
11623 	      (info, bfd_elf_string_from_elf_section
11624 	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
11625 	       input_bfd, input_section,
11626 	       rel->r_offset, TRUE);
11627 
11628 	  if (globals->use_rel)
11629 	    {
11630 	      relocation = (sec->output_section->vma
11631 			    + sec->output_offset
11632 			    + sym->st_value);
11633 	      if (!bfd_link_relocatable (info)
11634 		  && (sec->flags & SEC_MERGE)
11635 		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
11636 		{
11637 		  asection *msec;
11638 		  bfd_vma addend, value;
11639 
11640 		  switch (r_type)
11641 		    {
11642 		    case R_ARM_MOVW_ABS_NC:
11643 		    case R_ARM_MOVT_ABS:
11644 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
11645 		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
11646 		      addend = (addend ^ 0x8000) - 0x8000;
11647 		      break;
11648 
11649 		    case R_ARM_THM_MOVW_ABS_NC:
11650 		    case R_ARM_THM_MOVT_ABS:
11651 		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
11652 			      << 16;
11653 		      value |= bfd_get_16 (input_bfd,
11654 					   contents + rel->r_offset + 2);
11655 		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
11656 			       | ((value & 0x04000000) >> 15);
11657 		      addend = (addend ^ 0x8000) - 0x8000;
11658 		      break;
11659 
11660 		    default:
11661 		      if (howto->rightshift
11662 			  || (howto->src_mask & (howto->src_mask + 1)))
11663 			{
11664 			  (*_bfd_error_handler)
11665 			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
11666 			     input_bfd, input_section,
11667 			     (long) rel->r_offset, howto->name);
11668 			  return FALSE;
11669 			}
11670 
11671 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
11672 
11673 		      /* Get the (signed) value from the instruction.  */
11674 		      addend = value & howto->src_mask;
11675 		      if (addend & ((howto->src_mask + 1) >> 1))
11676 			{
11677 			  bfd_signed_vma mask;
11678 
11679 			  mask = -1;
11680 			  mask &= ~ howto->src_mask;
11681 			  addend |= mask;
11682 			}
11683 		      break;
11684 		    }
11685 
11686 		  msec = sec;
11687 		  addend =
11688 		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
11689 		    - relocation;
11690 		  addend += msec->output_section->vma + msec->output_offset;
11691 
11692 		  /* Cases here must match those in the preceding
11693 		     switch statement.  */
11694 		  switch (r_type)
11695 		    {
11696 		    case R_ARM_MOVW_ABS_NC:
11697 		    case R_ARM_MOVT_ABS:
11698 		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
11699 			      | (addend & 0xfff);
11700 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
11701 		      break;
11702 
11703 		    case R_ARM_THM_MOVW_ABS_NC:
11704 		    case R_ARM_THM_MOVT_ABS:
11705 		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
11706 			      | (addend & 0xff) | ((addend & 0x0800) << 15);
11707 		      bfd_put_16 (input_bfd, value >> 16,
11708 				  contents + rel->r_offset);
11709 		      bfd_put_16 (input_bfd, value,
11710 				  contents + rel->r_offset + 2);
11711 		      break;
11712 
11713 		    default:
11714 		      value = (value & ~ howto->dst_mask)
11715 			      | (addend & howto->dst_mask);
11716 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
11717 		      break;
11718 		    }
11719 		}
11720 	    }
11721 	  else
11722 	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
11723 	}
11724       else
11725 	{
11726 	  bfd_boolean warned, ignored;
11727 
11728 	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
11729 				   r_symndx, symtab_hdr, sym_hashes,
11730 				   h, sec, relocation,
11731 				   unresolved_reloc, warned, ignored);
11732 
11733 	  sym_type = h->type;
11734 	}
11735 
11736       if (sec != NULL && discarded_section (sec))
11737 	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
11738 					 rel, 1, relend, howto, 0, contents);
11739 
11740       if (bfd_link_relocatable (info))
11741 	{
11742 	  /* This is a relocatable link.  We don't have to change
11743 	     anything, unless the reloc is against a section symbol,
11744 	     in which case we have to adjust according to where the
11745 	     section symbol winds up in the output section.  */
11746 	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
11747 	    {
11748 	      if (globals->use_rel)
11749 		arm_add_to_rel (input_bfd, contents + rel->r_offset,
11750 				howto, (bfd_signed_vma) sec->output_offset);
11751 	      else
11752 		rel->r_addend += sec->output_offset;
11753 	    }
11754 	  continue;
11755 	}
11756 
11757       if (h != NULL)
11758 	name = h->root.root.string;
11759       else
11760 	{
11761 	  name = (bfd_elf_string_from_elf_section
11762 		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
11763 	  if (name == NULL || *name == '\0')
11764 	    name = bfd_section_name (input_bfd, sec);
11765 	}
11766 
11767       if (r_symndx != STN_UNDEF
11768 	  && r_type != R_ARM_NONE
11769 	  && (h == NULL
11770 	      || h->root.type == bfd_link_hash_defined
11771 	      || h->root.type == bfd_link_hash_defweak)
11772 	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
11773 	{
11774 	  (*_bfd_error_handler)
11775 	    ((sym_type == STT_TLS
11776 	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
11777 	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
11778 	     input_bfd,
11779 	     input_section,
11780 	     (long) rel->r_offset,
11781 	     howto->name,
11782 	     name);
11783 	}
11784 
11785       /* We call elf32_arm_final_link_relocate unless we're completely
11786 	 done, i.e., the relaxation produced the final output we want,
11787 	 and we won't let anybody mess with it. Also, we have to do
11788 	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
11789 	 both in relaxed and non-relaxed cases.  */
11790       if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
11791 	  || (IS_ARM_TLS_GNU_RELOC (r_type)
11792 	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
11793 		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
11794 		   & GOT_TLS_GDESC)))
11795 	{
11796 	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
11797 				   contents, rel, h == NULL);
11798 	  /* This may have been marked unresolved because it came from
11799 	     a shared library.  But we've just dealt with that.  */
11800 	  unresolved_reloc = 0;
11801 	}
11802       else
11803 	r = bfd_reloc_continue;
11804 
11805       if (r == bfd_reloc_continue)
11806 	{
11807 	  unsigned char branch_type =
11808 	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
11809 	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
11810 
11811 	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
11812 					     input_section, contents, rel,
11813 					     relocation, info, sec, name,
11814 					     sym_type, branch_type, h,
11815 					     &unresolved_reloc,
11816 					     &error_message);
11817 	}
11818 
11819       /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
11820 	 because such sections are not SEC_ALLOC and thus ld.so will
11821 	 not process them.  */
11822       if (unresolved_reloc
11823 	  && !((input_section->flags & SEC_DEBUGGING) != 0
11824 	       && h->def_dynamic)
11825 	  && _bfd_elf_section_offset (output_bfd, info, input_section,
11826 				      rel->r_offset) != (bfd_vma) -1)
11827 	{
11828 	  (*_bfd_error_handler)
11829 	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
11830 	     input_bfd,
11831 	     input_section,
11832 	     (long) rel->r_offset,
11833 	     howto->name,
11834 	     h->root.root.string);
11835 	  return FALSE;
11836 	}
11837 
11838       if (r != bfd_reloc_ok)
11839 	{
11840 	  switch (r)
11841 	    {
11842 	    case bfd_reloc_overflow:
11843 	      /* If the overflowing reloc was to an undefined symbol,
11844 		 we have already printed one error message and there
11845 		 is no point complaining again.  */
11846 	      if (!h || h->root.type != bfd_link_hash_undefined)
11847 		(*info->callbacks->reloc_overflow)
11848 		  (info, (h ? &h->root : NULL), name, howto->name,
11849 		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
11850 	      break;
11851 
11852 	    case bfd_reloc_undefined:
11853 	      (*info->callbacks->undefined_symbol)
11854 		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
11855 	      break;
11856 
11857 	    case bfd_reloc_outofrange:
11858 	      error_message = _("out of range");
11859 	      goto common_error;
11860 
11861 	    case bfd_reloc_notsupported:
11862 	      error_message = _("unsupported relocation");
11863 	      goto common_error;
11864 
11865 	    case bfd_reloc_dangerous:
11866 	      /* error_message should already be set.  */
11867 	      goto common_error;
11868 
11869 	    default:
11870 	      error_message = _("unknown error");
11871 	      /* Fall through.  */
11872 
11873 	    common_error:
11874 	      BFD_ASSERT (error_message != NULL);
11875 	      (*info->callbacks->reloc_dangerous)
11876 		(info, error_message, input_bfd, input_section, rel->r_offset);
11877 	      break;
11878 	    }
11879 	}
11880     }
11881 
11882   return TRUE;
11883 }
11884 
11885 /* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
11886    adds the edit to the start of the list.  (The list must be built in order of
11887    ascending TINDEX: the function's callers are primarily responsible for
11888    maintaining that condition).  */
11889 
11890 static void
11891 add_unwind_table_edit (arm_unwind_table_edit **head,
11892 		       arm_unwind_table_edit **tail,
11893 		       arm_unwind_edit_type type,
11894 		       asection *linked_section,
11895 		       unsigned int tindex)
11896 {
11897   arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11898       xmalloc (sizeof (arm_unwind_table_edit));
11899 
11900   new_edit->type = type;
11901   new_edit->linked_section = linked_section;
11902   new_edit->index = tindex;
11903 
11904   if (tindex > 0)
11905     {
11906       new_edit->next = NULL;
11907 
11908       if (*tail)
11909 	(*tail)->next = new_edit;
11910 
11911       (*tail) = new_edit;
11912 
11913       if (!*head)
11914 	(*head) = new_edit;
11915     }
11916   else
11917     {
11918       new_edit->next = *head;
11919 
11920       if (!*tail)
11921 	*tail = new_edit;
11922 
11923       *head = new_edit;
11924     }
11925 }
11926 
11927 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11928 
11929 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST mau be negative.  */
11930 static void
11931 adjust_exidx_size(asection *exidx_sec, int adjust)
11932 {
11933   asection *out_sec;
11934 
11935   if (!exidx_sec->rawsize)
11936     exidx_sec->rawsize = exidx_sec->size;
11937 
11938   bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11939   out_sec = exidx_sec->output_section;
11940   /* Adjust size of output section.  */
11941   bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11942 }
11943 
11944 /* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */
11945 static void
11946 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11947 {
11948   struct _arm_elf_section_data *exidx_arm_data;
11949 
11950   exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11951   add_unwind_table_edit (
11952     &exidx_arm_data->u.exidx.unwind_edit_list,
11953     &exidx_arm_data->u.exidx.unwind_edit_tail,
11954     INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11955 
11956   exidx_arm_data->additional_reloc_count++;
11957 
11958   adjust_exidx_size(exidx_sec, 8);
11959 }
11960 
11961 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11962    made to those tables, such that:
11963 
11964      1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11965      2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11966 	codes which have been inlined into the index).
11967 
11968    If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11969 
11970    The edits are applied when the tables are written
11971    (in elf32_arm_write_section).  */
11972 
11973 bfd_boolean
11974 elf32_arm_fix_exidx_coverage (asection **text_section_order,
11975 			      unsigned int num_text_sections,
11976 			      struct bfd_link_info *info,
11977 			      bfd_boolean merge_exidx_entries)
11978 {
11979   bfd *inp;
11980   unsigned int last_second_word = 0, i;
11981   asection *last_exidx_sec = NULL;
11982   asection *last_text_sec = NULL;
11983   int last_unwind_type = -1;
11984 
11985   /* Walk over all EXIDX sections, and create backlinks from the corrsponding
11986      text sections.  */
11987   for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
11988     {
11989       asection *sec;
11990 
11991       for (sec = inp->sections; sec != NULL; sec = sec->next)
11992 	{
11993 	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
11994 	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
11995 
11996 	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
11997 	    continue;
11998 
11999 	  if (elf_sec->linked_to)
12000 	    {
12001 	      Elf_Internal_Shdr *linked_hdr
12002 		= &elf_section_data (elf_sec->linked_to)->this_hdr;
12003 	      struct _arm_elf_section_data *linked_sec_arm_data
12004 		= get_arm_elf_section_data (linked_hdr->bfd_section);
12005 
12006 	      if (linked_sec_arm_data == NULL)
12007 		continue;
12008 
12009 	      /* Link this .ARM.exidx section back from the text section it
12010 		 describes.  */
12011 	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
12012 	    }
12013 	}
12014     }
12015 
12016   /* Walk all text sections in order of increasing VMA.  Eilminate duplicate
12017      index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
12018      and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
12019 
12020   for (i = 0; i < num_text_sections; i++)
12021     {
12022       asection *sec = text_section_order[i];
12023       asection *exidx_sec;
12024       struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
12025       struct _arm_elf_section_data *exidx_arm_data;
12026       bfd_byte *contents = NULL;
12027       int deleted_exidx_bytes = 0;
12028       bfd_vma j;
12029       arm_unwind_table_edit *unwind_edit_head = NULL;
12030       arm_unwind_table_edit *unwind_edit_tail = NULL;
12031       Elf_Internal_Shdr *hdr;
12032       bfd *ibfd;
12033 
12034       if (arm_data == NULL)
12035 	continue;
12036 
12037       exidx_sec = arm_data->u.text.arm_exidx_sec;
12038       if (exidx_sec == NULL)
12039 	{
12040 	  /* Section has no unwind data.  */
12041 	  if (last_unwind_type == 0 || !last_exidx_sec)
12042 	    continue;
12043 
12044 	  /* Ignore zero sized sections.  */
12045 	  if (sec->size == 0)
12046 	    continue;
12047 
12048 	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
12049 	  last_unwind_type = 0;
12050 	  continue;
12051 	}
12052 
12053       /* Skip /DISCARD/ sections.  */
12054       if (bfd_is_abs_section (exidx_sec->output_section))
12055 	continue;
12056 
12057       hdr = &elf_section_data (exidx_sec)->this_hdr;
12058       if (hdr->sh_type != SHT_ARM_EXIDX)
12059 	continue;
12060 
12061       exidx_arm_data = get_arm_elf_section_data (exidx_sec);
12062       if (exidx_arm_data == NULL)
12063 	continue;
12064 
12065       ibfd = exidx_sec->owner;
12066 
12067       if (hdr->contents != NULL)
12068 	contents = hdr->contents;
12069       else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
12070 	/* An error?  */
12071 	continue;
12072 
12073       if (last_unwind_type > 0)
12074 	{
12075 	  unsigned int first_word = bfd_get_32 (ibfd, contents);
12076 	  /* Add cantunwind if first unwind item does not match section
12077 	     start.  */
12078 	  if (first_word != sec->vma)
12079 	    {
12080 	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
12081 	      last_unwind_type = 0;
12082 	    }
12083 	}
12084 
12085       for (j = 0; j < hdr->sh_size; j += 8)
12086 	{
12087 	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
12088 	  int unwind_type;
12089 	  int elide = 0;
12090 
12091 	  /* An EXIDX_CANTUNWIND entry.  */
12092 	  if (second_word == 1)
12093 	    {
12094 	      if (last_unwind_type == 0)
12095 		elide = 1;
12096 	      unwind_type = 0;
12097 	    }
12098 	  /* Inlined unwinding data.  Merge if equal to previous.  */
12099 	  else if ((second_word & 0x80000000) != 0)
12100 	    {
12101 	      if (merge_exidx_entries
12102 		   && last_second_word == second_word && last_unwind_type == 1)
12103 		elide = 1;
12104 	      unwind_type = 1;
12105 	      last_second_word = second_word;
12106 	    }
12107 	  /* Normal table entry.  In theory we could merge these too,
12108 	     but duplicate entries are likely to be much less common.  */
12109 	  else
12110 	    unwind_type = 2;
12111 
12112 	  if (elide && !bfd_link_relocatable (info))
12113 	    {
12114 	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
12115 				     DELETE_EXIDX_ENTRY, NULL, j / 8);
12116 
12117 	      deleted_exidx_bytes += 8;
12118 	    }
12119 
12120 	  last_unwind_type = unwind_type;
12121 	}
12122 
12123       /* Free contents if we allocated it ourselves.  */
12124       if (contents != hdr->contents)
12125 	free (contents);
12126 
12127       /* Record edits to be applied later (in elf32_arm_write_section).  */
12128       exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
12129       exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
12130 
12131       if (deleted_exidx_bytes > 0)
12132 	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
12133 
12134       last_exidx_sec = exidx_sec;
12135       last_text_sec = sec;
12136     }
12137 
12138   /* Add terminating CANTUNWIND entry.  */
12139   if (!bfd_link_relocatable (info) && last_exidx_sec
12140       && last_unwind_type != 0)
12141     insert_cantunwind_after(last_text_sec, last_exidx_sec);
12142 
12143   return TRUE;
12144 }
12145 
12146 static bfd_boolean
12147 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
12148 			       bfd *ibfd, const char *name)
12149 {
12150   asection *sec, *osec;
12151 
12152   sec = bfd_get_linker_section (ibfd, name);
12153   if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
12154     return TRUE;
12155 
12156   osec = sec->output_section;
12157   if (elf32_arm_write_section (obfd, info, sec, sec->contents))
12158     return TRUE;
12159 
12160   if (! bfd_set_section_contents (obfd, osec, sec->contents,
12161 				  sec->output_offset, sec->size))
12162     return FALSE;
12163 
12164   return TRUE;
12165 }
12166 
12167 static bfd_boolean
12168 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
12169 {
12170   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
12171   asection *sec, *osec;
12172 
12173   if (globals == NULL)
12174     return FALSE;
12175 
12176   /* Invoke the regular ELF backend linker to do all the work.  */
12177   if (!bfd_elf_final_link (abfd, info))
12178     return FALSE;
12179 
12180   /* Process stub sections (eg BE8 encoding, ...).  */
12181   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
12182   unsigned int i;
12183   for (i=0; i<htab->top_id; i++)
12184     {
12185       sec = htab->stub_group[i].stub_sec;
12186       /* Only process it once, in its link_sec slot.  */
12187       if (sec && i == htab->stub_group[i].link_sec->id)
12188 	{
12189 	  osec = sec->output_section;
12190 	  elf32_arm_write_section (abfd, info, sec, sec->contents);
12191 	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
12192 					  sec->output_offset, sec->size))
12193 	    return FALSE;
12194 	}
12195     }
12196 
12197   /* Write out any glue sections now that we have created all the
12198      stubs.  */
12199   if (globals->bfd_of_glue_owner != NULL)
12200     {
12201       if (! elf32_arm_output_glue_section (info, abfd,
12202 					   globals->bfd_of_glue_owner,
12203 					   ARM2THUMB_GLUE_SECTION_NAME))
12204 	return FALSE;
12205 
12206       if (! elf32_arm_output_glue_section (info, abfd,
12207 					   globals->bfd_of_glue_owner,
12208 					   THUMB2ARM_GLUE_SECTION_NAME))
12209 	return FALSE;
12210 
12211       if (! elf32_arm_output_glue_section (info, abfd,
12212 					   globals->bfd_of_glue_owner,
12213 					   VFP11_ERRATUM_VENEER_SECTION_NAME))
12214 	return FALSE;
12215 
12216       if (! elf32_arm_output_glue_section (info, abfd,
12217 					   globals->bfd_of_glue_owner,
12218 					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
12219 	return FALSE;
12220 
12221       if (! elf32_arm_output_glue_section (info, abfd,
12222 					   globals->bfd_of_glue_owner,
12223 					   ARM_BX_GLUE_SECTION_NAME))
12224 	return FALSE;
12225     }
12226 
12227   return TRUE;
12228 }
12229 
12230 /* Return a best guess for the machine number based on the attributes.  */
12231 
12232 static unsigned int
12233 bfd_arm_get_mach_from_attributes (bfd * abfd)
12234 {
12235   int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
12236 
12237   switch (arch)
12238     {
12239     case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
12240     case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
12241     case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
12242 
12243     case TAG_CPU_ARCH_V5TE:
12244       {
12245 	char * name;
12246 
12247 	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
12248 	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
12249 
12250 	if (name)
12251 	  {
12252 	    if (strcmp (name, "IWMMXT2") == 0)
12253 	      return bfd_mach_arm_iWMMXt2;
12254 
12255 	    if (strcmp (name, "IWMMXT") == 0)
12256 	      return bfd_mach_arm_iWMMXt;
12257 
12258 	    if (strcmp (name, "XSCALE") == 0)
12259 	      {
12260 		int wmmx;
12261 
12262 		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
12263 		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
12264 		switch (wmmx)
12265 		  {
12266 		  case 1: return bfd_mach_arm_iWMMXt;
12267 		  case 2: return bfd_mach_arm_iWMMXt2;
12268 		  default: return bfd_mach_arm_XScale;
12269 		  }
12270 	      }
12271 	  }
12272 
12273 	return bfd_mach_arm_5TE;
12274       }
12275 
12276     default:
12277       return bfd_mach_arm_unknown;
12278     }
12279 }
12280 
12281 /* Set the right machine number.  */
12282 
12283 static bfd_boolean
12284 elf32_arm_object_p (bfd *abfd)
12285 {
12286   unsigned int mach;
12287 
12288   mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12289 
12290   if (mach == bfd_mach_arm_unknown)
12291     {
12292       if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12293 	mach = bfd_mach_arm_ep9312;
12294       else
12295 	mach = bfd_arm_get_mach_from_attributes (abfd);
12296     }
12297 
12298   bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12299   return TRUE;
12300 }
12301 
12302 /* Function to keep ARM specific flags in the ELF header.  */
12303 
12304 static bfd_boolean
12305 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
12306 {
12307   if (elf_flags_init (abfd)
12308       && elf_elfheader (abfd)->e_flags != flags)
12309     {
12310       if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
12311 	{
12312 	  if (flags & EF_ARM_INTERWORK)
12313 	    (*_bfd_error_handler)
12314 	      (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12315 	       abfd);
12316 	  else
12317 	    _bfd_error_handler
12318 	      (_("Warning: Clearing the interworking flag of %B due to outside request"),
12319 	       abfd);
12320 	}
12321     }
12322   else
12323     {
12324       elf_elfheader (abfd)->e_flags = flags;
12325       elf_flags_init (abfd) = TRUE;
12326     }
12327 
12328   return TRUE;
12329 }
12330 
12331 /* Copy backend specific data from one object module to another.  */
12332 
12333 static bfd_boolean
12334 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
12335 {
12336   flagword in_flags;
12337   flagword out_flags;
12338 
12339   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
12340     return TRUE;
12341 
12342   in_flags  = elf_elfheader (ibfd)->e_flags;
12343   out_flags = elf_elfheader (obfd)->e_flags;
12344 
12345   if (elf_flags_init (obfd)
12346       && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
12347       && in_flags != out_flags)
12348     {
12349       /* Cannot mix APCS26 and APCS32 code.  */
12350       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
12351 	return FALSE;
12352 
12353       /* Cannot mix float APCS and non-float APCS code.  */
12354       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
12355 	return FALSE;
12356 
12357       /* If the src and dest have different interworking flags
12358 	 then turn off the interworking bit.  */
12359       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
12360 	{
12361 	  if (out_flags & EF_ARM_INTERWORK)
12362 	    _bfd_error_handler
12363 	      (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12364 	       obfd, ibfd);
12365 
12366 	  in_flags &= ~EF_ARM_INTERWORK;
12367 	}
12368 
12369       /* Likewise for PIC, though don't warn for this case.  */
12370       if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
12371 	in_flags &= ~EF_ARM_PIC;
12372     }
12373 
12374   elf_elfheader (obfd)->e_flags = in_flags;
12375   elf_flags_init (obfd) = TRUE;
12376 
12377   return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
12378 }
12379 
12380 /* Values for Tag_ABI_PCS_R9_use.  */
12381 enum
12382 {
12383   AEABI_R9_V6,
12384   AEABI_R9_SB,
12385   AEABI_R9_TLS,
12386   AEABI_R9_unused
12387 };
12388 
12389 /* Values for Tag_ABI_PCS_RW_data.  */
12390 enum
12391 {
12392   AEABI_PCS_RW_data_absolute,
12393   AEABI_PCS_RW_data_PCrel,
12394   AEABI_PCS_RW_data_SBrel,
12395   AEABI_PCS_RW_data_unused
12396 };
12397 
12398 /* Values for Tag_ABI_enum_size.  */
12399 enum
12400 {
12401   AEABI_enum_unused,
12402   AEABI_enum_short,
12403   AEABI_enum_wide,
12404   AEABI_enum_forced_wide
12405 };
12406 
12407 /* Determine whether an object attribute tag takes an integer, a
12408    string or both.  */
12409 
12410 static int
12411 elf32_arm_obj_attrs_arg_type (int tag)
12412 {
12413   if (tag == Tag_compatibility)
12414     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12415   else if (tag == Tag_nodefaults)
12416     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12417   else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12418     return ATTR_TYPE_FLAG_STR_VAL;
12419   else if (tag < 32)
12420     return ATTR_TYPE_FLAG_INT_VAL;
12421   else
12422     return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12423 }
12424 
12425 /* The ABI defines that Tag_conformance should be emitted first, and that
12426    Tag_nodefaults should be second (if either is defined).  This sets those
12427    two positions, and bumps up the position of all the remaining tags to
12428    compensate.  */
12429 static int
12430 elf32_arm_obj_attrs_order (int num)
12431 {
12432   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
12433     return Tag_conformance;
12434   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
12435     return Tag_nodefaults;
12436   if ((num - 2) < Tag_nodefaults)
12437     return num - 2;
12438   if ((num - 1) < Tag_conformance)
12439     return num - 1;
12440   return num;
12441 }
12442 
12443 /* Attribute numbers >=64 (mod 128) can be safely ignored.  */
12444 static bfd_boolean
12445 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12446 {
12447   if ((tag & 127) < 64)
12448     {
12449       _bfd_error_handler
12450 	(_("%B: Unknown mandatory EABI object attribute %d"),
12451 	 abfd, tag);
12452       bfd_set_error (bfd_error_bad_value);
12453       return FALSE;
12454     }
12455   else
12456     {
12457       _bfd_error_handler
12458 	(_("Warning: %B: Unknown EABI object attribute %d"),
12459 	 abfd, tag);
12460       return TRUE;
12461     }
12462 }
12463 
12464 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12465    Returns -1 if no architecture could be read.  */
12466 
12467 static int
12468 get_secondary_compatible_arch (bfd *abfd)
12469 {
12470   obj_attribute *attr =
12471     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12472 
12473   /* Note: the tag and its argument below are uleb128 values, though
12474      currently-defined values fit in one byte for each.  */
12475   if (attr->s
12476       && attr->s[0] == Tag_CPU_arch
12477       && (attr->s[1] & 128) != 128
12478       && attr->s[2] == 0)
12479    return attr->s[1];
12480 
12481   /* This tag is "safely ignorable", so don't complain if it looks funny.  */
12482   return -1;
12483 }
12484 
12485 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12486    The tag is removed if ARCH is -1.  */
12487 
12488 static void
12489 set_secondary_compatible_arch (bfd *abfd, int arch)
12490 {
12491   obj_attribute *attr =
12492     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12493 
12494   if (arch == -1)
12495     {
12496       attr->s = NULL;
12497       return;
12498     }
12499 
12500   /* Note: the tag and its argument below are uleb128 values, though
12501      currently-defined values fit in one byte for each.  */
12502   if (!attr->s)
12503     attr->s = (char *) bfd_alloc (abfd, 3);
12504   attr->s[0] = Tag_CPU_arch;
12505   attr->s[1] = arch;
12506   attr->s[2] = '\0';
12507 }
12508 
12509 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12510    into account.  */
12511 
12512 static int
12513 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
12514 		      int newtag, int secondary_compat)
12515 {
12516 #define T(X) TAG_CPU_ARCH_##X
12517   int tagl, tagh, result;
12518   const int v6t2[] =
12519     {
12520       T(V6T2),   /* PRE_V4.  */
12521       T(V6T2),   /* V4.  */
12522       T(V6T2),   /* V4T.  */
12523       T(V6T2),   /* V5T.  */
12524       T(V6T2),   /* V5TE.  */
12525       T(V6T2),   /* V5TEJ.  */
12526       T(V6T2),   /* V6.  */
12527       T(V7),     /* V6KZ.  */
12528       T(V6T2)    /* V6T2.  */
12529     };
12530   const int v6k[] =
12531     {
12532       T(V6K),    /* PRE_V4.  */
12533       T(V6K),    /* V4.  */
12534       T(V6K),    /* V4T.  */
12535       T(V6K),    /* V5T.  */
12536       T(V6K),    /* V5TE.  */
12537       T(V6K),    /* V5TEJ.  */
12538       T(V6K),    /* V6.  */
12539       T(V6KZ),   /* V6KZ.  */
12540       T(V7),     /* V6T2.  */
12541       T(V6K)     /* V6K.  */
12542     };
12543   const int v7[] =
12544     {
12545       T(V7),     /* PRE_V4.  */
12546       T(V7),     /* V4.  */
12547       T(V7),     /* V4T.  */
12548       T(V7),     /* V5T.  */
12549       T(V7),     /* V5TE.  */
12550       T(V7),     /* V5TEJ.  */
12551       T(V7),     /* V6.  */
12552       T(V7),     /* V6KZ.  */
12553       T(V7),     /* V6T2.  */
12554       T(V7),     /* V6K.  */
12555       T(V7)      /* V7.  */
12556     };
12557   const int v6_m[] =
12558     {
12559       -1,        /* PRE_V4.  */
12560       -1,        /* V4.  */
12561       T(V6K),    /* V4T.  */
12562       T(V6K),    /* V5T.  */
12563       T(V6K),    /* V5TE.  */
12564       T(V6K),    /* V5TEJ.  */
12565       T(V6K),    /* V6.  */
12566       T(V6KZ),   /* V6KZ.  */
12567       T(V7),     /* V6T2.  */
12568       T(V6K),    /* V6K.  */
12569       T(V7),     /* V7.  */
12570       T(V6_M)    /* V6_M.  */
12571     };
12572   const int v6s_m[] =
12573     {
12574       -1,        /* PRE_V4.  */
12575       -1,        /* V4.  */
12576       T(V6K),    /* V4T.  */
12577       T(V6K),    /* V5T.  */
12578       T(V6K),    /* V5TE.  */
12579       T(V6K),    /* V5TEJ.  */
12580       T(V6K),    /* V6.  */
12581       T(V6KZ),   /* V6KZ.  */
12582       T(V7),     /* V6T2.  */
12583       T(V6K),    /* V6K.  */
12584       T(V7),     /* V7.  */
12585       T(V6S_M),  /* V6_M.  */
12586       T(V6S_M)   /* V6S_M.  */
12587     };
12588   const int v7e_m[] =
12589     {
12590       -1,        /* PRE_V4.  */
12591       -1,        /* V4.  */
12592       T(V7E_M),  /* V4T.  */
12593       T(V7E_M),  /* V5T.  */
12594       T(V7E_M),  /* V5TE.  */
12595       T(V7E_M),  /* V5TEJ.  */
12596       T(V7E_M),  /* V6.  */
12597       T(V7E_M),  /* V6KZ.  */
12598       T(V7E_M),  /* V6T2.  */
12599       T(V7E_M),  /* V6K.  */
12600       T(V7E_M),  /* V7.  */
12601       T(V7E_M),  /* V6_M.  */
12602       T(V7E_M),  /* V6S_M.  */
12603       T(V7E_M)   /* V7E_M.  */
12604     };
12605   const int v8[] =
12606     {
12607       T(V8),		/* PRE_V4.  */
12608       T(V8),		/* V4.  */
12609       T(V8),		/* V4T.  */
12610       T(V8),		/* V5T.  */
12611       T(V8),		/* V5TE.  */
12612       T(V8),		/* V5TEJ.  */
12613       T(V8),		/* V6.  */
12614       T(V8),		/* V6KZ.  */
12615       T(V8),		/* V6T2.  */
12616       T(V8),		/* V6K.  */
12617       T(V8),		/* V7.  */
12618       T(V8),		/* V6_M.  */
12619       T(V8),		/* V6S_M.  */
12620       T(V8),		/* V7E_M.  */
12621       T(V8)		/* V8.  */
12622     };
12623   const int v8m_baseline[] =
12624     {
12625       -1,		/* PRE_V4.  */
12626       -1,		/* V4.  */
12627       -1,		/* V4T.  */
12628       -1,		/* V5T.  */
12629       -1,		/* V5TE.  */
12630       -1,		/* V5TEJ.  */
12631       -1,		/* V6.  */
12632       -1,		/* V6KZ.  */
12633       -1,		/* V6T2.  */
12634       -1,		/* V6K.  */
12635       -1,		/* V7.  */
12636       T(V8M_BASE),	/* V6_M.  */
12637       T(V8M_BASE),	/* V6S_M.  */
12638       -1,		/* V7E_M.  */
12639       -1,		/* V8.  */
12640       -1,
12641       T(V8M_BASE)	/* V8-M BASELINE.  */
12642     };
12643   const int v8m_mainline[] =
12644     {
12645       -1,		/* PRE_V4.  */
12646       -1,		/* V4.  */
12647       -1,		/* V4T.  */
12648       -1,		/* V5T.  */
12649       -1,		/* V5TE.  */
12650       -1,		/* V5TEJ.  */
12651       -1,		/* V6.  */
12652       -1,		/* V6KZ.  */
12653       -1,		/* V6T2.  */
12654       -1,		/* V6K.  */
12655       T(V8M_MAIN),	/* V7.  */
12656       T(V8M_MAIN),	/* V6_M.  */
12657       T(V8M_MAIN),	/* V6S_M.  */
12658       T(V8M_MAIN),	/* V7E_M.  */
12659       -1,		/* V8.  */
12660       -1,
12661       T(V8M_MAIN),	/* V8-M BASELINE.  */
12662       T(V8M_MAIN)	/* V8-M MAINLINE.  */
12663     };
12664   const int v4t_plus_v6_m[] =
12665     {
12666       -1,		/* PRE_V4.  */
12667       -1,		/* V4.  */
12668       T(V4T),		/* V4T.  */
12669       T(V5T),		/* V5T.  */
12670       T(V5TE),		/* V5TE.  */
12671       T(V5TEJ),		/* V5TEJ.  */
12672       T(V6),		/* V6.  */
12673       T(V6KZ),		/* V6KZ.  */
12674       T(V6T2),		/* V6T2.  */
12675       T(V6K),		/* V6K.  */
12676       T(V7),		/* V7.  */
12677       T(V6_M),		/* V6_M.  */
12678       T(V6S_M),		/* V6S_M.  */
12679       T(V7E_M),		/* V7E_M.  */
12680       T(V8),		/* V8.  */
12681       -1,		/* Unused.  */
12682       T(V8M_BASE),	/* V8-M BASELINE.  */
12683       T(V8M_MAIN),	/* V8-M MAINLINE.  */
12684       T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
12685     };
12686   const int *comb[] =
12687     {
12688       v6t2,
12689       v6k,
12690       v7,
12691       v6_m,
12692       v6s_m,
12693       v7e_m,
12694       v8,
12695       NULL,
12696       v8m_baseline,
12697       v8m_mainline,
12698       /* Pseudo-architecture.  */
12699       v4t_plus_v6_m
12700     };
12701 
12702   /* Check we've not got a higher architecture than we know about.  */
12703 
12704   if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
12705     {
12706       _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
12707       return -1;
12708     }
12709 
12710   /* Override old tag if we have a Tag_also_compatible_with on the output.  */
12711 
12712   if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
12713       || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
12714     oldtag = T(V4T_PLUS_V6_M);
12715 
12716   /* And override the new tag if we have a Tag_also_compatible_with on the
12717      input.  */
12718 
12719   if ((newtag == T(V6_M) && secondary_compat == T(V4T))
12720       || (newtag == T(V4T) && secondary_compat == T(V6_M)))
12721     newtag = T(V4T_PLUS_V6_M);
12722 
12723   tagl = (oldtag < newtag) ? oldtag : newtag;
12724   result = tagh = (oldtag > newtag) ? oldtag : newtag;
12725 
12726   /* Architectures before V6KZ add features monotonically.  */
12727   if (tagh <= TAG_CPU_ARCH_V6KZ)
12728     return result;
12729 
12730   result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
12731 
12732   /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12733      as the canonical version.  */
12734   if (result == T(V4T_PLUS_V6_M))
12735     {
12736       result = T(V4T);
12737       *secondary_compat_out = T(V6_M);
12738     }
12739   else
12740     *secondary_compat_out = -1;
12741 
12742   if (result == -1)
12743     {
12744       _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12745 			  ibfd, oldtag, newtag);
12746       return -1;
12747     }
12748 
12749   return result;
12750 #undef T
12751 }
12752 
12753 /* Query attributes object to see if integer divide instructions may be
12754    present in an object.  */
12755 static bfd_boolean
12756 elf32_arm_attributes_accept_div (const obj_attribute *attr)
12757 {
12758   int arch = attr[Tag_CPU_arch].i;
12759   int profile = attr[Tag_CPU_arch_profile].i;
12760 
12761   switch (attr[Tag_DIV_use].i)
12762     {
12763     case 0:
12764       /* Integer divide allowed if instruction contained in archetecture.  */
12765       if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
12766 	return TRUE;
12767       else if (arch >= TAG_CPU_ARCH_V7E_M)
12768 	return TRUE;
12769       else
12770 	return FALSE;
12771 
12772     case 1:
12773       /* Integer divide explicitly prohibited.  */
12774       return FALSE;
12775 
12776     default:
12777       /* Unrecognised case - treat as allowing divide everywhere.  */
12778     case 2:
12779       /* Integer divide allowed in ARM state.  */
12780       return TRUE;
12781     }
12782 }
12783 
12784 /* Query attributes object to see if integer divide instructions are
12785    forbidden to be in the object.  This is not the inverse of
12786    elf32_arm_attributes_accept_div.  */
12787 static bfd_boolean
12788 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12789 {
12790   return attr[Tag_DIV_use].i == 1;
12791 }
12792 
12793 /* Merge EABI object attributes from IBFD into OBFD.  Raise an error if there
12794    are conflicting attributes.  */
12795 
12796 static bfd_boolean
12797 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12798 {
12799   obj_attribute *in_attr;
12800   obj_attribute *out_attr;
12801   /* Some tags have 0 = don't care, 1 = strong requirement,
12802      2 = weak requirement.  */
12803   static const int order_021[3] = {0, 2, 1};
12804   int i;
12805   bfd_boolean result = TRUE;
12806   const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12807 
12808   /* Skip the linker stubs file.  This preserves previous behavior
12809      of accepting unknown attributes in the first input file - but
12810      is that a bug?  */
12811   if (ibfd->flags & BFD_LINKER_CREATED)
12812     return TRUE;
12813 
12814   /* Skip any input that hasn't attribute section.
12815      This enables to link object files without attribute section with
12816      any others.  */
12817   if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12818     return TRUE;
12819 
12820   if (!elf_known_obj_attributes_proc (obfd)[0].i)
12821     {
12822       /* This is the first object.  Copy the attributes.  */
12823       _bfd_elf_copy_obj_attributes (ibfd, obfd);
12824 
12825       out_attr = elf_known_obj_attributes_proc (obfd);
12826 
12827       /* Use the Tag_null value to indicate the attributes have been
12828 	 initialized.  */
12829       out_attr[0].i = 1;
12830 
12831       /* We do not output objects with Tag_MPextension_use_legacy - we move
12832 	 the attribute's value to Tag_MPextension_use.  */
12833       if (out_attr[Tag_MPextension_use_legacy].i != 0)
12834 	{
12835 	  if (out_attr[Tag_MPextension_use].i != 0
12836 	      && out_attr[Tag_MPextension_use_legacy].i
12837 		!= out_attr[Tag_MPextension_use].i)
12838 	    {
12839 	      _bfd_error_handler
12840 		(_("Error: %B has both the current and legacy "
12841 		   "Tag_MPextension_use attributes"), ibfd);
12842 	      result = FALSE;
12843 	    }
12844 
12845 	  out_attr[Tag_MPextension_use] =
12846 	    out_attr[Tag_MPextension_use_legacy];
12847 	  out_attr[Tag_MPextension_use_legacy].type = 0;
12848 	  out_attr[Tag_MPextension_use_legacy].i = 0;
12849 	}
12850 
12851       return result;
12852     }
12853 
12854   in_attr = elf_known_obj_attributes_proc (ibfd);
12855   out_attr = elf_known_obj_attributes_proc (obfd);
12856   /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
12857   if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12858     {
12859       /* Ignore mismatches if the object doesn't use floating point or is
12860 	 floating point ABI independent.  */
12861       if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12862 	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12863 	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12864 	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12865       else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12866 	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12867 	{
12868 	  _bfd_error_handler
12869 	    (_("error: %B uses VFP register arguments, %B does not"),
12870 	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12871 	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12872 	  result = FALSE;
12873 	}
12874     }
12875 
12876   for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12877     {
12878       /* Merge this attribute with existing attributes.  */
12879       switch (i)
12880 	{
12881 	case Tag_CPU_raw_name:
12882 	case Tag_CPU_name:
12883 	  /* These are merged after Tag_CPU_arch.  */
12884 	  break;
12885 
12886 	case Tag_ABI_optimization_goals:
12887 	case Tag_ABI_FP_optimization_goals:
12888 	  /* Use the first value seen.  */
12889 	  break;
12890 
12891 	case Tag_CPU_arch:
12892 	  {
12893 	    int secondary_compat = -1, secondary_compat_out = -1;
12894 	    unsigned int saved_out_attr = out_attr[i].i;
12895 	    int arch_attr;
12896 	    static const char *name_table[] =
12897 	      {
12898 		/* These aren't real CPU names, but we can't guess
12899 		   that from the architecture version alone.  */
12900 		"Pre v4",
12901 		"ARM v4",
12902 		"ARM v4T",
12903 		"ARM v5T",
12904 		"ARM v5TE",
12905 		"ARM v5TEJ",
12906 		"ARM v6",
12907 		"ARM v6KZ",
12908 		"ARM v6T2",
12909 		"ARM v6K",
12910 		"ARM v7",
12911 		"ARM v6-M",
12912 		"ARM v6S-M",
12913 		"ARM v8",
12914 		"",
12915 		"ARM v8-M.baseline",
12916 		"ARM v8-M.mainline",
12917 	    };
12918 
12919 	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
12920 	    secondary_compat = get_secondary_compatible_arch (ibfd);
12921 	    secondary_compat_out = get_secondary_compatible_arch (obfd);
12922 	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12923 					      &secondary_compat_out,
12924 					      in_attr[i].i,
12925 					      secondary_compat);
12926 
12927 	    /* Return with error if failed to merge.  */
12928 	    if (arch_attr == -1)
12929 	      return FALSE;
12930 
12931 	    out_attr[i].i = arch_attr;
12932 
12933 	    set_secondary_compatible_arch (obfd, secondary_compat_out);
12934 
12935 	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
12936 	    if (out_attr[i].i == saved_out_attr)
12937 	      ; /* Leave the names alone.  */
12938 	    else if (out_attr[i].i == in_attr[i].i)
12939 	      {
12940 		/* The output architecture has been changed to match the
12941 		   input architecture.  Use the input names.  */
12942 		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12943 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12944 		  : NULL;
12945 		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12946 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12947 		  : NULL;
12948 	      }
12949 	    else
12950 	      {
12951 		out_attr[Tag_CPU_name].s = NULL;
12952 		out_attr[Tag_CPU_raw_name].s = NULL;
12953 	      }
12954 
12955 	    /* If we still don't have a value for Tag_CPU_name,
12956 	       make one up now.  Tag_CPU_raw_name remains blank.  */
12957 	    if (out_attr[Tag_CPU_name].s == NULL
12958 		&& out_attr[i].i < ARRAY_SIZE (name_table))
12959 	      out_attr[Tag_CPU_name].s =
12960 		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12961 	  }
12962 	  break;
12963 
12964 	case Tag_ARM_ISA_use:
12965 	case Tag_THUMB_ISA_use:
12966 	case Tag_WMMX_arch:
12967 	case Tag_Advanced_SIMD_arch:
12968 	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
12969 	case Tag_ABI_FP_rounding:
12970 	case Tag_ABI_FP_exceptions:
12971 	case Tag_ABI_FP_user_exceptions:
12972 	case Tag_ABI_FP_number_model:
12973 	case Tag_FP_HP_extension:
12974 	case Tag_CPU_unaligned_access:
12975 	case Tag_T2EE_use:
12976 	case Tag_MPextension_use:
12977 	  /* Use the largest value specified.  */
12978 	  if (in_attr[i].i > out_attr[i].i)
12979 	    out_attr[i].i = in_attr[i].i;
12980 	  break;
12981 
12982 	case Tag_ABI_align_preserved:
12983 	case Tag_ABI_PCS_RO_data:
12984 	  /* Use the smallest value specified.  */
12985 	  if (in_attr[i].i < out_attr[i].i)
12986 	    out_attr[i].i = in_attr[i].i;
12987 	  break;
12988 
12989 	case Tag_ABI_align_needed:
12990 	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12991 	      && (in_attr[Tag_ABI_align_preserved].i == 0
12992 		  || out_attr[Tag_ABI_align_preserved].i == 0))
12993 	    {
12994 	      /* This error message should be enabled once all non-conformant
12995 		 binaries in the toolchain have had the attributes set
12996 		 properly.
12997 	      _bfd_error_handler
12998 		(_("error: %B: 8-byte data alignment conflicts with %B"),
12999 		 obfd, ibfd);
13000 	      result = FALSE; */
13001 	    }
13002 	  /* Fall through.  */
13003 	case Tag_ABI_FP_denormal:
13004 	case Tag_ABI_PCS_GOT_use:
13005 	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
13006 	     value if greater than 2 (for future-proofing).  */
13007 	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
13008 	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
13009 		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
13010 	    out_attr[i].i = in_attr[i].i;
13011 	  break;
13012 
13013 	case Tag_Virtualization_use:
13014 	  /* The virtualization tag effectively stores two bits of
13015 	     information: the intended use of TrustZone (in bit 0), and the
13016 	     intended use of Virtualization (in bit 1).  */
13017 	  if (out_attr[i].i == 0)
13018 	    out_attr[i].i = in_attr[i].i;
13019 	  else if (in_attr[i].i != 0
13020 		   && in_attr[i].i != out_attr[i].i)
13021 	    {
13022 	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
13023 		out_attr[i].i = 3;
13024 	      else
13025 		{
13026 		  _bfd_error_handler
13027 		    (_("error: %B: unable to merge virtualization attributes "
13028 		       "with %B"),
13029 		     obfd, ibfd);
13030 		  result = FALSE;
13031 		}
13032 	    }
13033 	  break;
13034 
13035 	case Tag_CPU_arch_profile:
13036 	  if (out_attr[i].i != in_attr[i].i)
13037 	    {
13038 	      /* 0 will merge with anything.
13039 		 'A' and 'S' merge to 'A'.
13040 		 'R' and 'S' merge to 'R'.
13041 		 'M' and 'A|R|S' is an error.  */
13042 	      if (out_attr[i].i == 0
13043 		  || (out_attr[i].i == 'S'
13044 		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
13045 		out_attr[i].i = in_attr[i].i;
13046 	      else if (in_attr[i].i == 0
13047 		       || (in_attr[i].i == 'S'
13048 			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
13049 		; /* Do nothing.  */
13050 	      else
13051 		{
13052 		  _bfd_error_handler
13053 		    (_("error: %B: Conflicting architecture profiles %c/%c"),
13054 		     ibfd,
13055 		     in_attr[i].i ? in_attr[i].i : '0',
13056 		     out_attr[i].i ? out_attr[i].i : '0');
13057 		  result = FALSE;
13058 		}
13059 	    }
13060 	  break;
13061 
13062 	case Tag_DSP_extension:
13063 	  /* No need to change output value if any of:
13064 	     - pre (<=) ARMv5T input architecture (do not have DSP)
13065 	     - M input profile not ARMv7E-M and do not have DSP.  */
13066 	  if (in_attr[Tag_CPU_arch].i <= 3
13067 	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
13068 		  && in_attr[Tag_CPU_arch].i != 13
13069 		  && in_attr[i].i == 0))
13070 	    ; /* Do nothing.  */
13071 	  /* Output value should be 0 if DSP part of architecture, ie.
13072 	     - post (>=) ARMv5te architecture output
13073 	     - A, R or S profile output or ARMv7E-M output architecture.  */
13074 	  else if (out_attr[Tag_CPU_arch].i >= 4
13075 		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
13076 		       || out_attr[Tag_CPU_arch_profile].i == 'R'
13077 		       || out_attr[Tag_CPU_arch_profile].i == 'S'
13078 		       || out_attr[Tag_CPU_arch].i == 13))
13079 	    out_attr[i].i = 0;
13080 	  /* Otherwise, DSP instructions are added and not part of output
13081 	     architecture.  */
13082 	  else
13083 	    out_attr[i].i = 1;
13084 	  break;
13085 
13086 	case Tag_FP_arch:
13087 	    {
13088 	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
13089 		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
13090 		 when it's 0.  It might mean absence of FP hardware if
13091 		 Tag_FP_arch is zero.  */
13092 
13093 #define VFP_VERSION_COUNT 9
13094 	      static const struct
13095 	      {
13096 		  int ver;
13097 		  int regs;
13098 	      } vfp_versions[VFP_VERSION_COUNT] =
13099 		{
13100 		  {0, 0},
13101 		  {1, 16},
13102 		  {2, 16},
13103 		  {3, 32},
13104 		  {3, 16},
13105 		  {4, 32},
13106 		  {4, 16},
13107 		  {8, 32},
13108 		  {8, 16}
13109 		};
13110 	      int ver;
13111 	      int regs;
13112 	      int newval;
13113 
13114 	      /* If the output has no requirement about FP hardware,
13115 		 follow the requirement of the input.  */
13116 	      if (out_attr[i].i == 0)
13117 		{
13118 		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
13119 		  out_attr[i].i = in_attr[i].i;
13120 		  out_attr[Tag_ABI_HardFP_use].i
13121 		    = in_attr[Tag_ABI_HardFP_use].i;
13122 		  break;
13123 		}
13124 	      /* If the input has no requirement about FP hardware, do
13125 		 nothing.  */
13126 	      else if (in_attr[i].i == 0)
13127 		{
13128 		  BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
13129 		  break;
13130 		}
13131 
13132 	      /* Both the input and the output have nonzero Tag_FP_arch.
13133 		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */
13134 
13135 	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
13136 		 do nothing.  */
13137 	      if (in_attr[Tag_ABI_HardFP_use].i == 0
13138 		  && out_attr[Tag_ABI_HardFP_use].i == 0)
13139 		;
13140 	      /* If the input and the output have different Tag_ABI_HardFP_use,
13141 		 the combination of them is 0 (implied by Tag_FP_arch).  */
13142 	      else if (in_attr[Tag_ABI_HardFP_use].i
13143 		       != out_attr[Tag_ABI_HardFP_use].i)
13144 		out_attr[Tag_ABI_HardFP_use].i = 0;
13145 
13146 	      /* Now we can handle Tag_FP_arch.  */
13147 
13148 	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13149 		 pick the biggest.  */
13150 	      if (in_attr[i].i >= VFP_VERSION_COUNT
13151 		  && in_attr[i].i > out_attr[i].i)
13152 		{
13153 		  out_attr[i] = in_attr[i];
13154 		  break;
13155 		}
13156 	      /* The output uses the superset of input features
13157 		 (ISA version) and registers.  */
13158 	      ver = vfp_versions[in_attr[i].i].ver;
13159 	      if (ver < vfp_versions[out_attr[i].i].ver)
13160 		ver = vfp_versions[out_attr[i].i].ver;
13161 	      regs = vfp_versions[in_attr[i].i].regs;
13162 	      if (regs < vfp_versions[out_attr[i].i].regs)
13163 		regs = vfp_versions[out_attr[i].i].regs;
13164 	      /* This assumes all possible supersets are also a valid
13165 		 options.  */
13166 	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
13167 		{
13168 		  if (regs == vfp_versions[newval].regs
13169 		      && ver == vfp_versions[newval].ver)
13170 		    break;
13171 		}
13172 	      out_attr[i].i = newval;
13173 	    }
13174 	  break;
13175 	case Tag_PCS_config:
13176 	  if (out_attr[i].i == 0)
13177 	    out_attr[i].i = in_attr[i].i;
13178 	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
13179 	    {
13180 	      /* It's sometimes ok to mix different configs, so this is only
13181 		 a warning.  */
13182 	      _bfd_error_handler
13183 		(_("Warning: %B: Conflicting platform configuration"), ibfd);
13184 	    }
13185 	  break;
13186 	case Tag_ABI_PCS_R9_use:
13187 	  if (in_attr[i].i != out_attr[i].i
13188 	      && out_attr[i].i != AEABI_R9_unused
13189 	      && in_attr[i].i != AEABI_R9_unused)
13190 	    {
13191 	      _bfd_error_handler
13192 		(_("error: %B: Conflicting use of R9"), ibfd);
13193 	      result = FALSE;
13194 	    }
13195 	  if (out_attr[i].i == AEABI_R9_unused)
13196 	    out_attr[i].i = in_attr[i].i;
13197 	  break;
13198 	case Tag_ABI_PCS_RW_data:
13199 	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
13200 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
13201 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
13202 	    {
13203 	      _bfd_error_handler
13204 		(_("error: %B: SB relative addressing conflicts with use of R9"),
13205 		 ibfd);
13206 	      result = FALSE;
13207 	    }
13208 	  /* Use the smallest value specified.  */
13209 	  if (in_attr[i].i < out_attr[i].i)
13210 	    out_attr[i].i = in_attr[i].i;
13211 	  break;
13212 	case Tag_ABI_PCS_wchar_t:
13213 	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
13214 	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
13215 	    {
13216 	      _bfd_error_handler
13217 		(_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13218 		 ibfd, in_attr[i].i, out_attr[i].i);
13219 	    }
13220 	  else if (in_attr[i].i && !out_attr[i].i)
13221 	    out_attr[i].i = in_attr[i].i;
13222 	  break;
13223 	case Tag_ABI_enum_size:
13224 	  if (in_attr[i].i != AEABI_enum_unused)
13225 	    {
13226 	      if (out_attr[i].i == AEABI_enum_unused
13227 		  || out_attr[i].i == AEABI_enum_forced_wide)
13228 		{
13229 		  /* The existing object is compatible with anything.
13230 		     Use whatever requirements the new object has.  */
13231 		  out_attr[i].i = in_attr[i].i;
13232 		}
13233 	      else if (in_attr[i].i != AEABI_enum_forced_wide
13234 		       && out_attr[i].i != in_attr[i].i
13235 		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
13236 		{
13237 		  static const char *aeabi_enum_names[] =
13238 		    { "", "variable-size", "32-bit", "" };
13239 		  const char *in_name =
13240 		    in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13241 		    ? aeabi_enum_names[in_attr[i].i]
13242 		    : "<unknown>";
13243 		  const char *out_name =
13244 		    out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13245 		    ? aeabi_enum_names[out_attr[i].i]
13246 		    : "<unknown>";
13247 		  _bfd_error_handler
13248 		    (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13249 		     ibfd, in_name, out_name);
13250 		}
13251 	    }
13252 	  break;
13253 	case Tag_ABI_VFP_args:
13254 	  /* Aready done.  */
13255 	  break;
13256 	case Tag_ABI_WMMX_args:
13257 	  if (in_attr[i].i != out_attr[i].i)
13258 	    {
13259 	      _bfd_error_handler
13260 		(_("error: %B uses iWMMXt register arguments, %B does not"),
13261 		 ibfd, obfd);
13262 	      result = FALSE;
13263 	    }
13264 	  break;
13265 	case Tag_compatibility:
13266 	  /* Merged in target-independent code.  */
13267 	  break;
13268 	case Tag_ABI_HardFP_use:
13269 	  /* This is handled along with Tag_FP_arch.  */
13270 	  break;
13271 	case Tag_ABI_FP_16bit_format:
13272 	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
13273 	    {
13274 	      if (in_attr[i].i != out_attr[i].i)
13275 		{
13276 		  _bfd_error_handler
13277 		    (_("error: fp16 format mismatch between %B and %B"),
13278 		     ibfd, obfd);
13279 		  result = FALSE;
13280 		}
13281 	    }
13282 	  if (in_attr[i].i != 0)
13283 	    out_attr[i].i = in_attr[i].i;
13284 	  break;
13285 
13286 	case Tag_DIV_use:
13287 	  /* A value of zero on input means that the divide instruction may
13288 	     be used if available in the base architecture as specified via
13289 	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
13290 	     the user did not want divide instructions.  A value of 2
13291 	     explicitly means that divide instructions were allowed in ARM
13292 	     and Thumb state.  */
13293 	  if (in_attr[i].i == out_attr[i].i)
13294 	    /* Do nothing.  */ ;
13295 	  else if (elf32_arm_attributes_forbid_div (in_attr)
13296 		   && !elf32_arm_attributes_accept_div (out_attr))
13297 	    out_attr[i].i = 1;
13298 	  else if (elf32_arm_attributes_forbid_div (out_attr)
13299 		   && elf32_arm_attributes_accept_div (in_attr))
13300 	    out_attr[i].i = in_attr[i].i;
13301 	  else if (in_attr[i].i == 2)
13302 	    out_attr[i].i = in_attr[i].i;
13303 	  break;
13304 
13305 	case Tag_MPextension_use_legacy:
13306 	  /* We don't output objects with Tag_MPextension_use_legacy - we
13307 	     move the value to Tag_MPextension_use.  */
13308 	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13309 	    {
13310 	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13311 		{
13312 		  _bfd_error_handler
13313 		    (_("%B has has both the current and legacy "
13314 		       "Tag_MPextension_use attributes"),
13315 		     ibfd);
13316 		  result = FALSE;
13317 		}
13318 	    }
13319 
13320 	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13321 	    out_attr[Tag_MPextension_use] = in_attr[i];
13322 
13323 	  break;
13324 
13325 	case Tag_nodefaults:
13326 	  /* This tag is set if it exists, but the value is unused (and is
13327 	     typically zero).  We don't actually need to do anything here -
13328 	     the merge happens automatically when the type flags are merged
13329 	     below.  */
13330 	  break;
13331 	case Tag_also_compatible_with:
13332 	  /* Already done in Tag_CPU_arch.  */
13333 	  break;
13334 	case Tag_conformance:
13335 	  /* Keep the attribute if it matches.  Throw it away otherwise.
13336 	     No attribute means no claim to conform.  */
13337 	  if (!in_attr[i].s || !out_attr[i].s
13338 	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13339 	    out_attr[i].s = NULL;
13340 	  break;
13341 
13342 	default:
13343 	  result
13344 	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13345 	}
13346 
13347       /* If out_attr was copied from in_attr then it won't have a type yet.  */
13348       if (in_attr[i].type && !out_attr[i].type)
13349 	out_attr[i].type = in_attr[i].type;
13350     }
13351 
13352   /* Merge Tag_compatibility attributes and any common GNU ones.  */
13353   if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13354     return FALSE;
13355 
13356   /* Check for any attributes not known on ARM.  */
13357   result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13358 
13359   return result;
13360 }
13361 
13362 
13363 /* Return TRUE if the two EABI versions are incompatible.  */
13364 
13365 static bfd_boolean
13366 elf32_arm_versions_compatible (unsigned iver, unsigned over)
13367 {
13368   /* v4 and v5 are the same spec before and after it was released,
13369      so allow mixing them.  */
13370   if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
13371       || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
13372     return TRUE;
13373 
13374   return (iver == over);
13375 }
13376 
13377 /* Merge backend specific data from an object file to the output
13378    object file when linking.  */
13379 
13380 static bfd_boolean
13381 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13382 
13383 /* Display the flags field.  */
13384 
13385 static bfd_boolean
13386 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
13387 {
13388   FILE * file = (FILE *) ptr;
13389   unsigned long flags;
13390 
13391   BFD_ASSERT (abfd != NULL && ptr != NULL);
13392 
13393   /* Print normal ELF private data.  */
13394   _bfd_elf_print_private_bfd_data (abfd, ptr);
13395 
13396   flags = elf_elfheader (abfd)->e_flags;
13397   /* Ignore init flag - it may not be set, despite the flags field
13398      containing valid data.  */
13399 
13400   /* xgettext:c-format */
13401   fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
13402 
13403   switch (EF_ARM_EABI_VERSION (flags))
13404     {
13405     case EF_ARM_EABI_UNKNOWN:
13406       /* The following flag bits are GNU extensions and not part of the
13407 	 official ARM ELF extended ABI.  Hence they are only decoded if
13408 	 the EABI version is not set.  */
13409       if (flags & EF_ARM_INTERWORK)
13410 	fprintf (file, _(" [interworking enabled]"));
13411 
13412       if (flags & EF_ARM_APCS_26)
13413 	fprintf (file, " [APCS-26]");
13414       else
13415 	fprintf (file, " [APCS-32]");
13416 
13417       if (flags & EF_ARM_VFP_FLOAT)
13418 	fprintf (file, _(" [VFP float format]"));
13419       else if (flags & EF_ARM_MAVERICK_FLOAT)
13420 	fprintf (file, _(" [Maverick float format]"));
13421       else
13422 	fprintf (file, _(" [FPA float format]"));
13423 
13424       if (flags & EF_ARM_APCS_FLOAT)
13425 	fprintf (file, _(" [floats passed in float registers]"));
13426 
13427       if (flags & EF_ARM_PIC)
13428 	fprintf (file, _(" [position independent]"));
13429 
13430       if (flags & EF_ARM_NEW_ABI)
13431 	fprintf (file, _(" [new ABI]"));
13432 
13433       if (flags & EF_ARM_OLD_ABI)
13434 	fprintf (file, _(" [old ABI]"));
13435 
13436       if (flags & EF_ARM_SOFT_FLOAT)
13437 	fprintf (file, _(" [software FP]"));
13438 
13439       flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
13440 		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
13441 		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
13442 		 | EF_ARM_MAVERICK_FLOAT);
13443       break;
13444 
13445     case EF_ARM_EABI_VER1:
13446       fprintf (file, _(" [Version1 EABI]"));
13447 
13448       if (flags & EF_ARM_SYMSARESORTED)
13449 	fprintf (file, _(" [sorted symbol table]"));
13450       else
13451 	fprintf (file, _(" [unsorted symbol table]"));
13452 
13453       flags &= ~ EF_ARM_SYMSARESORTED;
13454       break;
13455 
13456     case EF_ARM_EABI_VER2:
13457       fprintf (file, _(" [Version2 EABI]"));
13458 
13459       if (flags & EF_ARM_SYMSARESORTED)
13460 	fprintf (file, _(" [sorted symbol table]"));
13461       else
13462 	fprintf (file, _(" [unsorted symbol table]"));
13463 
13464       if (flags & EF_ARM_DYNSYMSUSESEGIDX)
13465 	fprintf (file, _(" [dynamic symbols use segment index]"));
13466 
13467       if (flags & EF_ARM_MAPSYMSFIRST)
13468 	fprintf (file, _(" [mapping symbols precede others]"));
13469 
13470       flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
13471 		 | EF_ARM_MAPSYMSFIRST);
13472       break;
13473 
13474     case EF_ARM_EABI_VER3:
13475       fprintf (file, _(" [Version3 EABI]"));
13476       break;
13477 
13478     case EF_ARM_EABI_VER4:
13479       fprintf (file, _(" [Version4 EABI]"));
13480       goto eabi;
13481 
13482     case EF_ARM_EABI_VER5:
13483       fprintf (file, _(" [Version5 EABI]"));
13484 
13485       if (flags & EF_ARM_ABI_FLOAT_SOFT)
13486 	fprintf (file, _(" [soft-float ABI]"));
13487 
13488       if (flags & EF_ARM_ABI_FLOAT_HARD)
13489 	fprintf (file, _(" [hard-float ABI]"));
13490 
13491       flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
13492 
13493     eabi:
13494       if (flags & EF_ARM_BE8)
13495 	fprintf (file, _(" [BE8]"));
13496 
13497       if (flags & EF_ARM_LE8)
13498 	fprintf (file, _(" [LE8]"));
13499 
13500       flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
13501       break;
13502 
13503     default:
13504       fprintf (file, _(" <EABI version unrecognised>"));
13505       break;
13506     }
13507 
13508   flags &= ~ EF_ARM_EABIMASK;
13509 
13510   if (flags & EF_ARM_RELEXEC)
13511     fprintf (file, _(" [relocatable executable]"));
13512 
13513   flags &= ~EF_ARM_RELEXEC;
13514 
13515   if (flags)
13516     fprintf (file, _("<Unrecognised flag bits set>"));
13517 
13518   fputc ('\n', file);
13519 
13520   return TRUE;
13521 }
13522 
13523 static int
13524 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13525 {
13526   switch (ELF_ST_TYPE (elf_sym->st_info))
13527     {
13528     case STT_ARM_TFUNC:
13529       return ELF_ST_TYPE (elf_sym->st_info);
13530 
13531     case STT_ARM_16BIT:
13532       /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13533 	 This allows us to distinguish between data used by Thumb instructions
13534 	 and non-data (which is probably code) inside Thumb regions of an
13535 	 executable.  */
13536       if (type != STT_OBJECT && type != STT_TLS)
13537 	return ELF_ST_TYPE (elf_sym->st_info);
13538       break;
13539 
13540     default:
13541       break;
13542     }
13543 
13544   return type;
13545 }
13546 
13547 static asection *
13548 elf32_arm_gc_mark_hook (asection *sec,
13549 			struct bfd_link_info *info,
13550 			Elf_Internal_Rela *rel,
13551 			struct elf_link_hash_entry *h,
13552 			Elf_Internal_Sym *sym)
13553 {
13554   if (h != NULL)
13555     switch (ELF32_R_TYPE (rel->r_info))
13556       {
13557       case R_ARM_GNU_VTINHERIT:
13558       case R_ARM_GNU_VTENTRY:
13559 	return NULL;
13560       }
13561 
13562   return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13563 }
13564 
13565 /* Update the got entry reference counts for the section being removed.  */
13566 
13567 static bfd_boolean
13568 elf32_arm_gc_sweep_hook (bfd *                     abfd,
13569 			 struct bfd_link_info *    info,
13570 			 asection *                sec,
13571 			 const Elf_Internal_Rela * relocs)
13572 {
13573   Elf_Internal_Shdr *symtab_hdr;
13574   struct elf_link_hash_entry **sym_hashes;
13575   bfd_signed_vma *local_got_refcounts;
13576   const Elf_Internal_Rela *rel, *relend;
13577   struct elf32_arm_link_hash_table * globals;
13578 
13579   if (bfd_link_relocatable (info))
13580     return TRUE;
13581 
13582   globals = elf32_arm_hash_table (info);
13583   if (globals == NULL)
13584     return FALSE;
13585 
13586   elf_section_data (sec)->local_dynrel = NULL;
13587 
13588   symtab_hdr = & elf_symtab_hdr (abfd);
13589   sym_hashes = elf_sym_hashes (abfd);
13590   local_got_refcounts = elf_local_got_refcounts (abfd);
13591 
13592   check_use_blx (globals);
13593 
13594   relend = relocs + sec->reloc_count;
13595   for (rel = relocs; rel < relend; rel++)
13596     {
13597       unsigned long r_symndx;
13598       struct elf_link_hash_entry *h = NULL;
13599       struct elf32_arm_link_hash_entry *eh;
13600       int r_type;
13601       bfd_boolean call_reloc_p;
13602       bfd_boolean may_become_dynamic_p;
13603       bfd_boolean may_need_local_target_p;
13604       union gotplt_union *root_plt;
13605       struct arm_plt_info *arm_plt;
13606 
13607       r_symndx = ELF32_R_SYM (rel->r_info);
13608       if (r_symndx >= symtab_hdr->sh_info)
13609 	{
13610 	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
13611 	  while (h->root.type == bfd_link_hash_indirect
13612 		 || h->root.type == bfd_link_hash_warning)
13613 	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
13614 	}
13615       eh = (struct elf32_arm_link_hash_entry *) h;
13616 
13617       call_reloc_p = FALSE;
13618       may_become_dynamic_p = FALSE;
13619       may_need_local_target_p = FALSE;
13620 
13621       r_type = ELF32_R_TYPE (rel->r_info);
13622       r_type = arm_real_reloc_type (globals, r_type);
13623       switch (r_type)
13624 	{
13625 	case R_ARM_GOT32:
13626 	case R_ARM_GOT_PREL:
13627 	case R_ARM_TLS_GD32:
13628 	case R_ARM_TLS_IE32:
13629 	  if (h != NULL)
13630 	    {
13631 	      if (h->got.refcount > 0)
13632 		h->got.refcount -= 1;
13633 	    }
13634 	  else if (local_got_refcounts != NULL)
13635 	    {
13636 	      if (local_got_refcounts[r_symndx] > 0)
13637 		local_got_refcounts[r_symndx] -= 1;
13638 	    }
13639 	  break;
13640 
13641 	case R_ARM_TLS_LDM32:
13642 	  globals->tls_ldm_got.refcount -= 1;
13643 	  break;
13644 
13645 	case R_ARM_PC24:
13646 	case R_ARM_PLT32:
13647 	case R_ARM_CALL:
13648 	case R_ARM_JUMP24:
13649 	case R_ARM_PREL31:
13650 	case R_ARM_THM_CALL:
13651 	case R_ARM_THM_JUMP24:
13652 	case R_ARM_THM_JUMP19:
13653 	  call_reloc_p = TRUE;
13654 	  may_need_local_target_p = TRUE;
13655 	  break;
13656 
13657 	case R_ARM_ABS12:
13658 	  if (!globals->vxworks_p)
13659 	    {
13660 	      may_need_local_target_p = TRUE;
13661 	      break;
13662 	    }
13663 	  /* Fall through.  */
13664 	case R_ARM_ABS32:
13665 	case R_ARM_ABS32_NOI:
13666 	case R_ARM_REL32:
13667 	case R_ARM_REL32_NOI:
13668 	case R_ARM_MOVW_ABS_NC:
13669 	case R_ARM_MOVT_ABS:
13670 	case R_ARM_MOVW_PREL_NC:
13671 	case R_ARM_MOVT_PREL:
13672 	case R_ARM_THM_MOVW_ABS_NC:
13673 	case R_ARM_THM_MOVT_ABS:
13674 	case R_ARM_THM_MOVW_PREL_NC:
13675 	case R_ARM_THM_MOVT_PREL:
13676 	  /* Should the interworking branches be here also?  */
13677 	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
13678 	      && (sec->flags & SEC_ALLOC) != 0)
13679 	    {
13680 	      if (h == NULL
13681 		  && elf32_arm_howto_from_type (r_type)->pc_relative)
13682 		{
13683 		  call_reloc_p = TRUE;
13684 		  may_need_local_target_p = TRUE;
13685 		}
13686 	      else
13687 		may_become_dynamic_p = TRUE;
13688 	    }
13689 	  else
13690 	    may_need_local_target_p = TRUE;
13691 	  break;
13692 
13693 	default:
13694 	  break;
13695 	}
13696 
13697       if (may_need_local_target_p
13698 	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
13699 	{
13700 	  /* If PLT refcount book-keeping is wrong and too low, we'll
13701 	     see a zero value (going to -1) for the root PLT reference
13702 	     count.  */
13703 	  if (root_plt->refcount >= 0)
13704 	    {
13705 	      BFD_ASSERT (root_plt->refcount != 0);
13706 	      root_plt->refcount -= 1;
13707 	    }
13708 	  else
13709 	    /* A value of -1 means the symbol has become local, forced
13710 	       or seeing a hidden definition.  Any other negative value
13711 	       is an error.  */
13712 	    BFD_ASSERT (root_plt->refcount == -1);
13713 
13714 	  if (!call_reloc_p)
13715 	    arm_plt->noncall_refcount--;
13716 
13717 	  if (r_type == R_ARM_THM_CALL)
13718 	    arm_plt->maybe_thumb_refcount--;
13719 
13720 	  if (r_type == R_ARM_THM_JUMP24
13721 	      || r_type == R_ARM_THM_JUMP19)
13722 	    arm_plt->thumb_refcount--;
13723 	}
13724 
13725       if (may_become_dynamic_p)
13726 	{
13727 	  struct elf_dyn_relocs **pp;
13728 	  struct elf_dyn_relocs *p;
13729 
13730 	  if (h != NULL)
13731 	    pp = &(eh->dyn_relocs);
13732 	  else
13733 	    {
13734 	      Elf_Internal_Sym *isym;
13735 
13736 	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
13737 					    abfd, r_symndx);
13738 	      if (isym == NULL)
13739 		return FALSE;
13740 	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
13741 	      if (pp == NULL)
13742 		return FALSE;
13743 	    }
13744 	  for (; (p = *pp) != NULL; pp = &p->next)
13745 	    if (p->sec == sec)
13746 	      {
13747 		/* Everything must go for SEC.  */
13748 		*pp = p->next;
13749 		break;
13750 	      }
13751 	}
13752     }
13753 
13754   return TRUE;
13755 }
13756 
13757 /* Look through the relocs for a section during the first phase.  */
13758 
13759 static bfd_boolean
13760 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
13761 			asection *sec, const Elf_Internal_Rela *relocs)
13762 {
13763   Elf_Internal_Shdr *symtab_hdr;
13764   struct elf_link_hash_entry **sym_hashes;
13765   const Elf_Internal_Rela *rel;
13766   const Elf_Internal_Rela *rel_end;
13767   bfd *dynobj;
13768   asection *sreloc;
13769   struct elf32_arm_link_hash_table *htab;
13770   bfd_boolean call_reloc_p;
13771   bfd_boolean may_become_dynamic_p;
13772   bfd_boolean may_need_local_target_p;
13773   unsigned long nsyms;
13774 
13775   if (bfd_link_relocatable (info))
13776     return TRUE;
13777 
13778   BFD_ASSERT (is_arm_elf (abfd));
13779 
13780   htab = elf32_arm_hash_table (info);
13781   if (htab == NULL)
13782     return FALSE;
13783 
13784   sreloc = NULL;
13785 
13786   /* Create dynamic sections for relocatable executables so that we can
13787      copy relocations.  */
13788   if (htab->root.is_relocatable_executable
13789       && ! htab->root.dynamic_sections_created)
13790     {
13791       if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
13792 	return FALSE;
13793     }
13794 
13795   if (htab->root.dynobj == NULL)
13796     htab->root.dynobj = abfd;
13797   if (!create_ifunc_sections (info))
13798     return FALSE;
13799 
13800   dynobj = htab->root.dynobj;
13801 
13802   symtab_hdr = & elf_symtab_hdr (abfd);
13803   sym_hashes = elf_sym_hashes (abfd);
13804   nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
13805 
13806   rel_end = relocs + sec->reloc_count;
13807   for (rel = relocs; rel < rel_end; rel++)
13808     {
13809       Elf_Internal_Sym *isym;
13810       struct elf_link_hash_entry *h;
13811       struct elf32_arm_link_hash_entry *eh;
13812       unsigned long r_symndx;
13813       int r_type;
13814 
13815       r_symndx = ELF32_R_SYM (rel->r_info);
13816       r_type = ELF32_R_TYPE (rel->r_info);
13817       r_type = arm_real_reloc_type (htab, r_type);
13818 
13819       if (r_symndx >= nsyms
13820 	  /* PR 9934: It is possible to have relocations that do not
13821 	     refer to symbols, thus it is also possible to have an
13822 	     object file containing relocations but no symbol table.  */
13823 	  && (r_symndx > STN_UNDEF || nsyms > 0))
13824 	{
13825 	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
13826 				   r_symndx);
13827 	  return FALSE;
13828 	}
13829 
13830       h = NULL;
13831       isym = NULL;
13832       if (nsyms > 0)
13833 	{
13834 	  if (r_symndx < symtab_hdr->sh_info)
13835 	    {
13836 	      /* A local symbol.  */
13837 	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
13838 					    abfd, r_symndx);
13839 	      if (isym == NULL)
13840 		return FALSE;
13841 	    }
13842 	  else
13843 	    {
13844 	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
13845 	      while (h->root.type == bfd_link_hash_indirect
13846 		     || h->root.type == bfd_link_hash_warning)
13847 		h = (struct elf_link_hash_entry *) h->root.u.i.link;
13848 
13849 	      /* PR15323, ref flags aren't set for references in the
13850 		 same object.  */
13851 	      h->root.non_ir_ref = 1;
13852 	    }
13853 	}
13854 
13855       eh = (struct elf32_arm_link_hash_entry *) h;
13856 
13857       call_reloc_p = FALSE;
13858       may_become_dynamic_p = FALSE;
13859       may_need_local_target_p = FALSE;
13860 
13861       /* Could be done earlier, if h were already available.  */
13862       r_type = elf32_arm_tls_transition (info, r_type, h);
13863       switch (r_type)
13864 	{
13865 	  case R_ARM_GOT32:
13866 	  case R_ARM_GOT_PREL:
13867 	  case R_ARM_TLS_GD32:
13868 	  case R_ARM_TLS_IE32:
13869 	  case R_ARM_TLS_GOTDESC:
13870 	  case R_ARM_TLS_DESCSEQ:
13871 	  case R_ARM_THM_TLS_DESCSEQ:
13872 	  case R_ARM_TLS_CALL:
13873 	  case R_ARM_THM_TLS_CALL:
13874 	    /* This symbol requires a global offset table entry.  */
13875 	    {
13876 	      int tls_type, old_tls_type;
13877 
13878 	      switch (r_type)
13879 		{
13880 		case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
13881 
13882 		case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
13883 
13884 		case R_ARM_TLS_GOTDESC:
13885 		case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
13886 		case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
13887 		  tls_type = GOT_TLS_GDESC; break;
13888 
13889 		default: tls_type = GOT_NORMAL; break;
13890 		}
13891 
13892 	      if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
13893 		info->flags |= DF_STATIC_TLS;
13894 
13895 	      if (h != NULL)
13896 		{
13897 		  h->got.refcount++;
13898 		  old_tls_type = elf32_arm_hash_entry (h)->tls_type;
13899 		}
13900 	      else
13901 		{
13902 		  /* This is a global offset table entry for a local symbol.  */
13903 		  if (!elf32_arm_allocate_local_sym_info (abfd))
13904 		    return FALSE;
13905 		  elf_local_got_refcounts (abfd)[r_symndx] += 1;
13906 		  old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
13907 		}
13908 
13909 	      /* If a variable is accessed with both tls methods, two
13910 		 slots may be created.  */
13911 	      if (GOT_TLS_GD_ANY_P (old_tls_type)
13912 		  && GOT_TLS_GD_ANY_P (tls_type))
13913 		tls_type |= old_tls_type;
13914 
13915 	      /* We will already have issued an error message if there
13916 		 is a TLS/non-TLS mismatch, based on the symbol
13917 		 type.  So just combine any TLS types needed.  */
13918 	      if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
13919 		  && tls_type != GOT_NORMAL)
13920 		tls_type |= old_tls_type;
13921 
13922 	      /* If the symbol is accessed in both IE and GDESC
13923 		 method, we're able to relax. Turn off the GDESC flag,
13924 		 without messing up with any other kind of tls types
13925 		 that may be involved.  */
13926 	      if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
13927 		tls_type &= ~GOT_TLS_GDESC;
13928 
13929 	      if (old_tls_type != tls_type)
13930 		{
13931 		  if (h != NULL)
13932 		    elf32_arm_hash_entry (h)->tls_type = tls_type;
13933 		  else
13934 		    elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
13935 		}
13936 	    }
13937 	    /* Fall through.  */
13938 
13939 	  case R_ARM_TLS_LDM32:
13940 	    if (r_type == R_ARM_TLS_LDM32)
13941 		htab->tls_ldm_got.refcount++;
13942 	    /* Fall through.  */
13943 
13944 	  case R_ARM_GOTOFF32:
13945 	  case R_ARM_GOTPC:
13946 	    if (htab->root.sgot == NULL
13947 		&& !create_got_section (htab->root.dynobj, info))
13948 	      return FALSE;
13949 	    break;
13950 
13951 	  case R_ARM_PC24:
13952 	  case R_ARM_PLT32:
13953 	  case R_ARM_CALL:
13954 	  case R_ARM_JUMP24:
13955 	  case R_ARM_PREL31:
13956 	  case R_ARM_THM_CALL:
13957 	  case R_ARM_THM_JUMP24:
13958 	  case R_ARM_THM_JUMP19:
13959 	    call_reloc_p = TRUE;
13960 	    may_need_local_target_p = TRUE;
13961 	    break;
13962 
13963 	  case R_ARM_ABS12:
13964 	    /* VxWorks uses dynamic R_ARM_ABS12 relocations for
13965 	       ldr __GOTT_INDEX__ offsets.  */
13966 	    if (!htab->vxworks_p)
13967 	      {
13968 		may_need_local_target_p = TRUE;
13969 		break;
13970 	      }
13971 	    else goto jump_over;
13972 
13973 	    /* Fall through.  */
13974 
13975 	  case R_ARM_MOVW_ABS_NC:
13976 	  case R_ARM_MOVT_ABS:
13977 	  case R_ARM_THM_MOVW_ABS_NC:
13978 	  case R_ARM_THM_MOVT_ABS:
13979 	    if (bfd_link_pic (info))
13980 	      {
13981 		(*_bfd_error_handler)
13982 		  (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
13983 		   abfd, elf32_arm_howto_table_1[r_type].name,
13984 		   (h) ? h->root.root.string : "a local symbol");
13985 		bfd_set_error (bfd_error_bad_value);
13986 		return FALSE;
13987 	      }
13988 
13989 	    /* Fall through.  */
13990 	  case R_ARM_ABS32:
13991 	  case R_ARM_ABS32_NOI:
13992 	jump_over:
13993 	    if (h != NULL && bfd_link_executable (info))
13994 	      {
13995 		h->pointer_equality_needed = 1;
13996 	      }
13997 	    /* Fall through.  */
13998 	  case R_ARM_REL32:
13999 	  case R_ARM_REL32_NOI:
14000 	  case R_ARM_MOVW_PREL_NC:
14001 	  case R_ARM_MOVT_PREL:
14002 	  case R_ARM_THM_MOVW_PREL_NC:
14003 	  case R_ARM_THM_MOVT_PREL:
14004 
14005 	    /* Should the interworking branches be listed here?  */
14006 	    if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
14007 		&& (sec->flags & SEC_ALLOC) != 0)
14008 	      {
14009 		if (h == NULL
14010 		    && elf32_arm_howto_from_type (r_type)->pc_relative)
14011 		  {
14012 		    /* In shared libraries and relocatable executables,
14013 		       we treat local relative references as calls;
14014 		       see the related SYMBOL_CALLS_LOCAL code in
14015 		       allocate_dynrelocs.  */
14016 		    call_reloc_p = TRUE;
14017 		    may_need_local_target_p = TRUE;
14018 		  }
14019 		else
14020 		  /* We are creating a shared library or relocatable
14021 		     executable, and this is a reloc against a global symbol,
14022 		     or a non-PC-relative reloc against a local symbol.
14023 		     We may need to copy the reloc into the output.  */
14024 		  may_become_dynamic_p = TRUE;
14025 	      }
14026 	    else
14027 	      may_need_local_target_p = TRUE;
14028 	    break;
14029 
14030 	/* This relocation describes the C++ object vtable hierarchy.
14031 	   Reconstruct it for later use during GC.  */
14032 	case R_ARM_GNU_VTINHERIT:
14033 	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
14034 	    return FALSE;
14035 	  break;
14036 
14037 	/* This relocation describes which C++ vtable entries are actually
14038 	   used.  Record for later use during GC.  */
14039 	case R_ARM_GNU_VTENTRY:
14040 	  BFD_ASSERT (h != NULL);
14041 	  if (h != NULL
14042 	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
14043 	    return FALSE;
14044 	  break;
14045 	}
14046 
14047       if (h != NULL)
14048 	{
14049 	  if (call_reloc_p)
14050 	    /* We may need a .plt entry if the function this reloc
14051 	       refers to is in a different object, regardless of the
14052 	       symbol's type.  We can't tell for sure yet, because
14053 	       something later might force the symbol local.  */
14054 	    h->needs_plt = 1;
14055 	  else if (may_need_local_target_p)
14056 	    /* If this reloc is in a read-only section, we might
14057 	       need a copy reloc.  We can't check reliably at this
14058 	       stage whether the section is read-only, as input
14059 	       sections have not yet been mapped to output sections.
14060 	       Tentatively set the flag for now, and correct in
14061 	       adjust_dynamic_symbol.  */
14062 	    h->non_got_ref = 1;
14063 	}
14064 
14065       if (may_need_local_target_p
14066 	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
14067 	{
14068 	  union gotplt_union *root_plt;
14069 	  struct arm_plt_info *arm_plt;
14070 	  struct arm_local_iplt_info *local_iplt;
14071 
14072 	  if (h != NULL)
14073 	    {
14074 	      root_plt = &h->plt;
14075 	      arm_plt = &eh->plt;
14076 	    }
14077 	  else
14078 	    {
14079 	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
14080 	      if (local_iplt == NULL)
14081 		return FALSE;
14082 	      root_plt = &local_iplt->root;
14083 	      arm_plt = &local_iplt->arm;
14084 	    }
14085 
14086 	  /* If the symbol is a function that doesn't bind locally,
14087 	     this relocation will need a PLT entry.  */
14088 	  if (root_plt->refcount != -1)
14089 	    root_plt->refcount += 1;
14090 
14091 	  if (!call_reloc_p)
14092 	    arm_plt->noncall_refcount++;
14093 
14094 	  /* It's too early to use htab->use_blx here, so we have to
14095 	     record possible blx references separately from
14096 	     relocs that definitely need a thumb stub.  */
14097 
14098 	  if (r_type == R_ARM_THM_CALL)
14099 	    arm_plt->maybe_thumb_refcount += 1;
14100 
14101 	  if (r_type == R_ARM_THM_JUMP24
14102 	      || r_type == R_ARM_THM_JUMP19)
14103 	    arm_plt->thumb_refcount += 1;
14104 	}
14105 
14106       if (may_become_dynamic_p)
14107 	{
14108 	  struct elf_dyn_relocs *p, **head;
14109 
14110 	  /* Create a reloc section in dynobj.  */
14111 	  if (sreloc == NULL)
14112 	    {
14113 	      sreloc = _bfd_elf_make_dynamic_reloc_section
14114 		(sec, dynobj, 2, abfd, ! htab->use_rel);
14115 
14116 	      if (sreloc == NULL)
14117 		return FALSE;
14118 
14119 	      /* BPABI objects never have dynamic relocations mapped.  */
14120 	      if (htab->symbian_p)
14121 		{
14122 		  flagword flags;
14123 
14124 		  flags = bfd_get_section_flags (dynobj, sreloc);
14125 		  flags &= ~(SEC_LOAD | SEC_ALLOC);
14126 		  bfd_set_section_flags (dynobj, sreloc, flags);
14127 		}
14128 	    }
14129 
14130 	  /* If this is a global symbol, count the number of
14131 	     relocations we need for this symbol.  */
14132 	  if (h != NULL)
14133 	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
14134 	  else
14135 	    {
14136 	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
14137 	      if (head == NULL)
14138 		return FALSE;
14139 	    }
14140 
14141 	  p = *head;
14142 	  if (p == NULL || p->sec != sec)
14143 	    {
14144 	      bfd_size_type amt = sizeof *p;
14145 
14146 	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
14147 	      if (p == NULL)
14148 		return FALSE;
14149 	      p->next = *head;
14150 	      *head = p;
14151 	      p->sec = sec;
14152 	      p->count = 0;
14153 	      p->pc_count = 0;
14154 	    }
14155 
14156 	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
14157 	    p->pc_count += 1;
14158 	  p->count += 1;
14159 	}
14160     }
14161 
14162   return TRUE;
14163 }
14164 
14165 /* Unwinding tables are not referenced directly.  This pass marks them as
14166    required if the corresponding code section is marked.  */
14167 
14168 static bfd_boolean
14169 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
14170 				  elf_gc_mark_hook_fn gc_mark_hook)
14171 {
14172   bfd *sub;
14173   Elf_Internal_Shdr **elf_shdrp;
14174   bfd_boolean again;
14175 
14176   _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
14177 
14178   /* Marking EH data may cause additional code sections to be marked,
14179      requiring multiple passes.  */
14180   again = TRUE;
14181   while (again)
14182     {
14183       again = FALSE;
14184       for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
14185 	{
14186 	  asection *o;
14187 
14188 	  if (! is_arm_elf (sub))
14189 	    continue;
14190 
14191 	  elf_shdrp = elf_elfsections (sub);
14192 	  for (o = sub->sections; o != NULL; o = o->next)
14193 	    {
14194 	      Elf_Internal_Shdr *hdr;
14195 
14196 	      hdr = &elf_section_data (o)->this_hdr;
14197 	      if (hdr->sh_type == SHT_ARM_EXIDX
14198 		  && hdr->sh_link
14199 		  && hdr->sh_link < elf_numsections (sub)
14200 		  && !o->gc_mark
14201 		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
14202 		{
14203 		  again = TRUE;
14204 		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
14205 		    return FALSE;
14206 		}
14207 	    }
14208 	}
14209     }
14210 
14211   return TRUE;
14212 }
14213 
14214 /* Treat mapping symbols as special target symbols.  */
14215 
14216 static bfd_boolean
14217 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
14218 {
14219   return bfd_is_arm_special_symbol_name (sym->name,
14220 					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
14221 }
14222 
14223 /* This is a copy of elf_find_function() from elf.c except that
14224    ARM mapping symbols are ignored when looking for function names
14225    and STT_ARM_TFUNC is considered to a function type.  */
14226 
14227 static bfd_boolean
14228 arm_elf_find_function (bfd *         abfd ATTRIBUTE_UNUSED,
14229 		       asymbol **    symbols,
14230 		       asection *    section,
14231 		       bfd_vma       offset,
14232 		       const char ** filename_ptr,
14233 		       const char ** functionname_ptr)
14234 {
14235   const char * filename = NULL;
14236   asymbol * func = NULL;
14237   bfd_vma low_func = 0;
14238   asymbol ** p;
14239 
14240   for (p = symbols; *p != NULL; p++)
14241     {
14242       elf_symbol_type *q;
14243 
14244       q = (elf_symbol_type *) *p;
14245 
14246       switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
14247 	{
14248 	default:
14249 	  break;
14250 	case STT_FILE:
14251 	  filename = bfd_asymbol_name (&q->symbol);
14252 	  break;
14253 	case STT_FUNC:
14254 	case STT_ARM_TFUNC:
14255 	case STT_NOTYPE:
14256 	  /* Skip mapping symbols.  */
14257 	  if ((q->symbol.flags & BSF_LOCAL)
14258 	      && bfd_is_arm_special_symbol_name (q->symbol.name,
14259 		    BFD_ARM_SPECIAL_SYM_TYPE_ANY))
14260 	    continue;
14261 	  /* Fall through.  */
14262 	  if (bfd_get_section (&q->symbol) == section
14263 	      && q->symbol.value >= low_func
14264 	      && q->symbol.value <= offset)
14265 	    {
14266 	      func = (asymbol *) q;
14267 	      low_func = q->symbol.value;
14268 	    }
14269 	  break;
14270 	}
14271     }
14272 
14273   if (func == NULL)
14274     return FALSE;
14275 
14276   if (filename_ptr)
14277     *filename_ptr = filename;
14278   if (functionname_ptr)
14279     *functionname_ptr = bfd_asymbol_name (func);
14280 
14281   return TRUE;
14282 }
14283 
14284 
14285 /* Find the nearest line to a particular section and offset, for error
14286    reporting.   This code is a duplicate of the code in elf.c, except
14287    that it uses arm_elf_find_function.  */
14288 
14289 static bfd_boolean
14290 elf32_arm_find_nearest_line (bfd *          abfd,
14291 			     asymbol **     symbols,
14292 			     asection *     section,
14293 			     bfd_vma        offset,
14294 			     const char **  filename_ptr,
14295 			     const char **  functionname_ptr,
14296 			     unsigned int * line_ptr,
14297 			     unsigned int * discriminator_ptr)
14298 {
14299   bfd_boolean found = FALSE;
14300 
14301   if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
14302 				     filename_ptr, functionname_ptr,
14303 				     line_ptr, discriminator_ptr,
14304 				     dwarf_debug_sections, 0,
14305 				     & elf_tdata (abfd)->dwarf2_find_line_info))
14306     {
14307       if (!*functionname_ptr)
14308 	arm_elf_find_function (abfd, symbols, section, offset,
14309 			       *filename_ptr ? NULL : filename_ptr,
14310 			       functionname_ptr);
14311 
14312       return TRUE;
14313     }
14314 
14315   /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
14316      uses DWARF1.  */
14317 
14318   if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
14319 					     & found, filename_ptr,
14320 					     functionname_ptr, line_ptr,
14321 					     & elf_tdata (abfd)->line_info))
14322     return FALSE;
14323 
14324   if (found && (*functionname_ptr || *line_ptr))
14325     return TRUE;
14326 
14327   if (symbols == NULL)
14328     return FALSE;
14329 
14330   if (! arm_elf_find_function (abfd, symbols, section, offset,
14331 			       filename_ptr, functionname_ptr))
14332     return FALSE;
14333 
14334   *line_ptr = 0;
14335   return TRUE;
14336 }
14337 
14338 static bfd_boolean
14339 elf32_arm_find_inliner_info (bfd *          abfd,
14340 			     const char **  filename_ptr,
14341 			     const char **  functionname_ptr,
14342 			     unsigned int * line_ptr)
14343 {
14344   bfd_boolean found;
14345   found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14346 					 functionname_ptr, line_ptr,
14347 					 & elf_tdata (abfd)->dwarf2_find_line_info);
14348   return found;
14349 }
14350 
14351 /* Adjust a symbol defined by a dynamic object and referenced by a
14352    regular object.  The current definition is in some section of the
14353    dynamic object, but we're not including those sections.  We have to
14354    change the definition to something the rest of the link can
14355    understand.  */
14356 
14357 static bfd_boolean
14358 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
14359 				 struct elf_link_hash_entry * h)
14360 {
14361   bfd * dynobj;
14362   asection * s;
14363   struct elf32_arm_link_hash_entry * eh;
14364   struct elf32_arm_link_hash_table *globals;
14365 
14366   globals = elf32_arm_hash_table (info);
14367   if (globals == NULL)
14368     return FALSE;
14369 
14370   dynobj = elf_hash_table (info)->dynobj;
14371 
14372   /* Make sure we know what is going on here.  */
14373   BFD_ASSERT (dynobj != NULL
14374 	      && (h->needs_plt
14375 		  || h->type == STT_GNU_IFUNC
14376 		  || h->u.weakdef != NULL
14377 		  || (h->def_dynamic
14378 		      && h->ref_regular
14379 		      && !h->def_regular)));
14380 
14381   eh = (struct elf32_arm_link_hash_entry *) h;
14382 
14383   /* If this is a function, put it in the procedure linkage table.  We
14384      will fill in the contents of the procedure linkage table later,
14385      when we know the address of the .got section.  */
14386   if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
14387     {
14388       /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
14389 	 symbol binds locally.  */
14390       if (h->plt.refcount <= 0
14391 	  || (h->type != STT_GNU_IFUNC
14392 	      && (SYMBOL_CALLS_LOCAL (info, h)
14393 		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
14394 		      && h->root.type == bfd_link_hash_undefweak))))
14395 	{
14396 	  /* This case can occur if we saw a PLT32 reloc in an input
14397 	     file, but the symbol was never referred to by a dynamic
14398 	     object, or if all references were garbage collected.  In
14399 	     such a case, we don't actually need to build a procedure
14400 	     linkage table, and we can just do a PC24 reloc instead.  */
14401 	  h->plt.offset = (bfd_vma) -1;
14402 	  eh->plt.thumb_refcount = 0;
14403 	  eh->plt.maybe_thumb_refcount = 0;
14404 	  eh->plt.noncall_refcount = 0;
14405 	  h->needs_plt = 0;
14406 	}
14407 
14408       return TRUE;
14409     }
14410   else
14411     {
14412       /* It's possible that we incorrectly decided a .plt reloc was
14413 	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
14414 	 in check_relocs.  We can't decide accurately between function
14415 	 and non-function syms in check-relocs; Objects loaded later in
14416 	 the link may change h->type.  So fix it now.  */
14417       h->plt.offset = (bfd_vma) -1;
14418       eh->plt.thumb_refcount = 0;
14419       eh->plt.maybe_thumb_refcount = 0;
14420       eh->plt.noncall_refcount = 0;
14421     }
14422 
14423   /* If this is a weak symbol, and there is a real definition, the
14424      processor independent code will have arranged for us to see the
14425      real definition first, and we can just use the same value.  */
14426   if (h->u.weakdef != NULL)
14427     {
14428       BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
14429 		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
14430       h->root.u.def.section = h->u.weakdef->root.u.def.section;
14431       h->root.u.def.value = h->u.weakdef->root.u.def.value;
14432       return TRUE;
14433     }
14434 
14435   /* If there are no non-GOT references, we do not need a copy
14436      relocation.  */
14437   if (!h->non_got_ref)
14438     return TRUE;
14439 
14440   /* This is a reference to a symbol defined by a dynamic object which
14441      is not a function.  */
14442 
14443   /* If we are creating a shared library, we must presume that the
14444      only references to the symbol are via the global offset table.
14445      For such cases we need not do anything here; the relocations will
14446      be handled correctly by relocate_section.  Relocatable executables
14447      can reference data in shared objects directly, so we don't need to
14448      do anything here.  */
14449   if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
14450     return TRUE;
14451 
14452   /* We must allocate the symbol in our .dynbss section, which will
14453      become part of the .bss section of the executable.  There will be
14454      an entry for this symbol in the .dynsym section.  The dynamic
14455      object will contain position independent code, so all references
14456      from the dynamic object to this symbol will go through the global
14457      offset table.  The dynamic linker will use the .dynsym entry to
14458      determine the address it must put in the global offset table, so
14459      both the dynamic object and the regular object will refer to the
14460      same memory location for the variable.  */
14461   s = bfd_get_linker_section (dynobj, ".dynbss");
14462   BFD_ASSERT (s != NULL);
14463 
14464   /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
14465      linker to copy the initial value out of the dynamic object and into
14466      the runtime process image.  We need to remember the offset into the
14467      .rel(a).bss section we are going to use.  */
14468   if (info->nocopyreloc == 0
14469       && (h->root.u.def.section->flags & SEC_ALLOC) != 0
14470       && h->size != 0)
14471     {
14472       asection *srel;
14473 
14474       srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
14475       elf32_arm_allocate_dynrelocs (info, srel, 1);
14476       h->needs_copy = 1;
14477     }
14478 
14479   return _bfd_elf_adjust_dynamic_copy (info, h, s);
14480 }
14481 
14482 /* Allocate space in .plt, .got and associated reloc sections for
14483    dynamic relocs.  */
14484 
14485 static bfd_boolean
14486 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
14487 {
14488   struct bfd_link_info *info;
14489   struct elf32_arm_link_hash_table *htab;
14490   struct elf32_arm_link_hash_entry *eh;
14491   struct elf_dyn_relocs *p;
14492 
14493   if (h->root.type == bfd_link_hash_indirect)
14494     return TRUE;
14495 
14496   eh = (struct elf32_arm_link_hash_entry *) h;
14497 
14498   info = (struct bfd_link_info *) inf;
14499   htab = elf32_arm_hash_table (info);
14500   if (htab == NULL)
14501     return FALSE;
14502 
14503   if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
14504       && h->plt.refcount > 0)
14505     {
14506       /* Make sure this symbol is output as a dynamic symbol.
14507 	 Undefined weak syms won't yet be marked as dynamic.  */
14508       if (h->dynindx == -1
14509 	  && !h->forced_local)
14510 	{
14511 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
14512 	    return FALSE;
14513 	}
14514 
14515       /* If the call in the PLT entry binds locally, the associated
14516 	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14517 	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
14518 	 than the .plt section.  */
14519       if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
14520 	{
14521 	  eh->is_iplt = 1;
14522 	  if (eh->plt.noncall_refcount == 0
14523 	      && SYMBOL_REFERENCES_LOCAL (info, h))
14524 	    /* All non-call references can be resolved directly.
14525 	       This means that they can (and in some cases, must)
14526 	       resolve directly to the run-time target, rather than
14527 	       to the PLT.  That in turns means that any .got entry
14528 	       would be equal to the .igot.plt entry, so there's
14529 	       no point having both.  */
14530 	    h->got.refcount = 0;
14531 	}
14532 
14533       if (bfd_link_pic (info)
14534 	  || eh->is_iplt
14535 	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
14536 	{
14537 	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
14538 
14539 	  /* If this symbol is not defined in a regular file, and we are
14540 	     not generating a shared library, then set the symbol to this
14541 	     location in the .plt.  This is required to make function
14542 	     pointers compare as equal between the normal executable and
14543 	     the shared library.  */
14544 	  if (! bfd_link_pic (info)
14545 	      && !h->def_regular)
14546 	    {
14547 	      h->root.u.def.section = htab->root.splt;
14548 	      h->root.u.def.value = h->plt.offset;
14549 
14550 	      /* Make sure the function is not marked as Thumb, in case
14551 		 it is the target of an ABS32 relocation, which will
14552 		 point to the PLT entry.  */
14553 	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14554 	    }
14555 
14556 	  /* VxWorks executables have a second set of relocations for
14557 	     each PLT entry.  They go in a separate relocation section,
14558 	     which is processed by the kernel loader.  */
14559 	  if (htab->vxworks_p && !bfd_link_pic (info))
14560 	    {
14561 	      /* There is a relocation for the initial PLT entry:
14562 		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
14563 	      if (h->plt.offset == htab->plt_header_size)
14564 		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
14565 
14566 	      /* There are two extra relocations for each subsequent
14567 		 PLT entry: an R_ARM_32 relocation for the GOT entry,
14568 		 and an R_ARM_32 relocation for the PLT entry.  */
14569 	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
14570 	    }
14571 	}
14572       else
14573 	{
14574 	  h->plt.offset = (bfd_vma) -1;
14575 	  h->needs_plt = 0;
14576 	}
14577     }
14578   else
14579     {
14580       h->plt.offset = (bfd_vma) -1;
14581       h->needs_plt = 0;
14582     }
14583 
14584   eh = (struct elf32_arm_link_hash_entry *) h;
14585   eh->tlsdesc_got = (bfd_vma) -1;
14586 
14587   if (h->got.refcount > 0)
14588     {
14589       asection *s;
14590       bfd_boolean dyn;
14591       int tls_type = elf32_arm_hash_entry (h)->tls_type;
14592       int indx;
14593 
14594       /* Make sure this symbol is output as a dynamic symbol.
14595 	 Undefined weak syms won't yet be marked as dynamic.  */
14596       if (h->dynindx == -1
14597 	  && !h->forced_local)
14598 	{
14599 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
14600 	    return FALSE;
14601 	}
14602 
14603       if (!htab->symbian_p)
14604 	{
14605 	  s = htab->root.sgot;
14606 	  h->got.offset = s->size;
14607 
14608 	  if (tls_type == GOT_UNKNOWN)
14609 	    abort ();
14610 
14611 	  if (tls_type == GOT_NORMAL)
14612 	    /* Non-TLS symbols need one GOT slot.  */
14613 	    s->size += 4;
14614 	  else
14615 	    {
14616 	      if (tls_type & GOT_TLS_GDESC)
14617 		{
14618 		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
14619 		  eh->tlsdesc_got
14620 		    = (htab->root.sgotplt->size
14621 		       - elf32_arm_compute_jump_table_size (htab));
14622 		  htab->root.sgotplt->size += 8;
14623 		  h->got.offset = (bfd_vma) -2;
14624 		  /* plt.got_offset needs to know there's a TLS_DESC
14625 		     reloc in the middle of .got.plt.  */
14626 		  htab->num_tls_desc++;
14627 		}
14628 
14629 	      if (tls_type & GOT_TLS_GD)
14630 		{
14631 		  /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots.  If
14632 		     the symbol is both GD and GDESC, got.offset may
14633 		     have been overwritten.  */
14634 		  h->got.offset = s->size;
14635 		  s->size += 8;
14636 		}
14637 
14638 	      if (tls_type & GOT_TLS_IE)
14639 		/* R_ARM_TLS_IE32 needs one GOT slot.  */
14640 		s->size += 4;
14641 	    }
14642 
14643 	  dyn = htab->root.dynamic_sections_created;
14644 
14645 	  indx = 0;
14646 	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
14647 					       bfd_link_pic (info),
14648 					       h)
14649 	      && (!bfd_link_pic (info)
14650 		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
14651 	    indx = h->dynindx;
14652 
14653 	  if (tls_type != GOT_NORMAL
14654 	      && (bfd_link_pic (info) || indx != 0)
14655 	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14656 		  || h->root.type != bfd_link_hash_undefweak))
14657 	    {
14658 	      if (tls_type & GOT_TLS_IE)
14659 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14660 
14661 	      if (tls_type & GOT_TLS_GD)
14662 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14663 
14664 	      if (tls_type & GOT_TLS_GDESC)
14665 		{
14666 		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
14667 		  /* GDESC needs a trampoline to jump to.  */
14668 		  htab->tls_trampoline = -1;
14669 		}
14670 
14671 	      /* Only GD needs it.  GDESC just emits one relocation per
14672 		 2 entries.  */
14673 	      if ((tls_type & GOT_TLS_GD) && indx != 0)
14674 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14675 	    }
14676 	  else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
14677 	    {
14678 	      if (htab->root.dynamic_sections_created)
14679 		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
14680 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14681 	    }
14682 	  else if (h->type == STT_GNU_IFUNC
14683 		   && eh->plt.noncall_refcount == 0)
14684 	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14685 	       they all resolve dynamically instead.  Reserve room for the
14686 	       GOT entry's R_ARM_IRELATIVE relocation.  */
14687 	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
14688 	  else if (bfd_link_pic (info)
14689 		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14690 		       || h->root.type != bfd_link_hash_undefweak))
14691 	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
14692 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14693 	}
14694     }
14695   else
14696     h->got.offset = (bfd_vma) -1;
14697 
14698   /* Allocate stubs for exported Thumb functions on v4t.  */
14699   if (!htab->use_blx && h->dynindx != -1
14700       && h->def_regular
14701       && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
14702       && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
14703     {
14704       struct elf_link_hash_entry * th;
14705       struct bfd_link_hash_entry * bh;
14706       struct elf_link_hash_entry * myh;
14707       char name[1024];
14708       asection *s;
14709       bh = NULL;
14710       /* Create a new symbol to regist the real location of the function.  */
14711       s = h->root.u.def.section;
14712       sprintf (name, "__real_%s", h->root.root.string);
14713       _bfd_generic_link_add_one_symbol (info, s->owner,
14714 					name, BSF_GLOBAL, s,
14715 					h->root.u.def.value,
14716 					NULL, TRUE, FALSE, &bh);
14717 
14718       myh = (struct elf_link_hash_entry *) bh;
14719       myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14720       myh->forced_local = 1;
14721       ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
14722       eh->export_glue = myh;
14723       th = record_arm_to_thumb_glue (info, h);
14724       /* Point the symbol at the stub.  */
14725       h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
14726       ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14727       h->root.u.def.section = th->root.u.def.section;
14728       h->root.u.def.value = th->root.u.def.value & ~1;
14729     }
14730 
14731   if (eh->dyn_relocs == NULL)
14732     return TRUE;
14733 
14734   /* In the shared -Bsymbolic case, discard space allocated for
14735      dynamic pc-relative relocs against symbols which turn out to be
14736      defined in regular objects.  For the normal shared case, discard
14737      space for pc-relative relocs that have become local due to symbol
14738      visibility changes.  */
14739 
14740   if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
14741     {
14742       /* Relocs that use pc_count are PC-relative forms, which will appear
14743 	 on something like ".long foo - ." or "movw REG, foo - .".  We want
14744 	 calls to protected symbols to resolve directly to the function
14745 	 rather than going via the plt.  If people want function pointer
14746 	 comparisons to work as expected then they should avoid writing
14747 	 assembly like ".long foo - .".  */
14748       if (SYMBOL_CALLS_LOCAL (info, h))
14749 	{
14750 	  struct elf_dyn_relocs **pp;
14751 
14752 	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14753 	    {
14754 	      p->count -= p->pc_count;
14755 	      p->pc_count = 0;
14756 	      if (p->count == 0)
14757 		*pp = p->next;
14758 	      else
14759 		pp = &p->next;
14760 	    }
14761 	}
14762 
14763       if (htab->vxworks_p)
14764 	{
14765 	  struct elf_dyn_relocs **pp;
14766 
14767 	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14768 	    {
14769 	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
14770 		*pp = p->next;
14771 	      else
14772 		pp = &p->next;
14773 	    }
14774 	}
14775 
14776       /* Also discard relocs on undefined weak syms with non-default
14777 	 visibility.  */
14778       if (eh->dyn_relocs != NULL
14779 	  && h->root.type == bfd_link_hash_undefweak)
14780 	{
14781 	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
14782 	    eh->dyn_relocs = NULL;
14783 
14784 	  /* Make sure undefined weak symbols are output as a dynamic
14785 	     symbol in PIEs.  */
14786 	  else if (h->dynindx == -1
14787 		   && !h->forced_local)
14788 	    {
14789 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
14790 		return FALSE;
14791 	    }
14792 	}
14793 
14794       else if (htab->root.is_relocatable_executable && h->dynindx == -1
14795 	       && h->root.type == bfd_link_hash_new)
14796 	{
14797 	  /* Output absolute symbols so that we can create relocations
14798 	     against them.  For normal symbols we output a relocation
14799 	     against the section that contains them.  */
14800 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
14801 	    return FALSE;
14802 	}
14803 
14804     }
14805   else
14806     {
14807       /* For the non-shared case, discard space for relocs against
14808 	 symbols which turn out to need copy relocs or are not
14809 	 dynamic.  */
14810 
14811       if (!h->non_got_ref
14812 	  && ((h->def_dynamic
14813 	       && !h->def_regular)
14814 	      || (htab->root.dynamic_sections_created
14815 		  && (h->root.type == bfd_link_hash_undefweak
14816 		      || h->root.type == bfd_link_hash_undefined))))
14817 	{
14818 	  /* Make sure this symbol is output as a dynamic symbol.
14819 	     Undefined weak syms won't yet be marked as dynamic.  */
14820 	  if (h->dynindx == -1
14821 	      && !h->forced_local)
14822 	    {
14823 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
14824 		return FALSE;
14825 	    }
14826 
14827 	  /* If that succeeded, we know we'll be keeping all the
14828 	     relocs.  */
14829 	  if (h->dynindx != -1)
14830 	    goto keep;
14831 	}
14832 
14833       eh->dyn_relocs = NULL;
14834 
14835     keep: ;
14836     }
14837 
14838   /* Finally, allocate space.  */
14839   for (p = eh->dyn_relocs; p != NULL; p = p->next)
14840     {
14841       asection *sreloc = elf_section_data (p->sec)->sreloc;
14842       if (h->type == STT_GNU_IFUNC
14843 	  && eh->plt.noncall_refcount == 0
14844 	  && SYMBOL_REFERENCES_LOCAL (info, h))
14845 	elf32_arm_allocate_irelocs (info, sreloc, p->count);
14846       else
14847 	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
14848     }
14849 
14850   return TRUE;
14851 }
14852 
14853 /* Find any dynamic relocs that apply to read-only sections.  */
14854 
14855 static bfd_boolean
14856 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14857 {
14858   struct elf32_arm_link_hash_entry * eh;
14859   struct elf_dyn_relocs * p;
14860 
14861   eh = (struct elf32_arm_link_hash_entry *) h;
14862   for (p = eh->dyn_relocs; p != NULL; p = p->next)
14863     {
14864       asection *s = p->sec;
14865 
14866       if (s != NULL && (s->flags & SEC_READONLY) != 0)
14867 	{
14868 	  struct bfd_link_info *info = (struct bfd_link_info *) inf;
14869 
14870 	  info->flags |= DF_TEXTREL;
14871 
14872 	  /* Not an error, just cut short the traversal.  */
14873 	  return FALSE;
14874 	}
14875     }
14876   return TRUE;
14877 }
14878 
14879 void
14880 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14881 				 int byteswap_code)
14882 {
14883   struct elf32_arm_link_hash_table *globals;
14884 
14885   globals = elf32_arm_hash_table (info);
14886   if (globals == NULL)
14887     return;
14888 
14889   globals->byteswap_code = byteswap_code;
14890 }
14891 
14892 /* Set the sizes of the dynamic sections.  */
14893 
14894 static bfd_boolean
14895 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
14896 				 struct bfd_link_info * info)
14897 {
14898   bfd * dynobj;
14899   asection * s;
14900   bfd_boolean plt;
14901   bfd_boolean relocs;
14902   bfd *ibfd;
14903   struct elf32_arm_link_hash_table *htab;
14904 
14905   htab = elf32_arm_hash_table (info);
14906   if (htab == NULL)
14907     return FALSE;
14908 
14909   dynobj = elf_hash_table (info)->dynobj;
14910   BFD_ASSERT (dynobj != NULL);
14911   check_use_blx (htab);
14912 
14913   if (elf_hash_table (info)->dynamic_sections_created)
14914     {
14915       /* Set the contents of the .interp section to the interpreter.  */
14916       if (bfd_link_executable (info) && !info->nointerp)
14917 	{
14918 	  s = bfd_get_linker_section (dynobj, ".interp");
14919 	  BFD_ASSERT (s != NULL);
14920 	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
14921 	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
14922 	}
14923     }
14924 
14925   /* Set up .got offsets for local syms, and space for local dynamic
14926      relocs.  */
14927   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
14928     {
14929       bfd_signed_vma *local_got;
14930       bfd_signed_vma *end_local_got;
14931       struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
14932       char *local_tls_type;
14933       bfd_vma *local_tlsdesc_gotent;
14934       bfd_size_type locsymcount;
14935       Elf_Internal_Shdr *symtab_hdr;
14936       asection *srel;
14937       bfd_boolean is_vxworks = htab->vxworks_p;
14938       unsigned int symndx;
14939 
14940       if (! is_arm_elf (ibfd))
14941 	continue;
14942 
14943       for (s = ibfd->sections; s != NULL; s = s->next)
14944 	{
14945 	  struct elf_dyn_relocs *p;
14946 
14947 	  for (p = (struct elf_dyn_relocs *)
14948 		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
14949 	    {
14950 	      if (!bfd_is_abs_section (p->sec)
14951 		  && bfd_is_abs_section (p->sec->output_section))
14952 		{
14953 		  /* Input section has been discarded, either because
14954 		     it is a copy of a linkonce section or due to
14955 		     linker script /DISCARD/, so we'll be discarding
14956 		     the relocs too.  */
14957 		}
14958 	      else if (is_vxworks
14959 		       && strcmp (p->sec->output_section->name,
14960 				  ".tls_vars") == 0)
14961 		{
14962 		  /* Relocations in vxworks .tls_vars sections are
14963 		     handled specially by the loader.  */
14964 		}
14965 	      else if (p->count != 0)
14966 		{
14967 		  srel = elf_section_data (p->sec)->sreloc;
14968 		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
14969 		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
14970 		    info->flags |= DF_TEXTREL;
14971 		}
14972 	    }
14973 	}
14974 
14975       local_got = elf_local_got_refcounts (ibfd);
14976       if (!local_got)
14977 	continue;
14978 
14979       symtab_hdr = & elf_symtab_hdr (ibfd);
14980       locsymcount = symtab_hdr->sh_info;
14981       end_local_got = local_got + locsymcount;
14982       local_iplt_ptr = elf32_arm_local_iplt (ibfd);
14983       local_tls_type = elf32_arm_local_got_tls_type (ibfd);
14984       local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
14985       symndx = 0;
14986       s = htab->root.sgot;
14987       srel = htab->root.srelgot;
14988       for (; local_got < end_local_got;
14989 	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
14990 	   ++local_tlsdesc_gotent, ++symndx)
14991 	{
14992 	  *local_tlsdesc_gotent = (bfd_vma) -1;
14993 	  local_iplt = *local_iplt_ptr;
14994 	  if (local_iplt != NULL)
14995 	    {
14996 	      struct elf_dyn_relocs *p;
14997 
14998 	      if (local_iplt->root.refcount > 0)
14999 		{
15000 		  elf32_arm_allocate_plt_entry (info, TRUE,
15001 						&local_iplt->root,
15002 						&local_iplt->arm);
15003 		  if (local_iplt->arm.noncall_refcount == 0)
15004 		    /* All references to the PLT are calls, so all
15005 		       non-call references can resolve directly to the
15006 		       run-time target.  This means that the .got entry
15007 		       would be the same as the .igot.plt entry, so there's
15008 		       no point creating both.  */
15009 		    *local_got = 0;
15010 		}
15011 	      else
15012 		{
15013 		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
15014 		  local_iplt->root.offset = (bfd_vma) -1;
15015 		}
15016 
15017 	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
15018 		{
15019 		  asection *psrel;
15020 
15021 		  psrel = elf_section_data (p->sec)->sreloc;
15022 		  if (local_iplt->arm.noncall_refcount == 0)
15023 		    elf32_arm_allocate_irelocs (info, psrel, p->count);
15024 		  else
15025 		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
15026 		}
15027 	    }
15028 	  if (*local_got > 0)
15029 	    {
15030 	      Elf_Internal_Sym *isym;
15031 
15032 	      *local_got = s->size;
15033 	      if (*local_tls_type & GOT_TLS_GD)
15034 		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
15035 		s->size += 8;
15036 	      if (*local_tls_type & GOT_TLS_GDESC)
15037 		{
15038 		  *local_tlsdesc_gotent = htab->root.sgotplt->size
15039 		    - elf32_arm_compute_jump_table_size (htab);
15040 		  htab->root.sgotplt->size += 8;
15041 		  *local_got = (bfd_vma) -2;
15042 		  /* plt.got_offset needs to know there's a TLS_DESC
15043 		     reloc in the middle of .got.plt.  */
15044 		  htab->num_tls_desc++;
15045 		}
15046 	      if (*local_tls_type & GOT_TLS_IE)
15047 		s->size += 4;
15048 
15049 	      if (*local_tls_type & GOT_NORMAL)
15050 		{
15051 		  /* If the symbol is both GD and GDESC, *local_got
15052 		     may have been overwritten.  */
15053 		  *local_got = s->size;
15054 		  s->size += 4;
15055 		}
15056 
15057 	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
15058 	      if (isym == NULL)
15059 		return FALSE;
15060 
15061 	      /* If all references to an STT_GNU_IFUNC PLT are calls,
15062 		 then all non-call references, including this GOT entry,
15063 		 resolve directly to the run-time target.  */
15064 	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
15065 		  && (local_iplt == NULL
15066 		      || local_iplt->arm.noncall_refcount == 0))
15067 		elf32_arm_allocate_irelocs (info, srel, 1);
15068 	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
15069 		{
15070 		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
15071 		      || *local_tls_type & GOT_TLS_GD)
15072 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
15073 
15074 		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
15075 		    {
15076 		      elf32_arm_allocate_dynrelocs (info,
15077 						    htab->root.srelplt, 1);
15078 		      htab->tls_trampoline = -1;
15079 		    }
15080 		}
15081 	    }
15082 	  else
15083 	    *local_got = (bfd_vma) -1;
15084 	}
15085     }
15086 
15087   if (htab->tls_ldm_got.refcount > 0)
15088     {
15089       /* Allocate two GOT entries and one dynamic relocation (if necessary)
15090 	 for R_ARM_TLS_LDM32 relocations.  */
15091       htab->tls_ldm_got.offset = htab->root.sgot->size;
15092       htab->root.sgot->size += 8;
15093       if (bfd_link_pic (info))
15094 	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
15095     }
15096   else
15097     htab->tls_ldm_got.offset = -1;
15098 
15099   /* Allocate global sym .plt and .got entries, and space for global
15100      sym dynamic relocs.  */
15101   elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
15102 
15103   /* Here we rummage through the found bfds to collect glue information.  */
15104   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
15105     {
15106       if (! is_arm_elf (ibfd))
15107 	continue;
15108 
15109       /* Initialise mapping tables for code/data.  */
15110       bfd_elf32_arm_init_maps (ibfd);
15111 
15112       if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
15113 	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
15114 	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
15115 	/* xgettext:c-format */
15116 	_bfd_error_handler (_("Errors encountered processing file %s"),
15117 			    ibfd->filename);
15118     }
15119 
15120   /* Allocate space for the glue sections now that we've sized them.  */
15121   bfd_elf32_arm_allocate_interworking_sections (info);
15122 
15123   /* For every jump slot reserved in the sgotplt, reloc_count is
15124      incremented.  However, when we reserve space for TLS descriptors,
15125      it's not incremented, so in order to compute the space reserved
15126      for them, it suffices to multiply the reloc count by the jump
15127      slot size.  */
15128   if (htab->root.srelplt)
15129     htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
15130 
15131   if (htab->tls_trampoline)
15132     {
15133       if (htab->root.splt->size == 0)
15134 	htab->root.splt->size += htab->plt_header_size;
15135 
15136       htab->tls_trampoline = htab->root.splt->size;
15137       htab->root.splt->size += htab->plt_entry_size;
15138 
15139       /* If we're not using lazy TLS relocations, don't generate the
15140 	 PLT and GOT entries they require.  */
15141       if (!(info->flags & DF_BIND_NOW))
15142 	{
15143 	  htab->dt_tlsdesc_got = htab->root.sgot->size;
15144 	  htab->root.sgot->size += 4;
15145 
15146 	  htab->dt_tlsdesc_plt = htab->root.splt->size;
15147 	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
15148 	}
15149     }
15150 
15151   /* The check_relocs and adjust_dynamic_symbol entry points have
15152      determined the sizes of the various dynamic sections.  Allocate
15153      memory for them.  */
15154   plt = FALSE;
15155   relocs = FALSE;
15156   for (s = dynobj->sections; s != NULL; s = s->next)
15157     {
15158       const char * name;
15159 
15160       if ((s->flags & SEC_LINKER_CREATED) == 0)
15161 	continue;
15162 
15163       /* It's OK to base decisions on the section name, because none
15164 	 of the dynobj section names depend upon the input files.  */
15165       name = bfd_get_section_name (dynobj, s);
15166 
15167       if (s == htab->root.splt)
15168 	{
15169 	  /* Remember whether there is a PLT.  */
15170 	  plt = s->size != 0;
15171 	}
15172       else if (CONST_STRNEQ (name, ".rel"))
15173 	{
15174 	  if (s->size != 0)
15175 	    {
15176 	      /* Remember whether there are any reloc sections other
15177 		 than .rel(a).plt and .rela.plt.unloaded.  */
15178 	      if (s != htab->root.srelplt && s != htab->srelplt2)
15179 		relocs = TRUE;
15180 
15181 	      /* We use the reloc_count field as a counter if we need
15182 		 to copy relocs into the output file.  */
15183 	      s->reloc_count = 0;
15184 	    }
15185 	}
15186       else if (s != htab->root.sgot
15187 	       && s != htab->root.sgotplt
15188 	       && s != htab->root.iplt
15189 	       && s != htab->root.igotplt
15190 	       && s != htab->sdynbss)
15191 	{
15192 	  /* It's not one of our sections, so don't allocate space.  */
15193 	  continue;
15194 	}
15195 
15196       if (s->size == 0)
15197 	{
15198 	  /* If we don't need this section, strip it from the
15199 	     output file.  This is mostly to handle .rel(a).bss and
15200 	     .rel(a).plt.  We must create both sections in
15201 	     create_dynamic_sections, because they must be created
15202 	     before the linker maps input sections to output
15203 	     sections.  The linker does that before
15204 	     adjust_dynamic_symbol is called, and it is that
15205 	     function which decides whether anything needs to go
15206 	     into these sections.  */
15207 	  s->flags |= SEC_EXCLUDE;
15208 	  continue;
15209 	}
15210 
15211       if ((s->flags & SEC_HAS_CONTENTS) == 0)
15212 	continue;
15213 
15214       /* Allocate memory for the section contents.  */
15215       s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
15216       if (s->contents == NULL)
15217 	return FALSE;
15218     }
15219 
15220   if (elf_hash_table (info)->dynamic_sections_created)
15221     {
15222       /* Add some entries to the .dynamic section.  We fill in the
15223 	 values later, in elf32_arm_finish_dynamic_sections, but we
15224 	 must add the entries now so that we get the correct size for
15225 	 the .dynamic section.  The DT_DEBUG entry is filled in by the
15226 	 dynamic linker and used by the debugger.  */
15227 #define add_dynamic_entry(TAG, VAL) \
15228   _bfd_elf_add_dynamic_entry (info, TAG, VAL)
15229 
15230      if (bfd_link_executable (info))
15231 	{
15232 	  if (!add_dynamic_entry (DT_DEBUG, 0))
15233 	    return FALSE;
15234 	}
15235 
15236       if (plt)
15237 	{
15238 	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
15239 	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
15240 	      || !add_dynamic_entry (DT_PLTREL,
15241 				     htab->use_rel ? DT_REL : DT_RELA)
15242 	      || !add_dynamic_entry (DT_JMPREL, 0))
15243 	    return FALSE;
15244 
15245 	  if (htab->dt_tlsdesc_plt &&
15246 		(!add_dynamic_entry (DT_TLSDESC_PLT,0)
15247 		 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
15248 	    return FALSE;
15249 	}
15250 
15251       if (relocs)
15252 	{
15253 	  if (htab->use_rel)
15254 	    {
15255 	      if (!add_dynamic_entry (DT_REL, 0)
15256 		  || !add_dynamic_entry (DT_RELSZ, 0)
15257 		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
15258 		return FALSE;
15259 	    }
15260 	  else
15261 	    {
15262 	      if (!add_dynamic_entry (DT_RELA, 0)
15263 		  || !add_dynamic_entry (DT_RELASZ, 0)
15264 		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
15265 		return FALSE;
15266 	    }
15267 	}
15268 
15269       /* If any dynamic relocs apply to a read-only section,
15270 	 then we need a DT_TEXTREL entry.  */
15271       if ((info->flags & DF_TEXTREL) == 0)
15272 	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
15273 				info);
15274 
15275       if ((info->flags & DF_TEXTREL) != 0)
15276 	{
15277 	  if (!add_dynamic_entry (DT_TEXTREL, 0))
15278 	    return FALSE;
15279 	}
15280       if (htab->vxworks_p
15281 	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
15282 	return FALSE;
15283     }
15284 #undef add_dynamic_entry
15285 
15286   return TRUE;
15287 }
15288 
15289 /* Size sections even though they're not dynamic.  We use it to setup
15290    _TLS_MODULE_BASE_, if needed.  */
15291 
15292 static bfd_boolean
15293 elf32_arm_always_size_sections (bfd *output_bfd,
15294 				struct bfd_link_info *info)
15295 {
15296   asection *tls_sec;
15297 
15298   if (bfd_link_relocatable (info))
15299     return TRUE;
15300 
15301   tls_sec = elf_hash_table (info)->tls_sec;
15302 
15303   if (tls_sec)
15304     {
15305       struct elf_link_hash_entry *tlsbase;
15306 
15307       tlsbase = elf_link_hash_lookup
15308 	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
15309 
15310       if (tlsbase)
15311 	{
15312 	  struct bfd_link_hash_entry *bh = NULL;
15313 	  const struct elf_backend_data *bed
15314 	    = get_elf_backend_data (output_bfd);
15315 
15316 	  if (!(_bfd_generic_link_add_one_symbol
15317 		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
15318 		 tls_sec, 0, NULL, FALSE,
15319 		 bed->collect, &bh)))
15320 	    return FALSE;
15321 
15322 	  tlsbase->type = STT_TLS;
15323 	  tlsbase = (struct elf_link_hash_entry *)bh;
15324 	  tlsbase->def_regular = 1;
15325 	  tlsbase->other = STV_HIDDEN;
15326 	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
15327 	}
15328     }
15329   return TRUE;
15330 }
15331 
15332 /* Finish up dynamic symbol handling.  We set the contents of various
15333    dynamic sections here.  */
15334 
15335 static bfd_boolean
15336 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
15337 				 struct bfd_link_info * info,
15338 				 struct elf_link_hash_entry * h,
15339 				 Elf_Internal_Sym * sym)
15340 {
15341   struct elf32_arm_link_hash_table *htab;
15342   struct elf32_arm_link_hash_entry *eh;
15343 
15344   htab = elf32_arm_hash_table (info);
15345   if (htab == NULL)
15346     return FALSE;
15347 
15348   eh = (struct elf32_arm_link_hash_entry *) h;
15349 
15350   if (h->plt.offset != (bfd_vma) -1)
15351     {
15352       if (!eh->is_iplt)
15353 	{
15354 	  BFD_ASSERT (h->dynindx != -1);
15355 	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
15356 					      h->dynindx, 0))
15357 	    return FALSE;
15358 	}
15359 
15360       if (!h->def_regular)
15361 	{
15362 	  /* Mark the symbol as undefined, rather than as defined in
15363 	     the .plt section.  */
15364 	  sym->st_shndx = SHN_UNDEF;
15365 	  /* If the symbol is weak we need to clear the value.
15366 	     Otherwise, the PLT entry would provide a definition for
15367 	     the symbol even if the symbol wasn't defined anywhere,
15368 	     and so the symbol would never be NULL.  Leave the value if
15369 	     there were any relocations where pointer equality matters
15370 	     (this is a clue for the dynamic linker, to make function
15371 	     pointer comparisons work between an application and shared
15372 	     library).  */
15373 	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
15374 	    sym->st_value = 0;
15375 	}
15376       else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
15377 	{
15378 	  /* At least one non-call relocation references this .iplt entry,
15379 	     so the .iplt entry is the function's canonical address.  */
15380 	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
15381 	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
15382 	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
15383 			   (output_bfd, htab->root.iplt->output_section));
15384 	  sym->st_value = (h->plt.offset
15385 			   + htab->root.iplt->output_section->vma
15386 			   + htab->root.iplt->output_offset);
15387 	}
15388     }
15389 
15390   if (h->needs_copy)
15391     {
15392       asection * s;
15393       Elf_Internal_Rela rel;
15394 
15395       /* This symbol needs a copy reloc.  Set it up.  */
15396       BFD_ASSERT (h->dynindx != -1
15397 		  && (h->root.type == bfd_link_hash_defined
15398 		      || h->root.type == bfd_link_hash_defweak));
15399 
15400       s = htab->srelbss;
15401       BFD_ASSERT (s != NULL);
15402 
15403       rel.r_addend = 0;
15404       rel.r_offset = (h->root.u.def.value
15405 		      + h->root.u.def.section->output_section->vma
15406 		      + h->root.u.def.section->output_offset);
15407       rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
15408       elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
15409     }
15410 
15411   /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
15412      the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
15413      to the ".got" section.  */
15414   if (h == htab->root.hdynamic
15415       || (!htab->vxworks_p && h == htab->root.hgot))
15416     sym->st_shndx = SHN_ABS;
15417 
15418   return TRUE;
15419 }
15420 
15421 static void
15422 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15423 		    void *contents,
15424 		    const unsigned long *template, unsigned count)
15425 {
15426   unsigned ix;
15427 
15428   for (ix = 0; ix != count; ix++)
15429     {
15430       unsigned long insn = template[ix];
15431 
15432       /* Emit mov pc,rx if bx is not permitted.  */
15433       if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15434 	insn = (insn & 0xf000000f) | 0x01a0f000;
15435       put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15436     }
15437 }
15438 
15439 /* Install the special first PLT entry for elf32-arm-nacl.  Unlike
15440    other variants, NaCl needs this entry in a static executable's
15441    .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
15442    zero.  For .iplt really only the last bundle is useful, and .iplt
15443    could have a shorter first entry, with each individual PLT entry's
15444    relative branch calculated differently so it targets the last
15445    bundle instead of the instruction before it (labelled .Lplt_tail
15446    above).  But it's simpler to keep the size and layout of PLT0
15447    consistent with the dynamic case, at the cost of some dead code at
15448    the start of .iplt and the one dead store to the stack at the start
15449    of .Lplt_tail.  */
15450 static void
15451 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15452 		   asection *plt, bfd_vma got_displacement)
15453 {
15454   unsigned int i;
15455 
15456   put_arm_insn (htab, output_bfd,
15457 		elf32_arm_nacl_plt0_entry[0]
15458 		| arm_movw_immediate (got_displacement),
15459 		plt->contents + 0);
15460   put_arm_insn (htab, output_bfd,
15461 		elf32_arm_nacl_plt0_entry[1]
15462 		| arm_movt_immediate (got_displacement),
15463 		plt->contents + 4);
15464 
15465   for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15466     put_arm_insn (htab, output_bfd,
15467 		  elf32_arm_nacl_plt0_entry[i],
15468 		  plt->contents + (i * 4));
15469 }
15470 
15471 /* Finish up the dynamic sections.  */
15472 
15473 static bfd_boolean
15474 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
15475 {
15476   bfd * dynobj;
15477   asection * sgot;
15478   asection * sdyn;
15479   struct elf32_arm_link_hash_table *htab;
15480 
15481   htab = elf32_arm_hash_table (info);
15482   if (htab == NULL)
15483     return FALSE;
15484 
15485   dynobj = elf_hash_table (info)->dynobj;
15486 
15487   sgot = htab->root.sgotplt;
15488   /* A broken linker script might have discarded the dynamic sections.
15489      Catch this here so that we do not seg-fault later on.  */
15490   if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
15491     return FALSE;
15492   sdyn = bfd_get_linker_section (dynobj, ".dynamic");
15493 
15494   if (elf_hash_table (info)->dynamic_sections_created)
15495     {
15496       asection *splt;
15497       Elf32_External_Dyn *dyncon, *dynconend;
15498 
15499       splt = htab->root.splt;
15500       BFD_ASSERT (splt != NULL && sdyn != NULL);
15501       BFD_ASSERT (htab->symbian_p || sgot != NULL);
15502 
15503       dyncon = (Elf32_External_Dyn *) sdyn->contents;
15504       dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
15505 
15506       for (; dyncon < dynconend; dyncon++)
15507 	{
15508 	  Elf_Internal_Dyn dyn;
15509 	  const char * name;
15510 	  asection * s;
15511 
15512 	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
15513 
15514 	  switch (dyn.d_tag)
15515 	    {
15516 	      unsigned int type;
15517 
15518 	    default:
15519 	      if (htab->vxworks_p
15520 		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
15521 		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15522 	      break;
15523 
15524 	    case DT_HASH:
15525 	      name = ".hash";
15526 	      goto get_vma_if_bpabi;
15527 	    case DT_STRTAB:
15528 	      name = ".dynstr";
15529 	      goto get_vma_if_bpabi;
15530 	    case DT_SYMTAB:
15531 	      name = ".dynsym";
15532 	      goto get_vma_if_bpabi;
15533 	    case DT_VERSYM:
15534 	      name = ".gnu.version";
15535 	      goto get_vma_if_bpabi;
15536 	    case DT_VERDEF:
15537 	      name = ".gnu.version_d";
15538 	      goto get_vma_if_bpabi;
15539 	    case DT_VERNEED:
15540 	      name = ".gnu.version_r";
15541 	      goto get_vma_if_bpabi;
15542 
15543 	    case DT_PLTGOT:
15544 	      name = htab->symbian_p ? ".got" : ".got.plt";
15545 	      goto get_vma;
15546 	    case DT_JMPREL:
15547 	      name = RELOC_SECTION (htab, ".plt");
15548 	    get_vma:
15549 	      s = bfd_get_linker_section (dynobj, name);
15550 	      if (s == NULL)
15551 		{
15552 		  (*_bfd_error_handler)
15553 		    (_("could not find section %s"), name);
15554 		  bfd_set_error (bfd_error_invalid_operation);
15555 		  return FALSE;
15556 		}
15557 	      if (!htab->symbian_p)
15558 		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
15559 	      else
15560 		/* In the BPABI, tags in the PT_DYNAMIC section point
15561 		   at the file offset, not the memory address, for the
15562 		   convenience of the post linker.  */
15563 		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
15564 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15565 	      break;
15566 
15567 	    get_vma_if_bpabi:
15568 	      if (htab->symbian_p)
15569 		goto get_vma;
15570 	      break;
15571 
15572 	    case DT_PLTRELSZ:
15573 	      s = htab->root.srelplt;
15574 	      BFD_ASSERT (s != NULL);
15575 	      dyn.d_un.d_val = s->size;
15576 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15577 	      break;
15578 
15579 	    case DT_RELSZ:
15580 	    case DT_RELASZ:
15581 	      if (!htab->symbian_p)
15582 		{
15583 		  /* My reading of the SVR4 ABI indicates that the
15584 		     procedure linkage table relocs (DT_JMPREL) should be
15585 		     included in the overall relocs (DT_REL).  This is
15586 		     what Solaris does.  However, UnixWare can not handle
15587 		     that case.  Therefore, we override the DT_RELSZ entry
15588 		     here to make it not include the JMPREL relocs.  Since
15589 		     the linker script arranges for .rel(a).plt to follow all
15590 		     other relocation sections, we don't have to worry
15591 		     about changing the DT_REL entry.  */
15592 		  s = htab->root.srelplt;
15593 		  if (s != NULL)
15594 		    dyn.d_un.d_val -= s->size;
15595 		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15596 		  break;
15597 		}
15598 	      /* Fall through.  */
15599 
15600 	    case DT_REL:
15601 	    case DT_RELA:
15602 	      /* In the BPABI, the DT_REL tag must point at the file
15603 		 offset, not the VMA, of the first relocation
15604 		 section.  So, we use code similar to that in
15605 		 elflink.c, but do not check for SHF_ALLOC on the
15606 		 relcoation section, since relocations sections are
15607 		 never allocated under the BPABI.  The comments above
15608 		 about Unixware notwithstanding, we include all of the
15609 		 relocations here.  */
15610 	      if (htab->symbian_p)
15611 		{
15612 		  unsigned int i;
15613 		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
15614 			  ? SHT_REL : SHT_RELA);
15615 		  dyn.d_un.d_val = 0;
15616 		  for (i = 1; i < elf_numsections (output_bfd); i++)
15617 		    {
15618 		      Elf_Internal_Shdr *hdr
15619 			= elf_elfsections (output_bfd)[i];
15620 		      if (hdr->sh_type == type)
15621 			{
15622 			  if (dyn.d_tag == DT_RELSZ
15623 			      || dyn.d_tag == DT_RELASZ)
15624 			    dyn.d_un.d_val += hdr->sh_size;
15625 			  else if ((ufile_ptr) hdr->sh_offset
15626 				   <= dyn.d_un.d_val - 1)
15627 			    dyn.d_un.d_val = hdr->sh_offset;
15628 			}
15629 		    }
15630 		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15631 		}
15632 	      break;
15633 
15634 	    case DT_TLSDESC_PLT:
15635 	      s = htab->root.splt;
15636 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15637 				+ htab->dt_tlsdesc_plt);
15638 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15639 	      break;
15640 
15641 	    case DT_TLSDESC_GOT:
15642 	      s = htab->root.sgot;
15643 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15644 				+ htab->dt_tlsdesc_got);
15645 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15646 	      break;
15647 
15648 	      /* Set the bottom bit of DT_INIT/FINI if the
15649 		 corresponding function is Thumb.  */
15650 	    case DT_INIT:
15651 	      name = info->init_function;
15652 	      goto get_sym;
15653 	    case DT_FINI:
15654 	      name = info->fini_function;
15655 	    get_sym:
15656 	      /* If it wasn't set by elf_bfd_final_link
15657 		 then there is nothing to adjust.  */
15658 	      if (dyn.d_un.d_val != 0)
15659 		{
15660 		  struct elf_link_hash_entry * eh;
15661 
15662 		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
15663 					     FALSE, FALSE, TRUE);
15664 		  if (eh != NULL
15665 		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
15666 			 == ST_BRANCH_TO_THUMB)
15667 		    {
15668 		      dyn.d_un.d_val |= 1;
15669 		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15670 		    }
15671 		}
15672 	      break;
15673 	    }
15674 	}
15675 
15676       /* Fill in the first entry in the procedure linkage table.  */
15677       if (splt->size > 0 && htab->plt_header_size)
15678 	{
15679 	  const bfd_vma *plt0_entry;
15680 	  bfd_vma got_address, plt_address, got_displacement;
15681 
15682 	  /* Calculate the addresses of the GOT and PLT.  */
15683 	  got_address = sgot->output_section->vma + sgot->output_offset;
15684 	  plt_address = splt->output_section->vma + splt->output_offset;
15685 
15686 	  if (htab->vxworks_p)
15687 	    {
15688 	      /* The VxWorks GOT is relocated by the dynamic linker.
15689 		 Therefore, we must emit relocations rather than simply
15690 		 computing the values now.  */
15691 	      Elf_Internal_Rela rel;
15692 
15693 	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
15694 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
15695 			    splt->contents + 0);
15696 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
15697 			    splt->contents + 4);
15698 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
15699 			    splt->contents + 8);
15700 	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);
15701 
15702 	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
15703 	      rel.r_offset = plt_address + 12;
15704 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15705 	      rel.r_addend = 0;
15706 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
15707 				     htab->srelplt2->contents);
15708 	    }
15709 	  else if (htab->nacl_p)
15710 	    arm_nacl_put_plt0 (htab, output_bfd, splt,
15711 			       got_address + 8 - (plt_address + 16));
15712 	  else if (using_thumb_only (htab))
15713 	    {
15714 	      got_displacement = got_address - (plt_address + 12);
15715 
15716 	      plt0_entry = elf32_thumb2_plt0_entry;
15717 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
15718 			    splt->contents + 0);
15719 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
15720 			    splt->contents + 4);
15721 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
15722 			    splt->contents + 8);
15723 
15724 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
15725 	    }
15726 	  else
15727 	    {
15728 	      got_displacement = got_address - (plt_address + 16);
15729 
15730 	      plt0_entry = elf32_arm_plt0_entry;
15731 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
15732 			    splt->contents + 0);
15733 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
15734 			    splt->contents + 4);
15735 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
15736 			    splt->contents + 8);
15737 	      put_arm_insn (htab, output_bfd, plt0_entry[3],
15738 			    splt->contents + 12);
15739 
15740 #ifdef FOUR_WORD_PLT
15741 	      /* The displacement value goes in the otherwise-unused
15742 		 last word of the second entry.  */
15743 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
15744 #else
15745 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
15746 #endif
15747 	    }
15748 	}
15749 
15750       /* UnixWare sets the entsize of .plt to 4, although that doesn't
15751 	 really seem like the right value.  */
15752       if (splt->output_section->owner == output_bfd)
15753 	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
15754 
15755       if (htab->dt_tlsdesc_plt)
15756 	{
15757 	  bfd_vma got_address
15758 	    = sgot->output_section->vma + sgot->output_offset;
15759 	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
15760 				    + htab->root.sgot->output_offset);
15761 	  bfd_vma plt_address
15762 	    = splt->output_section->vma + splt->output_offset;
15763 
15764 	  arm_put_trampoline (htab, output_bfd,
15765 			      splt->contents + htab->dt_tlsdesc_plt,
15766 			      dl_tlsdesc_lazy_trampoline, 6);
15767 
15768 	  bfd_put_32 (output_bfd,
15769 		      gotplt_address + htab->dt_tlsdesc_got
15770 		      - (plt_address + htab->dt_tlsdesc_plt)
15771 		      - dl_tlsdesc_lazy_trampoline[6],
15772 		      splt->contents + htab->dt_tlsdesc_plt + 24);
15773 	  bfd_put_32 (output_bfd,
15774 		      got_address - (plt_address + htab->dt_tlsdesc_plt)
15775 		      - dl_tlsdesc_lazy_trampoline[7],
15776 		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
15777 	}
15778 
15779       if (htab->tls_trampoline)
15780 	{
15781 	  arm_put_trampoline (htab, output_bfd,
15782 			      splt->contents + htab->tls_trampoline,
15783 			      tls_trampoline, 3);
15784 #ifdef FOUR_WORD_PLT
15785 	  bfd_put_32 (output_bfd, 0x00000000,
15786 		      splt->contents + htab->tls_trampoline + 12);
15787 #endif
15788 	}
15789 
15790       if (htab->vxworks_p
15791 	  && !bfd_link_pic (info)
15792 	  && htab->root.splt->size > 0)
15793 	{
15794 	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
15795 	     incorrect symbol indexes.  */
15796 	  int num_plts;
15797 	  unsigned char *p;
15798 
15799 	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
15800 		      / htab->plt_entry_size);
15801 	  p = htab->srelplt2->contents + RELOC_SIZE (htab);
15802 
15803 	  for (; num_plts; num_plts--)
15804 	    {
15805 	      Elf_Internal_Rela rel;
15806 
15807 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15808 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15809 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15810 	      p += RELOC_SIZE (htab);
15811 
15812 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15813 	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
15814 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15815 	      p += RELOC_SIZE (htab);
15816 	    }
15817 	}
15818     }
15819 
15820   if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
15821     /* NaCl uses a special first entry in .iplt too.  */
15822     arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
15823 
15824   /* Fill in the first three entries in the global offset table.  */
15825   if (sgot)
15826     {
15827       if (sgot->size > 0)
15828 	{
15829 	  if (sdyn == NULL)
15830 	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
15831 	  else
15832 	    bfd_put_32 (output_bfd,
15833 			sdyn->output_section->vma + sdyn->output_offset,
15834 			sgot->contents);
15835 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
15836 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
15837 	}
15838 
15839       elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
15840     }
15841 
15842   return TRUE;
15843 }
15844 
15845 static void
15846 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15847 {
15848   Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
15849   struct elf32_arm_link_hash_table *globals;
15850   struct elf_segment_map *m;
15851 
15852   i_ehdrp = elf_elfheader (abfd);
15853 
15854   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15855     i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15856   else
15857     _bfd_elf_post_process_headers (abfd, link_info);
15858   i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15859 
15860   if (link_info)
15861     {
15862       globals = elf32_arm_hash_table (link_info);
15863       if (globals != NULL && globals->byteswap_code)
15864 	i_ehdrp->e_flags |= EF_ARM_BE8;
15865     }
15866 
15867   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15868       && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15869     {
15870       int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15871       if (abi == AEABI_VFP_args_vfp)
15872 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15873       else
15874 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15875     }
15876 
15877   /* Scan segment to set p_flags attribute if it contains only sections with
15878      SHF_ARM_PURECODE flag.  */
15879   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15880     {
15881       unsigned int j;
15882 
15883       if (m->count == 0)
15884 	continue;
15885       for (j = 0; j < m->count; j++)
15886 	{
15887 	  if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
15888 	    break;
15889 	}
15890       if (j == m->count)
15891 	{
15892 	  m->p_flags = PF_X;
15893 	  m->p_flags_valid = 1;
15894 	}
15895     }
15896 }
15897 
15898 static enum elf_reloc_type_class
15899 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15900 			    const asection *rel_sec ATTRIBUTE_UNUSED,
15901 			    const Elf_Internal_Rela *rela)
15902 {
15903   switch ((int) ELF32_R_TYPE (rela->r_info))
15904     {
15905     case R_ARM_RELATIVE:
15906       return reloc_class_relative;
15907     case R_ARM_JUMP_SLOT:
15908       return reloc_class_plt;
15909     case R_ARM_COPY:
15910       return reloc_class_copy;
15911     case R_ARM_IRELATIVE:
15912       return reloc_class_ifunc;
15913     default:
15914       return reloc_class_normal;
15915     }
15916 }
15917 
15918 static void
15919 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
15920 {
15921   bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
15922 }
15923 
15924 /* Return TRUE if this is an unwinding table entry.  */
15925 
15926 static bfd_boolean
15927 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15928 {
15929   return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15930 	  || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15931 }
15932 
15933 
15934 /* Set the type and flags for an ARM section.  We do this by
15935    the section name, which is a hack, but ought to work.  */
15936 
15937 static bfd_boolean
15938 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15939 {
15940   const char * name;
15941 
15942   name = bfd_get_section_name (abfd, sec);
15943 
15944   if (is_arm_elf_unwind_section_name (abfd, name))
15945     {
15946       hdr->sh_type = SHT_ARM_EXIDX;
15947       hdr->sh_flags |= SHF_LINK_ORDER;
15948     }
15949 
15950   if (sec->flags & SEC_ELF_PURECODE)
15951     hdr->sh_flags |= SHF_ARM_PURECODE;
15952 
15953   return TRUE;
15954 }
15955 
15956 /* Handle an ARM specific section when reading an object file.  This is
15957    called when bfd_section_from_shdr finds a section with an unknown
15958    type.  */
15959 
15960 static bfd_boolean
15961 elf32_arm_section_from_shdr (bfd *abfd,
15962 			     Elf_Internal_Shdr * hdr,
15963 			     const char *name,
15964 			     int shindex)
15965 {
15966   /* There ought to be a place to keep ELF backend specific flags, but
15967      at the moment there isn't one.  We just keep track of the
15968      sections by their name, instead.  Fortunately, the ABI gives
15969      names for all the ARM specific sections, so we will probably get
15970      away with this.  */
15971   switch (hdr->sh_type)
15972     {
15973     case SHT_ARM_EXIDX:
15974     case SHT_ARM_PREEMPTMAP:
15975     case SHT_ARM_ATTRIBUTES:
15976       break;
15977 
15978     default:
15979       return FALSE;
15980     }
15981 
15982   if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15983     return FALSE;
15984 
15985   return TRUE;
15986 }
15987 
15988 static _arm_elf_section_data *
15989 get_arm_elf_section_data (asection * sec)
15990 {
15991   if (sec && sec->owner && is_arm_elf (sec->owner))
15992     return elf32_arm_section_data (sec);
15993   else
15994     return NULL;
15995 }
15996 
15997 typedef struct
15998 {
15999   void *flaginfo;
16000   struct bfd_link_info *info;
16001   asection *sec;
16002   int sec_shndx;
16003   int (*func) (void *, const char *, Elf_Internal_Sym *,
16004 	       asection *, struct elf_link_hash_entry *);
16005 } output_arch_syminfo;
16006 
16007 enum map_symbol_type
16008 {
16009   ARM_MAP_ARM,
16010   ARM_MAP_THUMB,
16011   ARM_MAP_DATA
16012 };
16013 
16014 
16015 /* Output a single mapping symbol.  */
16016 
16017 static bfd_boolean
16018 elf32_arm_output_map_sym (output_arch_syminfo *osi,
16019 			  enum map_symbol_type type,
16020 			  bfd_vma offset)
16021 {
16022   static const char *names[3] = {"$a", "$t", "$d"};
16023   Elf_Internal_Sym sym;
16024 
16025   sym.st_value = osi->sec->output_section->vma
16026 		 + osi->sec->output_offset
16027 		 + offset;
16028   sym.st_size = 0;
16029   sym.st_other = 0;
16030   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
16031   sym.st_shndx = osi->sec_shndx;
16032   sym.st_target_internal = 0;
16033   elf32_arm_section_map_add (osi->sec, names[type][1], offset);
16034   return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
16035 }
16036 
16037 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
16038    IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */
16039 
16040 static bfd_boolean
16041 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
16042 			    bfd_boolean is_iplt_entry_p,
16043 			    union gotplt_union *root_plt,
16044 			    struct arm_plt_info *arm_plt)
16045 {
16046   struct elf32_arm_link_hash_table *htab;
16047   bfd_vma addr, plt_header_size;
16048 
16049   if (root_plt->offset == (bfd_vma) -1)
16050     return TRUE;
16051 
16052   htab = elf32_arm_hash_table (osi->info);
16053   if (htab == NULL)
16054     return FALSE;
16055 
16056   if (is_iplt_entry_p)
16057     {
16058       osi->sec = htab->root.iplt;
16059       plt_header_size = 0;
16060     }
16061   else
16062     {
16063       osi->sec = htab->root.splt;
16064       plt_header_size = htab->plt_header_size;
16065     }
16066   osi->sec_shndx = (_bfd_elf_section_from_bfd_section
16067 		    (osi->info->output_bfd, osi->sec->output_section));
16068 
16069   addr = root_plt->offset & -2;
16070   if (htab->symbian_p)
16071     {
16072       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16073 	return FALSE;
16074       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
16075 	return FALSE;
16076     }
16077   else if (htab->vxworks_p)
16078     {
16079       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16080 	return FALSE;
16081       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
16082 	return FALSE;
16083       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
16084 	return FALSE;
16085       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
16086 	return FALSE;
16087     }
16088   else if (htab->nacl_p)
16089     {
16090       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16091 	return FALSE;
16092     }
16093   else if (using_thumb_only (htab))
16094     {
16095       if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
16096 	return FALSE;
16097     }
16098   else
16099     {
16100       bfd_boolean thumb_stub_p;
16101 
16102       thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
16103       if (thumb_stub_p)
16104 	{
16105 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
16106 	    return FALSE;
16107 	}
16108 #ifdef FOUR_WORD_PLT
16109       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16110 	return FALSE;
16111       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
16112 	return FALSE;
16113 #else
16114       /* A three-word PLT with no Thumb thunk contains only Arm code,
16115 	 so only need to output a mapping symbol for the first PLT entry and
16116 	 entries with thumb thunks.  */
16117       if (thumb_stub_p || addr == plt_header_size)
16118 	{
16119 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16120 	    return FALSE;
16121 	}
16122 #endif
16123     }
16124 
16125   return TRUE;
16126 }
16127 
16128 /* Output mapping symbols for PLT entries associated with H.  */
16129 
16130 static bfd_boolean
16131 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
16132 {
16133   output_arch_syminfo *osi = (output_arch_syminfo *) inf;
16134   struct elf32_arm_link_hash_entry *eh;
16135 
16136   if (h->root.type == bfd_link_hash_indirect)
16137     return TRUE;
16138 
16139   if (h->root.type == bfd_link_hash_warning)
16140     /* When warning symbols are created, they **replace** the "real"
16141        entry in the hash table, thus we never get to see the real
16142        symbol in a hash traversal.  So look at it now.  */
16143     h = (struct elf_link_hash_entry *) h->root.u.i.link;
16144 
16145   eh = (struct elf32_arm_link_hash_entry *) h;
16146   return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
16147 				     &h->plt, &eh->plt);
16148 }
16149 
16150 /* Bind a veneered symbol to its veneer identified by its hash entry
16151    STUB_ENTRY.  The veneered location thus loose its symbol.  */
16152 
16153 static void
16154 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
16155 {
16156   struct elf32_arm_link_hash_entry *hash = stub_entry->h;
16157 
16158   BFD_ASSERT (hash);
16159   hash->root.root.u.def.section = stub_entry->stub_sec;
16160   hash->root.root.u.def.value = stub_entry->stub_offset;
16161   hash->root.size = stub_entry->stub_size;
16162 }
16163 
16164 /* Output a single local symbol for a generated stub.  */
16165 
16166 static bfd_boolean
16167 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
16168 			   bfd_vma offset, bfd_vma size)
16169 {
16170   Elf_Internal_Sym sym;
16171 
16172   sym.st_value = osi->sec->output_section->vma
16173 		 + osi->sec->output_offset
16174 		 + offset;
16175   sym.st_size = size;
16176   sym.st_other = 0;
16177   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16178   sym.st_shndx = osi->sec_shndx;
16179   sym.st_target_internal = 0;
16180   return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
16181 }
16182 
16183 static bfd_boolean
16184 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
16185 		  void * in_arg)
16186 {
16187   struct elf32_arm_stub_hash_entry *stub_entry;
16188   asection *stub_sec;
16189   bfd_vma addr;
16190   char *stub_name;
16191   output_arch_syminfo *osi;
16192   const insn_sequence *template_sequence;
16193   enum stub_insn_type prev_type;
16194   int size;
16195   int i;
16196   enum map_symbol_type sym_type;
16197 
16198   /* Massage our args to the form they really have.  */
16199   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
16200   osi = (output_arch_syminfo *) in_arg;
16201 
16202   stub_sec = stub_entry->stub_sec;
16203 
16204   /* Ensure this stub is attached to the current section being
16205      processed.  */
16206   if (stub_sec != osi->sec)
16207     return TRUE;
16208 
16209   addr = (bfd_vma) stub_entry->stub_offset;
16210   template_sequence = stub_entry->stub_template;
16211 
16212   if (arm_stub_sym_claimed (stub_entry->stub_type))
16213     arm_stub_claim_sym (stub_entry);
16214   else
16215     {
16216       stub_name = stub_entry->output_name;
16217       switch (template_sequence[0].type)
16218 	{
16219 	case ARM_TYPE:
16220 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
16221 					  stub_entry->stub_size))
16222 	    return FALSE;
16223 	  break;
16224 	case THUMB16_TYPE:
16225 	case THUMB32_TYPE:
16226 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
16227 					  stub_entry->stub_size))
16228 	    return FALSE;
16229 	  break;
16230 	default:
16231 	  BFD_FAIL ();
16232 	  return 0;
16233 	}
16234     }
16235 
16236   prev_type = DATA_TYPE;
16237   size = 0;
16238   for (i = 0; i < stub_entry->stub_template_size; i++)
16239     {
16240       switch (template_sequence[i].type)
16241 	{
16242 	case ARM_TYPE:
16243 	  sym_type = ARM_MAP_ARM;
16244 	  break;
16245 
16246 	case THUMB16_TYPE:
16247 	case THUMB32_TYPE:
16248 	  sym_type = ARM_MAP_THUMB;
16249 	  break;
16250 
16251 	case DATA_TYPE:
16252 	  sym_type = ARM_MAP_DATA;
16253 	  break;
16254 
16255 	default:
16256 	  BFD_FAIL ();
16257 	  return FALSE;
16258 	}
16259 
16260       if (template_sequence[i].type != prev_type)
16261 	{
16262 	  prev_type = template_sequence[i].type;
16263 	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
16264 	    return FALSE;
16265 	}
16266 
16267       switch (template_sequence[i].type)
16268 	{
16269 	case ARM_TYPE:
16270 	case THUMB32_TYPE:
16271 	  size += 4;
16272 	  break;
16273 
16274 	case THUMB16_TYPE:
16275 	  size += 2;
16276 	  break;
16277 
16278 	case DATA_TYPE:
16279 	  size += 4;
16280 	  break;
16281 
16282 	default:
16283 	  BFD_FAIL ();
16284 	  return FALSE;
16285 	}
16286     }
16287 
16288   return TRUE;
16289 }
16290 
16291 /* Output mapping symbols for linker generated sections,
16292    and for those data-only sections that do not have a
16293    $d.  */
16294 
16295 static bfd_boolean
16296 elf32_arm_output_arch_local_syms (bfd *output_bfd,
16297 				  struct bfd_link_info *info,
16298 				  void *flaginfo,
16299 				  int (*func) (void *, const char *,
16300 					       Elf_Internal_Sym *,
16301 					       asection *,
16302 					       struct elf_link_hash_entry *))
16303 {
16304   output_arch_syminfo osi;
16305   struct elf32_arm_link_hash_table *htab;
16306   bfd_vma offset;
16307   bfd_size_type size;
16308   bfd *input_bfd;
16309 
16310   htab = elf32_arm_hash_table (info);
16311   if (htab == NULL)
16312     return FALSE;
16313 
16314   check_use_blx (htab);
16315 
16316   osi.flaginfo = flaginfo;
16317   osi.info = info;
16318   osi.func = func;
16319 
16320   /* Add a $d mapping symbol to data-only sections that
16321      don't have any mapping symbol.  This may result in (harmless) redundant
16322      mapping symbols.  */
16323   for (input_bfd = info->input_bfds;
16324        input_bfd != NULL;
16325        input_bfd = input_bfd->link.next)
16326     {
16327       if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
16328 	for (osi.sec = input_bfd->sections;
16329 	     osi.sec != NULL;
16330 	     osi.sec = osi.sec->next)
16331 	  {
16332 	    if (osi.sec->output_section != NULL
16333 		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
16334 		    != 0)
16335 		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
16336 		   == SEC_HAS_CONTENTS
16337 		&& get_arm_elf_section_data (osi.sec) != NULL
16338 		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
16339 		&& osi.sec->size > 0
16340 		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
16341 	      {
16342 		osi.sec_shndx = _bfd_elf_section_from_bfd_section
16343 		  (output_bfd, osi.sec->output_section);
16344 		if (osi.sec_shndx != (int)SHN_BAD)
16345 		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
16346 	      }
16347 	  }
16348     }
16349 
16350   /* ARM->Thumb glue.  */
16351   if (htab->arm_glue_size > 0)
16352     {
16353       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16354 					ARM2THUMB_GLUE_SECTION_NAME);
16355 
16356       osi.sec_shndx = _bfd_elf_section_from_bfd_section
16357 	  (output_bfd, osi.sec->output_section);
16358       if (bfd_link_pic (info) || htab->root.is_relocatable_executable
16359 	  || htab->pic_veneer)
16360 	size = ARM2THUMB_PIC_GLUE_SIZE;
16361       else if (htab->use_blx)
16362 	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
16363       else
16364 	size = ARM2THUMB_STATIC_GLUE_SIZE;
16365 
16366       for (offset = 0; offset < htab->arm_glue_size; offset += size)
16367 	{
16368 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
16369 	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
16370 	}
16371     }
16372 
16373   /* Thumb->ARM glue.  */
16374   if (htab->thumb_glue_size > 0)
16375     {
16376       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16377 					THUMB2ARM_GLUE_SECTION_NAME);
16378 
16379       osi.sec_shndx = _bfd_elf_section_from_bfd_section
16380 	  (output_bfd, osi.sec->output_section);
16381       size = THUMB2ARM_GLUE_SIZE;
16382 
16383       for (offset = 0; offset < htab->thumb_glue_size; offset += size)
16384 	{
16385 	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
16386 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
16387 	}
16388     }
16389 
16390   /* ARMv4 BX veneers.  */
16391   if (htab->bx_glue_size > 0)
16392     {
16393       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16394 					ARM_BX_GLUE_SECTION_NAME);
16395 
16396       osi.sec_shndx = _bfd_elf_section_from_bfd_section
16397 	  (output_bfd, osi.sec->output_section);
16398 
16399       elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
16400     }
16401 
16402   /* Long calls stubs.  */
16403   if (htab->stub_bfd && htab->stub_bfd->sections)
16404     {
16405       asection* stub_sec;
16406 
16407       for (stub_sec = htab->stub_bfd->sections;
16408 	   stub_sec != NULL;
16409 	   stub_sec = stub_sec->next)
16410 	{
16411 	  /* Ignore non-stub sections.  */
16412 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
16413 	    continue;
16414 
16415 	  osi.sec = stub_sec;
16416 
16417 	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
16418 	    (output_bfd, osi.sec->output_section);
16419 
16420 	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
16421 	}
16422     }
16423 
16424   /* Finally, output mapping symbols for the PLT.  */
16425   if (htab->root.splt && htab->root.splt->size > 0)
16426     {
16427       osi.sec = htab->root.splt;
16428       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16429 		       (output_bfd, osi.sec->output_section));
16430 
16431       /* Output mapping symbols for the plt header.  SymbianOS does not have a
16432 	 plt header.  */
16433       if (htab->vxworks_p)
16434 	{
16435 	  /* VxWorks shared libraries have no PLT header.  */
16436 	  if (!bfd_link_pic (info))
16437 	    {
16438 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16439 		return FALSE;
16440 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16441 		return FALSE;
16442 	    }
16443 	}
16444       else if (htab->nacl_p)
16445 	{
16446 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16447 	    return FALSE;
16448 	}
16449       else if (using_thumb_only (htab))
16450 	{
16451 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
16452 	    return FALSE;
16453 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16454 	    return FALSE;
16455 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
16456 	    return FALSE;
16457 	}
16458       else if (!htab->symbian_p)
16459 	{
16460 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16461 	    return FALSE;
16462 #ifndef FOUR_WORD_PLT
16463 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
16464 	    return FALSE;
16465 #endif
16466 	}
16467     }
16468   if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
16469     {
16470       /* NaCl uses a special first entry in .iplt too.  */
16471       osi.sec = htab->root.iplt;
16472       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16473 		       (output_bfd, osi.sec->output_section));
16474       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16475 	return FALSE;
16476     }
16477   if ((htab->root.splt && htab->root.splt->size > 0)
16478       || (htab->root.iplt && htab->root.iplt->size > 0))
16479     {
16480       elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
16481       for (input_bfd = info->input_bfds;
16482 	   input_bfd != NULL;
16483 	   input_bfd = input_bfd->link.next)
16484 	{
16485 	  struct arm_local_iplt_info **local_iplt;
16486 	  unsigned int i, num_syms;
16487 
16488 	  local_iplt = elf32_arm_local_iplt (input_bfd);
16489 	  if (local_iplt != NULL)
16490 	    {
16491 	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
16492 	      for (i = 0; i < num_syms; i++)
16493 		if (local_iplt[i] != NULL
16494 		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
16495 						    &local_iplt[i]->root,
16496 						    &local_iplt[i]->arm))
16497 		  return FALSE;
16498 	    }
16499 	}
16500     }
16501   if (htab->dt_tlsdesc_plt != 0)
16502     {
16503       /* Mapping symbols for the lazy tls trampoline.  */
16504       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
16505 	return FALSE;
16506 
16507       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16508 				     htab->dt_tlsdesc_plt + 24))
16509 	return FALSE;
16510     }
16511   if (htab->tls_trampoline != 0)
16512     {
16513       /* Mapping symbols for the tls trampoline.  */
16514       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
16515 	return FALSE;
16516 #ifdef FOUR_WORD_PLT
16517       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16518 				     htab->tls_trampoline + 12))
16519 	return FALSE;
16520 #endif
16521     }
16522 
16523   return TRUE;
16524 }
16525 
16526 /* Allocate target specific section data.  */
16527 
16528 static bfd_boolean
16529 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16530 {
16531   if (!sec->used_by_bfd)
16532     {
16533       _arm_elf_section_data *sdata;
16534       bfd_size_type amt = sizeof (*sdata);
16535 
16536       sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16537       if (sdata == NULL)
16538 	return FALSE;
16539       sec->used_by_bfd = sdata;
16540     }
16541 
16542   return _bfd_elf_new_section_hook (abfd, sec);
16543 }
16544 
16545 
16546 /* Used to order a list of mapping symbols by address.  */
16547 
16548 static int
16549 elf32_arm_compare_mapping (const void * a, const void * b)
16550 {
16551   const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16552   const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16553 
16554   if (amap->vma > bmap->vma)
16555     return 1;
16556   else if (amap->vma < bmap->vma)
16557     return -1;
16558   else if (amap->type > bmap->type)
16559     /* Ensure results do not depend on the host qsort for objects with
16560        multiple mapping symbols at the same address by sorting on type
16561        after vma.  */
16562     return 1;
16563   else if (amap->type < bmap->type)
16564     return -1;
16565   else
16566     return 0;
16567 }
16568 
16569 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */
16570 
16571 static unsigned long
16572 offset_prel31 (unsigned long addr, bfd_vma offset)
16573 {
16574   return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16575 }
16576 
16577 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16578    relocations.  */
16579 
16580 static void
16581 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
16582 {
16583   unsigned long first_word = bfd_get_32 (output_bfd, from);
16584   unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
16585 
16586   /* High bit of first word is supposed to be zero.  */
16587   if ((first_word & 0x80000000ul) == 0)
16588     first_word = offset_prel31 (first_word, offset);
16589 
16590   /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16591      (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
16592   if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
16593     second_word = offset_prel31 (second_word, offset);
16594 
16595   bfd_put_32 (output_bfd, first_word, to);
16596   bfd_put_32 (output_bfd, second_word, to + 4);
16597 }
16598 
16599 /* Data for make_branch_to_a8_stub().  */
16600 
16601 struct a8_branch_to_stub_data
16602 {
16603   asection *writing_section;
16604   bfd_byte *contents;
16605 };
16606 
16607 
16608 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
16609    places for a particular section.  */
16610 
16611 static bfd_boolean
16612 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
16613 		       void *in_arg)
16614 {
16615   struct elf32_arm_stub_hash_entry *stub_entry;
16616   struct a8_branch_to_stub_data *data;
16617   bfd_byte *contents;
16618   unsigned long branch_insn;
16619   bfd_vma veneered_insn_loc, veneer_entry_loc;
16620   bfd_signed_vma branch_offset;
16621   bfd *abfd;
16622   unsigned int loc;
16623 
16624   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
16625   data = (struct a8_branch_to_stub_data *) in_arg;
16626 
16627   if (stub_entry->target_section != data->writing_section
16628       || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
16629     return TRUE;
16630 
16631   contents = data->contents;
16632 
16633   /* We use target_section as Cortex-A8 erratum workaround stubs are only
16634      generated when both source and target are in the same section.  */
16635   veneered_insn_loc = stub_entry->target_section->output_section->vma
16636 		      + stub_entry->target_section->output_offset
16637 		      + stub_entry->source_value;
16638 
16639   veneer_entry_loc = stub_entry->stub_sec->output_section->vma
16640 		     + stub_entry->stub_sec->output_offset
16641 		     + stub_entry->stub_offset;
16642 
16643   if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
16644     veneered_insn_loc &= ~3u;
16645 
16646   branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
16647 
16648   abfd = stub_entry->target_section->owner;
16649   loc = stub_entry->source_value;
16650 
16651   /* We attempt to avoid this condition by setting stubs_always_after_branch
16652      in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
16653      This check is just to be on the safe side...  */
16654   if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
16655     {
16656       (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
16657 			       "allocated in unsafe location"), abfd);
16658       return FALSE;
16659     }
16660 
16661   switch (stub_entry->stub_type)
16662     {
16663     case arm_stub_a8_veneer_b:
16664     case arm_stub_a8_veneer_b_cond:
16665       branch_insn = 0xf0009000;
16666       goto jump24;
16667 
16668     case arm_stub_a8_veneer_blx:
16669       branch_insn = 0xf000e800;
16670       goto jump24;
16671 
16672     case arm_stub_a8_veneer_bl:
16673       {
16674 	unsigned int i1, j1, i2, j2, s;
16675 
16676 	branch_insn = 0xf000d000;
16677 
16678       jump24:
16679 	if (branch_offset < -16777216 || branch_offset > 16777214)
16680 	  {
16681 	    /* There's not much we can do apart from complain if this
16682 	       happens.  */
16683 	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
16684 				     "of range (input file too large)"), abfd);
16685 	    return FALSE;
16686 	  }
16687 
16688 	/* i1 = not(j1 eor s), so:
16689 	   not i1 = j1 eor s
16690 	   j1 = (not i1) eor s.  */
16691 
16692 	branch_insn |= (branch_offset >> 1) & 0x7ff;
16693 	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
16694 	i2 = (branch_offset >> 22) & 1;
16695 	i1 = (branch_offset >> 23) & 1;
16696 	s = (branch_offset >> 24) & 1;
16697 	j1 = (!i1) ^ s;
16698 	j2 = (!i2) ^ s;
16699 	branch_insn |= j2 << 11;
16700 	branch_insn |= j1 << 13;
16701 	branch_insn |= s << 26;
16702       }
16703       break;
16704 
16705     default:
16706       BFD_FAIL ();
16707       return FALSE;
16708     }
16709 
16710   bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
16711   bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
16712 
16713   return TRUE;
16714 }
16715 
16716 /* Beginning of stm32l4xx work-around.  */
16717 
16718 /* Functions encoding instructions necessary for the emission of the
16719    fix-stm32l4xx-629360.
16720    Encoding is extracted from the
16721    ARM (C) Architecture Reference Manual
16722    ARMv7-A and ARMv7-R edition
16723    ARM DDI 0406C.b (ID072512).  */
16724 
16725 static inline bfd_vma
16726 create_instruction_branch_absolute (int branch_offset)
16727 {
16728   /* A8.8.18 B (A8-334)
16729      B target_address (Encoding T4).  */
16730   /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
16731   /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
16732   /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */
16733 
16734   int s = ((branch_offset & 0x1000000) >> 24);
16735   int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16736   int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16737 
16738   if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16739     BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");
16740 
16741   bfd_vma patched_inst = 0xf0009000
16742     | s << 26 /* S.  */
16743     | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
16744     | j1 << 13 /* J1.  */
16745     | j2 << 11 /* J2.  */
16746     | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */
16747 
16748   return patched_inst;
16749 }
16750 
16751 static inline bfd_vma
16752 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16753 {
16754   /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16755      LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
16756   bfd_vma patched_inst = 0xe8900000
16757     | (/*W=*/wback << 21)
16758     | (base_reg << 16)
16759     | (reg_mask & 0x0000ffff);
16760 
16761   return patched_inst;
16762 }
16763 
16764 static inline bfd_vma
16765 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16766 {
16767   /* A8.8.60 LDMDB/LDMEA (A8-402)
16768      LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
16769   bfd_vma patched_inst = 0xe9100000
16770     | (/*W=*/wback << 21)
16771     | (base_reg << 16)
16772     | (reg_mask & 0x0000ffff);
16773 
16774   return patched_inst;
16775 }
16776 
16777 static inline bfd_vma
16778 create_instruction_mov (int target_reg, int source_reg)
16779 {
16780   /* A8.8.103 MOV (register) (A8-486)
16781      MOV Rd, Rm (Encoding T1).  */
16782   bfd_vma patched_inst = 0x4600
16783     | (target_reg & 0x7)
16784     | ((target_reg & 0x8) >> 3) << 7
16785     | (source_reg << 3);
16786 
16787   return patched_inst;
16788 }
16789 
16790 static inline bfd_vma
16791 create_instruction_sub (int target_reg, int source_reg, int value)
16792 {
16793   /* A8.8.221 SUB (immediate) (A8-708)
16794      SUB Rd, Rn, #value (Encoding T3).  */
16795   bfd_vma patched_inst = 0xf1a00000
16796     | (target_reg << 8)
16797     | (source_reg << 16)
16798     | (/*S=*/0 << 20)
16799     | ((value & 0x800) >> 11) << 26
16800     | ((value & 0x700) >>  8) << 12
16801     | (value & 0x0ff);
16802 
16803   return patched_inst;
16804 }
16805 
16806 static inline bfd_vma
16807 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
16808 			   int first_reg)
16809 {
16810   /* A8.8.332 VLDM (A8-922)
16811      VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
16812   bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
16813     | (/*W=*/wback << 21)
16814     | (base_reg << 16)
16815     | (num_words & 0x000000ff)
16816     | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
16817     | (first_reg & 0x00000001) << 22;
16818 
16819   return patched_inst;
16820 }
16821 
16822 static inline bfd_vma
16823 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
16824 			   int first_reg)
16825 {
16826   /* A8.8.332 VLDM (A8-922)
16827      VLMD{MODE} Rn!, {} (Encoding T1 or T2).  */
16828   bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
16829     | (base_reg << 16)
16830     | (num_words & 0x000000ff)
16831     | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
16832     | (first_reg & 0x00000001) << 22;
16833 
16834   return patched_inst;
16835 }
16836 
16837 static inline bfd_vma
16838 create_instruction_udf_w (int value)
16839 {
16840   /* A8.8.247 UDF (A8-758)
16841      Undefined (Encoding T2).  */
16842   bfd_vma patched_inst = 0xf7f0a000
16843     | (value & 0x00000fff)
16844     | (value & 0x000f0000) << 16;
16845 
16846   return patched_inst;
16847 }
16848 
16849 static inline bfd_vma
16850 create_instruction_udf (int value)
16851 {
16852   /* A8.8.247 UDF (A8-758)
16853      Undefined (Encoding T1).  */
16854   bfd_vma patched_inst = 0xde00
16855     | (value & 0xff);
16856 
16857   return patched_inst;
16858 }
16859 
16860 /* Functions writing an instruction in memory, returning the next
16861    memory position to write to.  */
16862 
16863 static inline bfd_byte *
16864 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
16865 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
16866 {
16867   put_thumb2_insn (htab, output_bfd, insn, pt);
16868   return pt + 4;
16869 }
16870 
16871 static inline bfd_byte *
16872 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
16873 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
16874 {
16875   put_thumb_insn (htab, output_bfd, insn, pt);
16876   return pt + 2;
16877 }
16878 
16879 /* Function filling up a region in memory with T1 and T2 UDFs taking
16880    care of alignment.  */
16881 
16882 static bfd_byte *
16883 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16884 			 bfd *                   output_bfd,
16885 			 const bfd_byte * const  base_stub_contents,
16886 			 bfd_byte * const        from_stub_contents,
16887 			 const bfd_byte * const  end_stub_contents)
16888 {
16889   bfd_byte *current_stub_contents = from_stub_contents;
16890 
16891   /* Fill the remaining of the stub with deterministic contents : UDF
16892      instructions.
16893      Check if realignment is needed on modulo 4 frontier using T1, to
16894      further use T2.  */
16895   if ((current_stub_contents < end_stub_contents)
16896       && !((current_stub_contents - base_stub_contents) % 2)
16897       && ((current_stub_contents - base_stub_contents) % 4))
16898     current_stub_contents =
16899       push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16900 			  create_instruction_udf (0));
16901 
16902   for (; current_stub_contents < end_stub_contents;)
16903     current_stub_contents =
16904       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16905 			  create_instruction_udf_w (0));
16906 
16907   return current_stub_contents;
16908 }
16909 
16910 /* Functions writing the stream of instructions equivalent to the
16911    derived sequence for ldmia, ldmdb, vldm respectively.  */
16912 
16913 static void
16914 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
16915 				       bfd * output_bfd,
16916 				       const insn32 initial_insn,
16917 				       const bfd_byte *const initial_insn_addr,
16918 				       bfd_byte *const base_stub_contents)
16919 {
16920   int wback = (initial_insn & 0x00200000) >> 21;
16921   int ri, rn = (initial_insn & 0x000F0000) >> 16;
16922   int insn_all_registers = initial_insn & 0x0000ffff;
16923   int insn_low_registers, insn_high_registers;
16924   int usable_register_mask;
16925   int nb_registers = popcount (insn_all_registers);
16926   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
16927   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
16928   bfd_byte *current_stub_contents = base_stub_contents;
16929 
16930   BFD_ASSERT (is_thumb2_ldmia (initial_insn));
16931 
16932   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
16933      smaller than 8 registers load sequences that do not cause the
16934      hardware issue.  */
16935   if (nb_registers <= 8)
16936     {
16937       /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
16938       current_stub_contents =
16939 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16940 			    initial_insn);
16941 
16942       /* B initial_insn_addr+4.  */
16943       if (!restore_pc)
16944 	current_stub_contents =
16945 	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16946 			      create_instruction_branch_absolute
16947 			      (initial_insn_addr - current_stub_contents));
16948 
16949 
16950       /* Fill the remaining of the stub with deterministic contents.  */
16951       current_stub_contents =
16952 	stm32l4xx_fill_stub_udf (htab, output_bfd,
16953 				 base_stub_contents, current_stub_contents,
16954 				 base_stub_contents +
16955 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
16956 
16957       return;
16958     }
16959 
16960   /* - reg_list[13] == 0.  */
16961   BFD_ASSERT ((insn_all_registers & (1 << 13))==0);
16962 
16963   /* - reg_list[14] & reg_list[15] != 1.  */
16964   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
16965 
16966   /* - if (wback==1) reg_list[rn] == 0.  */
16967   BFD_ASSERT (!wback || !restore_rn);
16968 
16969   /* - nb_registers > 8.  */
16970   BFD_ASSERT (popcount (insn_all_registers) > 8);
16971 
16972   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
16973 
16974   /* In the following algorithm, we split this wide LDM using 2 LDM insns:
16975     - One with the 7 lowest registers (register mask 0x007F)
16976       This LDM will finally contain between 2 and 7 registers
16977     - One with the 7 highest registers (register mask 0xDF80)
16978       This ldm will finally contain between 2 and 7 registers.  */
16979   insn_low_registers = insn_all_registers & 0x007F;
16980   insn_high_registers = insn_all_registers & 0xDF80;
16981 
16982   /* A spare register may be needed during this veneer to temporarily
16983      handle the base register.  This register will be restored with the
16984      last LDM operation.
16985      The usable register may be any general purpose register (that
16986      excludes PC, SP, LR : register mask is 0x1FFF).  */
16987   usable_register_mask = 0x1FFF;
16988 
16989   /* Generate the stub function.  */
16990   if (wback)
16991     {
16992       /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
16993       current_stub_contents =
16994 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16995 			    create_instruction_ldmia
16996 			    (rn, /*wback=*/1, insn_low_registers));
16997 
16998       /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
16999       current_stub_contents =
17000 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17001 			    create_instruction_ldmia
17002 			    (rn, /*wback=*/1, insn_high_registers));
17003       if (!restore_pc)
17004 	{
17005 	  /* B initial_insn_addr+4.  */
17006 	  current_stub_contents =
17007 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17008 				create_instruction_branch_absolute
17009 				(initial_insn_addr - current_stub_contents));
17010        }
17011     }
17012   else /* if (!wback).  */
17013     {
17014       ri = rn;
17015 
17016       /* If Rn is not part of the high-register-list, move it there.  */
17017       if (!(insn_high_registers & (1 << rn)))
17018 	{
17019 	  /* Choose a Ri in the high-register-list that will be restored.  */
17020 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17021 
17022 	  /* MOV Ri, Rn.  */
17023 	  current_stub_contents =
17024 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17025 				create_instruction_mov (ri, rn));
17026 	}
17027 
17028       /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
17029       current_stub_contents =
17030 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17031 			    create_instruction_ldmia
17032 			    (ri, /*wback=*/1, insn_low_registers));
17033 
17034       /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
17035       current_stub_contents =
17036 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17037 			    create_instruction_ldmia
17038 			    (ri, /*wback=*/0, insn_high_registers));
17039 
17040       if (!restore_pc)
17041 	{
17042 	  /* B initial_insn_addr+4.  */
17043 	  current_stub_contents =
17044 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17045 				create_instruction_branch_absolute
17046 				(initial_insn_addr - current_stub_contents));
17047 	}
17048     }
17049 
17050   /* Fill the remaining of the stub with deterministic contents.  */
17051   current_stub_contents =
17052     stm32l4xx_fill_stub_udf (htab, output_bfd,
17053 			     base_stub_contents, current_stub_contents,
17054 			     base_stub_contents +
17055 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
17056 }
17057 
17058 static void
17059 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
17060 				       bfd * output_bfd,
17061 				       const insn32 initial_insn,
17062 				       const bfd_byte *const initial_insn_addr,
17063 				       bfd_byte *const base_stub_contents)
17064 {
17065   int wback = (initial_insn & 0x00200000) >> 21;
17066   int ri, rn = (initial_insn & 0x000f0000) >> 16;
17067   int insn_all_registers = initial_insn & 0x0000ffff;
17068   int insn_low_registers, insn_high_registers;
17069   int usable_register_mask;
17070   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
17071   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
17072   int nb_registers = popcount (insn_all_registers);
17073   bfd_byte *current_stub_contents = base_stub_contents;
17074 
17075   BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
17076 
17077   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
17078      smaller than 8 registers load sequences that do not cause the
17079      hardware issue.  */
17080   if (nb_registers <= 8)
17081     {
17082       /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
17083       current_stub_contents =
17084 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17085 			    initial_insn);
17086 
17087       /* B initial_insn_addr+4.  */
17088       current_stub_contents =
17089 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17090 			    create_instruction_branch_absolute
17091 			    (initial_insn_addr - current_stub_contents));
17092 
17093       /* Fill the remaining of the stub with deterministic contents.  */
17094       current_stub_contents =
17095 	stm32l4xx_fill_stub_udf (htab, output_bfd,
17096 				 base_stub_contents, current_stub_contents,
17097 				 base_stub_contents +
17098 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
17099 
17100       return;
17101     }
17102 
17103   /* - reg_list[13] == 0.  */
17104   BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
17105 
17106   /* - reg_list[14] & reg_list[15] != 1.  */
17107   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
17108 
17109   /* - if (wback==1) reg_list[rn] == 0.  */
17110   BFD_ASSERT (!wback || !restore_rn);
17111 
17112   /* - nb_registers > 8.  */
17113   BFD_ASSERT (popcount (insn_all_registers) > 8);
17114 
17115   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
17116 
17117   /* In the following algorithm, we split this wide LDM using 2 LDM insn:
17118     - One with the 7 lowest registers (register mask 0x007F)
17119       This LDM will finally contain between 2 and 7 registers
17120     - One with the 7 highest registers (register mask 0xDF80)
17121       This ldm will finally contain between 2 and 7 registers.  */
17122   insn_low_registers = insn_all_registers & 0x007F;
17123   insn_high_registers = insn_all_registers & 0xDF80;
17124 
17125   /* A spare register may be needed during this veneer to temporarily
17126      handle the base register.  This register will be restored with
17127      the last LDM operation.
17128      The usable register may be any general purpose register (that excludes
17129      PC, SP, LR : register mask is 0x1FFF).  */
17130   usable_register_mask = 0x1FFF;
17131 
17132   /* Generate the stub function.  */
17133   if (!wback && !restore_pc && !restore_rn)
17134     {
17135       /* Choose a Ri in the low-register-list that will be restored.  */
17136       ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
17137 
17138       /* MOV Ri, Rn.  */
17139       current_stub_contents =
17140 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17141 			    create_instruction_mov (ri, rn));
17142 
17143       /* LDMDB Ri!, {R-high-register-list}.  */
17144       current_stub_contents =
17145 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17146 			    create_instruction_ldmdb
17147 			    (ri, /*wback=*/1, insn_high_registers));
17148 
17149       /* LDMDB Ri, {R-low-register-list}.  */
17150       current_stub_contents =
17151 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17152 			    create_instruction_ldmdb
17153 			    (ri, /*wback=*/0, insn_low_registers));
17154 
17155       /* B initial_insn_addr+4.  */
17156       current_stub_contents =
17157 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17158 			    create_instruction_branch_absolute
17159 			    (initial_insn_addr - current_stub_contents));
17160     }
17161   else if (wback && !restore_pc && !restore_rn)
17162     {
17163       /* LDMDB Rn!, {R-high-register-list}.  */
17164       current_stub_contents =
17165 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17166 			    create_instruction_ldmdb
17167 			    (rn, /*wback=*/1, insn_high_registers));
17168 
17169       /* LDMDB Rn!, {R-low-register-list}.  */
17170       current_stub_contents =
17171 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17172 			    create_instruction_ldmdb
17173 			    (rn, /*wback=*/1, insn_low_registers));
17174 
17175       /* B initial_insn_addr+4.  */
17176       current_stub_contents =
17177 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17178 			    create_instruction_branch_absolute
17179 			    (initial_insn_addr - current_stub_contents));
17180     }
17181   else if (!wback && restore_pc && !restore_rn)
17182     {
17183       /* Choose a Ri in the high-register-list that will be restored.  */
17184       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17185 
17186       /* SUB Ri, Rn, #(4*nb_registers).  */
17187       current_stub_contents =
17188 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17189 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
17190 
17191       /* LDMIA Ri!, {R-low-register-list}.  */
17192       current_stub_contents =
17193 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17194 			    create_instruction_ldmia
17195 			    (ri, /*wback=*/1, insn_low_registers));
17196 
17197       /* LDMIA Ri, {R-high-register-list}.  */
17198       current_stub_contents =
17199 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17200 			    create_instruction_ldmia
17201 			    (ri, /*wback=*/0, insn_high_registers));
17202     }
17203   else if (wback && restore_pc && !restore_rn)
17204     {
17205       /* Choose a Ri in the high-register-list that will be restored.  */
17206       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17207 
17208       /* SUB Rn, Rn, #(4*nb_registers)  */
17209       current_stub_contents =
17210 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17211 			    create_instruction_sub (rn, rn, (4 * nb_registers)));
17212 
17213       /* MOV Ri, Rn.  */
17214       current_stub_contents =
17215 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17216 			    create_instruction_mov (ri, rn));
17217 
17218       /* LDMIA Ri!, {R-low-register-list}.  */
17219       current_stub_contents =
17220 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17221 			    create_instruction_ldmia
17222 			    (ri, /*wback=*/1, insn_low_registers));
17223 
17224       /* LDMIA Ri, {R-high-register-list}.  */
17225       current_stub_contents =
17226 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17227 			    create_instruction_ldmia
17228 			    (ri, /*wback=*/0, insn_high_registers));
17229     }
17230   else if (!wback && !restore_pc && restore_rn)
17231     {
17232       ri = rn;
17233       if (!(insn_low_registers & (1 << rn)))
17234 	{
17235 	  /* Choose a Ri in the low-register-list that will be restored.  */
17236 	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
17237 
17238 	  /* MOV Ri, Rn.  */
17239 	  current_stub_contents =
17240 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17241 				create_instruction_mov (ri, rn));
17242 	}
17243 
17244       /* LDMDB Ri!, {R-high-register-list}.  */
17245       current_stub_contents =
17246 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17247 			    create_instruction_ldmdb
17248 			    (ri, /*wback=*/1, insn_high_registers));
17249 
17250       /* LDMDB Ri, {R-low-register-list}.  */
17251       current_stub_contents =
17252 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17253 			    create_instruction_ldmdb
17254 			    (ri, /*wback=*/0, insn_low_registers));
17255 
17256       /* B initial_insn_addr+4.  */
17257       current_stub_contents =
17258 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17259 			    create_instruction_branch_absolute
17260 			    (initial_insn_addr - current_stub_contents));
17261     }
17262   else if (!wback && restore_pc && restore_rn)
17263     {
17264       ri = rn;
17265       if (!(insn_high_registers & (1 << rn)))
17266 	{
17267 	  /* Choose a Ri in the high-register-list that will be restored.  */
17268 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17269 	}
17270 
17271       /* SUB Ri, Rn, #(4*nb_registers).  */
17272       current_stub_contents =
17273 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17274 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
17275 
17276       /* LDMIA Ri!, {R-low-register-list}.  */
17277       current_stub_contents =
17278 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17279 			    create_instruction_ldmia
17280 			    (ri, /*wback=*/1, insn_low_registers));
17281 
17282       /* LDMIA Ri, {R-high-register-list}.  */
17283       current_stub_contents =
17284 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17285 			    create_instruction_ldmia
17286 			    (ri, /*wback=*/0, insn_high_registers));
17287     }
17288   else if (wback && restore_rn)
17289     {
17290       /* The assembler should not have accepted to encode this.  */
17291       BFD_ASSERT (0 && "Cannot patch an instruction that has an "
17292 	"undefined behavior.\n");
17293     }
17294 
17295   /* Fill the remaining of the stub with deterministic contents.  */
17296   current_stub_contents =
17297     stm32l4xx_fill_stub_udf (htab, output_bfd,
17298 			     base_stub_contents, current_stub_contents,
17299 			     base_stub_contents +
17300 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
17301 
17302 }
17303 
17304 static void
17305 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
17306 				      bfd * output_bfd,
17307 				      const insn32 initial_insn,
17308 				      const bfd_byte *const initial_insn_addr,
17309 				      bfd_byte *const base_stub_contents)
17310 {
17311   int num_words = ((unsigned int) initial_insn << 24) >> 24;
17312   bfd_byte *current_stub_contents = base_stub_contents;
17313 
17314   BFD_ASSERT (is_thumb2_vldm (initial_insn));
17315 
17316   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
17317      smaller than 8 words load sequences that do not cause the
17318      hardware issue.  */
17319   if (num_words <= 8)
17320     {
17321       /* Untouched instruction.  */
17322       current_stub_contents =
17323 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17324 			    initial_insn);
17325 
17326       /* B initial_insn_addr+4.  */
17327       current_stub_contents =
17328 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17329 			    create_instruction_branch_absolute
17330 			    (initial_insn_addr - current_stub_contents));
17331     }
17332   else
17333     {
17334       bfd_boolean is_dp = /* DP encoding. */
17335 	(initial_insn & 0xfe100f00) == 0xec100b00;
17336       bfd_boolean is_ia_nobang = /* (IA without !).  */
17337 	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
17338       bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
17339 	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
17340       bfd_boolean is_db_bang = /* (DB with !).  */
17341 	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
17342       int base_reg = ((unsigned int) initial_insn << 12) >> 28;
17343       /* d = UInt (Vd:D);.  */
17344       int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
17345 	| (((unsigned int)initial_insn << 9) >> 31);
17346 
17347       /* Compute the number of 8-words chunks needed to split.  */
17348       int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
17349       int chunk;
17350 
17351       /* The test coverage has been done assuming the following
17352 	 hypothesis that exactly one of the previous is_ predicates is
17353 	 true.  */
17354       BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
17355 		  && !(is_ia_nobang & is_ia_bang & is_db_bang));
17356 
17357       /* We treat the cutting of the words in one pass for all
17358 	 cases, then we emit the adjustments:
17359 
17360 	 vldm rx, {...}
17361 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
17362 	 -> sub rx, rx, #size (list)
17363 
17364 	 vldm rx!, {...}
17365 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
17366 	 This also handles vpop instruction (when rx is sp)
17367 
17368 	 vldmd rx!, {...}
17369 	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
17370       for (chunk = 0; chunk < chunks; ++chunk)
17371 	{
17372 	  bfd_vma new_insn = 0;
17373 
17374 	  if (is_ia_nobang || is_ia_bang)
17375 	    {
17376 	      new_insn = create_instruction_vldmia
17377 		(base_reg,
17378 		 is_dp,
17379 		 /*wback= .  */1,
17380 		 chunks - (chunk + 1) ?
17381 		 8 : num_words - chunk * 8,
17382 		 first_reg + chunk * 8);
17383 	    }
17384 	  else if (is_db_bang)
17385 	    {
17386 	      new_insn = create_instruction_vldmdb
17387 		(base_reg,
17388 		 is_dp,
17389 		 chunks - (chunk + 1) ?
17390 		 8 : num_words - chunk * 8,
17391 		 first_reg + chunk * 8);
17392 	    }
17393 
17394 	  if (new_insn)
17395 	    current_stub_contents =
17396 	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17397 				  new_insn);
17398 	}
17399 
17400       /* Only this case requires the base register compensation
17401 	 subtract.  */
17402       if (is_ia_nobang)
17403 	{
17404 	  current_stub_contents =
17405 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17406 				create_instruction_sub
17407 				(base_reg, base_reg, 4*num_words));
17408 	}
17409 
17410       /* B initial_insn_addr+4.  */
17411       current_stub_contents =
17412 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17413 			    create_instruction_branch_absolute
17414 			    (initial_insn_addr - current_stub_contents));
17415     }
17416 
17417   /* Fill the remaining of the stub with deterministic contents.  */
17418   current_stub_contents =
17419     stm32l4xx_fill_stub_udf (htab, output_bfd,
17420 			     base_stub_contents, current_stub_contents,
17421 			     base_stub_contents +
17422 			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
17423 }
17424 
17425 static void
17426 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17427 				 bfd * output_bfd,
17428 				 const insn32 wrong_insn,
17429 				 const bfd_byte *const wrong_insn_addr,
17430 				 bfd_byte *const stub_contents)
17431 {
17432   if (is_thumb2_ldmia (wrong_insn))
17433     stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17434 					   wrong_insn, wrong_insn_addr,
17435 					   stub_contents);
17436   else if (is_thumb2_ldmdb (wrong_insn))
17437     stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17438 					   wrong_insn, wrong_insn_addr,
17439 					   stub_contents);
17440   else if (is_thumb2_vldm (wrong_insn))
17441     stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17442 					  wrong_insn, wrong_insn_addr,
17443 					  stub_contents);
17444 }
17445 
17446 /* End of stm32l4xx work-around.  */
17447 
17448 
17449 static void
17450 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17451 			  asection *output_sec, Elf_Internal_Rela *rel)
17452 {
17453   BFD_ASSERT (output_sec && rel);
17454   struct bfd_elf_section_reloc_data *output_reldata;
17455   struct elf32_arm_link_hash_table *htab;
17456   struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17457   Elf_Internal_Shdr *rel_hdr;
17458 
17459 
17460   if (oesd->rel.hdr)
17461     {
17462       rel_hdr = oesd->rel.hdr;
17463       output_reldata = &(oesd->rel);
17464     }
17465   else if (oesd->rela.hdr)
17466     {
17467       rel_hdr = oesd->rela.hdr;
17468       output_reldata = &(oesd->rela);
17469     }
17470   else
17471     {
17472       abort ();
17473     }
17474 
17475   bfd_byte *erel = rel_hdr->contents;
17476   erel += output_reldata->count * rel_hdr->sh_entsize;
17477   htab = elf32_arm_hash_table (info);
17478   SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17479   output_reldata->count++;
17480 }
17481 
17482 /* Do code byteswapping.  Return FALSE afterwards so that the section is
17483    written out as normal.  */
17484 
17485 static bfd_boolean
17486 elf32_arm_write_section (bfd *output_bfd,
17487 			 struct bfd_link_info *link_info,
17488 			 asection *sec,
17489 			 bfd_byte *contents)
17490 {
17491   unsigned int mapcount, errcount;
17492   _arm_elf_section_data *arm_data;
17493   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
17494   elf32_arm_section_map *map;
17495   elf32_vfp11_erratum_list *errnode;
17496   elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
17497   bfd_vma ptr;
17498   bfd_vma end;
17499   bfd_vma offset = sec->output_section->vma + sec->output_offset;
17500   bfd_byte tmp;
17501   unsigned int i;
17502 
17503   if (globals == NULL)
17504     return FALSE;
17505 
17506   /* If this section has not been allocated an _arm_elf_section_data
17507      structure then we cannot record anything.  */
17508   arm_data = get_arm_elf_section_data (sec);
17509   if (arm_data == NULL)
17510     return FALSE;
17511 
17512   mapcount = arm_data->mapcount;
17513   map = arm_data->map;
17514   errcount = arm_data->erratumcount;
17515 
17516   if (errcount != 0)
17517     {
17518       unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
17519 
17520       for (errnode = arm_data->erratumlist; errnode != 0;
17521 	   errnode = errnode->next)
17522 	{
17523 	  bfd_vma target = errnode->vma - offset;
17524 
17525 	  switch (errnode->type)
17526 	    {
17527 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
17528 	      {
17529 		bfd_vma branch_to_veneer;
17530 		/* Original condition code of instruction, plus bit mask for
17531 		   ARM B instruction.  */
17532 		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
17533 				  | 0x0a000000;
17534 
17535 		/* The instruction is before the label.  */
17536 		target -= 4;
17537 
17538 		/* Above offset included in -4 below.  */
17539 		branch_to_veneer = errnode->u.b.veneer->vma
17540 				   - errnode->vma - 4;
17541 
17542 		if ((signed) branch_to_veneer < -(1 << 25)
17543 		    || (signed) branch_to_veneer >= (1 << 25))
17544 		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17545 					   "range"), output_bfd);
17546 
17547 		insn |= (branch_to_veneer >> 2) & 0xffffff;
17548 		contents[endianflip ^ target] = insn & 0xff;
17549 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17550 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17551 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17552 	      }
17553 	      break;
17554 
17555 	    case VFP11_ERRATUM_ARM_VENEER:
17556 	      {
17557 		bfd_vma branch_from_veneer;
17558 		unsigned int insn;
17559 
17560 		/* Take size of veneer into account.  */
17561 		branch_from_veneer = errnode->u.v.branch->vma
17562 				     - errnode->vma - 12;
17563 
17564 		if ((signed) branch_from_veneer < -(1 << 25)
17565 		    || (signed) branch_from_veneer >= (1 << 25))
17566 		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17567 					   "range"), output_bfd);
17568 
17569 		/* Original instruction.  */
17570 		insn = errnode->u.v.branch->u.b.vfp_insn;
17571 		contents[endianflip ^ target] = insn & 0xff;
17572 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17573 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17574 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17575 
17576 		/* Branch back to insn after original insn.  */
17577 		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
17578 		contents[endianflip ^ (target + 4)] = insn & 0xff;
17579 		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
17580 		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
17581 		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
17582 	      }
17583 	      break;
17584 
17585 	    default:
17586 	      abort ();
17587 	    }
17588 	}
17589     }
17590 
17591   if (arm_data->stm32l4xx_erratumcount != 0)
17592     {
17593       for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
17594 	   stm32l4xx_errnode != 0;
17595 	   stm32l4xx_errnode = stm32l4xx_errnode->next)
17596 	{
17597 	  bfd_vma target = stm32l4xx_errnode->vma - offset;
17598 
17599 	  switch (stm32l4xx_errnode->type)
17600 	    {
17601 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
17602 	      {
17603 		unsigned int insn;
17604 		bfd_vma branch_to_veneer =
17605 		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
17606 
17607 		if ((signed) branch_to_veneer < -(1 << 24)
17608 		    || (signed) branch_to_veneer >= (1 << 24))
17609 		  {
17610 		    bfd_vma out_of_range =
17611 		      ((signed) branch_to_veneer < -(1 << 24)) ?
17612 		      - branch_to_veneer - (1 << 24) :
17613 		      ((signed) branch_to_veneer >= (1 << 24)) ?
17614 		      branch_to_veneer - (1 << 24) : 0;
17615 
17616 		    (*_bfd_error_handler)
17617 		      (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17618 			 "Jump out of range by %ld bytes. "
17619 			 "Cannot encode branch instruction. "),
17620 		       output_bfd,
17621 		       (long) (stm32l4xx_errnode->vma - 4),
17622 		       out_of_range);
17623 		    continue;
17624 		  }
17625 
17626 		insn = create_instruction_branch_absolute
17627 		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
17628 
17629 		/* The instruction is before the label.  */
17630 		target -= 4;
17631 
17632 		put_thumb2_insn (globals, output_bfd,
17633 				 (bfd_vma) insn, contents + target);
17634 	      }
17635 	      break;
17636 
17637 	    case STM32L4XX_ERRATUM_VENEER:
17638 	      {
17639 		bfd_byte * veneer;
17640 		bfd_byte * veneer_r;
17641 		unsigned int insn;
17642 
17643 		veneer = contents + target;
17644 		veneer_r = veneer
17645 		  + stm32l4xx_errnode->u.b.veneer->vma
17646 		  - stm32l4xx_errnode->vma - 4;
17647 
17648 		if ((signed) (veneer_r - veneer -
17649 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
17650 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
17651 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
17652 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
17653 		    || (signed) (veneer_r - veneer) >= (1 << 24))
17654 		  {
17655 		    (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
17656 					     "veneer."), output_bfd);
17657 		     continue;
17658 		  }
17659 
17660 		/* Original instruction.  */
17661 		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
17662 
17663 		stm32l4xx_create_replacing_stub
17664 		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
17665 	      }
17666 	      break;
17667 
17668 	    default:
17669 	      abort ();
17670 	    }
17671 	}
17672     }
17673 
17674   if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
17675     {
17676       arm_unwind_table_edit *edit_node
17677 	= arm_data->u.exidx.unwind_edit_list;
17678       /* Now, sec->size is the size of the section we will write.  The original
17679 	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17680 	 markers) was sec->rawsize.  (This isn't the case if we perform no
17681 	 edits, then rawsize will be zero and we should use size).  */
17682       bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
17683       unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
17684       unsigned int in_index, out_index;
17685       bfd_vma add_to_offsets = 0;
17686 
17687       for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
17688 	{
17689 	  if (edit_node)
17690 	    {
17691 	      unsigned int edit_index = edit_node->index;
17692 
17693 	      if (in_index < edit_index && in_index * 8 < input_size)
17694 		{
17695 		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17696 				    contents + in_index * 8, add_to_offsets);
17697 		  out_index++;
17698 		  in_index++;
17699 		}
17700 	      else if (in_index == edit_index
17701 		       || (in_index * 8 >= input_size
17702 			   && edit_index == UINT_MAX))
17703 		{
17704 		  switch (edit_node->type)
17705 		    {
17706 		    case DELETE_EXIDX_ENTRY:
17707 		      in_index++;
17708 		      add_to_offsets += 8;
17709 		      break;
17710 
17711 		    case INSERT_EXIDX_CANTUNWIND_AT_END:
17712 		      {
17713 			asection *text_sec = edit_node->linked_section;
17714 			bfd_vma text_offset = text_sec->output_section->vma
17715 					      + text_sec->output_offset
17716 					      + text_sec->size;
17717 			bfd_vma exidx_offset = offset + out_index * 8;
17718 			unsigned long prel31_offset;
17719 
17720 			/* Note: this is meant to be equivalent to an
17721 			   R_ARM_PREL31 relocation.  These synthetic
17722 			   EXIDX_CANTUNWIND markers are not relocated by the
17723 			   usual BFD method.  */
17724 			prel31_offset = (text_offset - exidx_offset)
17725 					& 0x7ffffffful;
17726 			if (bfd_link_relocatable (link_info))
17727 			  {
17728 			    /* Here relocation for new EXIDX_CANTUNWIND is
17729 			       created, so there is no need to
17730 			       adjust offset by hand.  */
17731 			    prel31_offset = text_sec->output_offset
17732 					    + text_sec->size;
17733 
17734 			    /* New relocation entity.  */
17735 			    asection *text_out = text_sec->output_section;
17736 			    Elf_Internal_Rela rel;
17737 			    rel.r_addend = 0;
17738 			    rel.r_offset = exidx_offset;
17739 			    rel.r_info = ELF32_R_INFO (text_out->target_index,
17740 						       R_ARM_PREL31);
17741 
17742 			    elf32_arm_add_relocation (output_bfd, link_info,
17743 						      sec->output_section,
17744 						      &rel);
17745 			  }
17746 
17747 			/* First address we can't unwind.  */
17748 			bfd_put_32 (output_bfd, prel31_offset,
17749 				    &edited_contents[out_index * 8]);
17750 
17751 			/* Code for EXIDX_CANTUNWIND.  */
17752 			bfd_put_32 (output_bfd, 0x1,
17753 				    &edited_contents[out_index * 8 + 4]);
17754 
17755 			out_index++;
17756 			add_to_offsets -= 8;
17757 		      }
17758 		      break;
17759 		    }
17760 
17761 		  edit_node = edit_node->next;
17762 		}
17763 	    }
17764 	  else
17765 	    {
17766 	      /* No more edits, copy remaining entries verbatim.  */
17767 	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17768 				contents + in_index * 8, add_to_offsets);
17769 	      out_index++;
17770 	      in_index++;
17771 	    }
17772 	}
17773 
17774       if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
17775 	bfd_set_section_contents (output_bfd, sec->output_section,
17776 				  edited_contents,
17777 				  (file_ptr) sec->output_offset, sec->size);
17778 
17779       return TRUE;
17780     }
17781 
17782   /* Fix code to point to Cortex-A8 erratum stubs.  */
17783   if (globals->fix_cortex_a8)
17784     {
17785       struct a8_branch_to_stub_data data;
17786 
17787       data.writing_section = sec;
17788       data.contents = contents;
17789 
17790       bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
17791 			 & data);
17792     }
17793 
17794   if (mapcount == 0)
17795     return FALSE;
17796 
17797   if (globals->byteswap_code)
17798     {
17799       qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
17800 
17801       ptr = map[0].vma;
17802       for (i = 0; i < mapcount; i++)
17803 	{
17804 	  if (i == mapcount - 1)
17805 	    end = sec->size;
17806 	  else
17807 	    end = map[i + 1].vma;
17808 
17809 	  switch (map[i].type)
17810 	    {
17811 	    case 'a':
17812 	      /* Byte swap code words.  */
17813 	      while (ptr + 3 < end)
17814 		{
17815 		  tmp = contents[ptr];
17816 		  contents[ptr] = contents[ptr + 3];
17817 		  contents[ptr + 3] = tmp;
17818 		  tmp = contents[ptr + 1];
17819 		  contents[ptr + 1] = contents[ptr + 2];
17820 		  contents[ptr + 2] = tmp;
17821 		  ptr += 4;
17822 		}
17823 	      break;
17824 
17825 	    case 't':
17826 	      /* Byte swap code halfwords.  */
17827 	      while (ptr + 1 < end)
17828 		{
17829 		  tmp = contents[ptr];
17830 		  contents[ptr] = contents[ptr + 1];
17831 		  contents[ptr + 1] = tmp;
17832 		  ptr += 2;
17833 		}
17834 	      break;
17835 
17836 	    case 'd':
17837 	      /* Leave data alone.  */
17838 	      break;
17839 	    }
17840 	  ptr = end;
17841 	}
17842     }
17843 
17844   free (map);
17845   arm_data->mapcount = -1;
17846   arm_data->mapsize = 0;
17847   arm_data->map = NULL;
17848 
17849   return FALSE;
17850 }
17851 
17852 /* Mangle thumb function symbols as we read them in.  */
17853 
17854 static bfd_boolean
17855 elf32_arm_swap_symbol_in (bfd * abfd,
17856 			  const void *psrc,
17857 			  const void *pshn,
17858 			  Elf_Internal_Sym *dst)
17859 {
17860   if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17861     return FALSE;
17862   dst->st_target_internal = 0;
17863 
17864   /* New EABI objects mark thumb function symbols by setting the low bit of
17865      the address.  */
17866   if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17867       || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17868     {
17869       if (dst->st_value & 1)
17870 	{
17871 	  dst->st_value &= ~(bfd_vma) 1;
17872 	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17873 				   ST_BRANCH_TO_THUMB);
17874 	}
17875       else
17876 	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17877     }
17878   else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17879     {
17880       dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17881       ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17882     }
17883   else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17884     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17885   else
17886     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17887 
17888   return TRUE;
17889 }
17890 
17891 
17892 /* Mangle thumb function symbols as we write them out.  */
17893 
17894 static void
17895 elf32_arm_swap_symbol_out (bfd *abfd,
17896 			   const Elf_Internal_Sym *src,
17897 			   void *cdst,
17898 			   void *shndx)
17899 {
17900   Elf_Internal_Sym newsym;
17901 
17902   /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17903      of the address set, as per the new EABI.  We do this unconditionally
17904      because objcopy does not set the elf header flags until after
17905      it writes out the symbol table.  */
17906   if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17907     {
17908       newsym = *src;
17909       if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17910 	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17911       if (newsym.st_shndx != SHN_UNDEF)
17912 	{
17913 	  /* Do this only for defined symbols. At link type, the static
17914 	     linker will simulate the work of dynamic linker of resolving
17915 	     symbols and will carry over the thumbness of found symbols to
17916 	     the output symbol table. It's not clear how it happens, but
17917 	     the thumbness of undefined symbols can well be different at
17918 	     runtime, and writing '1' for them will be confusing for users
17919 	     and possibly for dynamic linker itself.
17920 	  */
17921 	  newsym.st_value |= 1;
17922 	}
17923 
17924       src = &newsym;
17925     }
17926   bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17927 }
17928 
17929 /* Add the PT_ARM_EXIDX program header.  */
17930 
17931 static bfd_boolean
17932 elf32_arm_modify_segment_map (bfd *abfd,
17933 			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
17934 {
17935   struct elf_segment_map *m;
17936   asection *sec;
17937 
17938   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17939   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17940     {
17941       /* If there is already a PT_ARM_EXIDX header, then we do not
17942 	 want to add another one.  This situation arises when running
17943 	 "strip"; the input binary already has the header.  */
17944       m = elf_seg_map (abfd);
17945       while (m && m->p_type != PT_ARM_EXIDX)
17946 	m = m->next;
17947       if (!m)
17948 	{
17949 	  m = (struct elf_segment_map *)
17950 	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17951 	  if (m == NULL)
17952 	    return FALSE;
17953 	  m->p_type = PT_ARM_EXIDX;
17954 	  m->count = 1;
17955 	  m->sections[0] = sec;
17956 
17957 	  m->next = elf_seg_map (abfd);
17958 	  elf_seg_map (abfd) = m;
17959 	}
17960     }
17961 
17962   return TRUE;
17963 }
17964 
17965 /* We may add a PT_ARM_EXIDX program header.  */
17966 
17967 static int
17968 elf32_arm_additional_program_headers (bfd *abfd,
17969 				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
17970 {
17971   asection *sec;
17972 
17973   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17974   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17975     return 1;
17976   else
17977     return 0;
17978 }
17979 
17980 /* Hook called by the linker routine which adds symbols from an object
17981    file.  */
17982 
17983 static bfd_boolean
17984 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17985 			   Elf_Internal_Sym *sym, const char **namep,
17986 			   flagword *flagsp, asection **secp, bfd_vma *valp)
17987 {
17988   if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17989       && (abfd->flags & DYNAMIC) == 0
17990       && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17991     elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
17992 
17993   if (elf32_arm_hash_table (info) == NULL)
17994     return FALSE;
17995 
17996   if (elf32_arm_hash_table (info)->vxworks_p
17997       && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17998 				       flagsp, secp, valp))
17999     return FALSE;
18000 
18001   return TRUE;
18002 }
18003 
18004 /* We use this to override swap_symbol_in and swap_symbol_out.  */
18005 const struct elf_size_info elf32_arm_size_info =
18006 {
18007   sizeof (Elf32_External_Ehdr),
18008   sizeof (Elf32_External_Phdr),
18009   sizeof (Elf32_External_Shdr),
18010   sizeof (Elf32_External_Rel),
18011   sizeof (Elf32_External_Rela),
18012   sizeof (Elf32_External_Sym),
18013   sizeof (Elf32_External_Dyn),
18014   sizeof (Elf_External_Note),
18015   4,
18016   1,
18017   32, 2,
18018   ELFCLASS32, EV_CURRENT,
18019   bfd_elf32_write_out_phdrs,
18020   bfd_elf32_write_shdrs_and_ehdr,
18021   bfd_elf32_checksum_contents,
18022   bfd_elf32_write_relocs,
18023   elf32_arm_swap_symbol_in,
18024   elf32_arm_swap_symbol_out,
18025   bfd_elf32_slurp_reloc_table,
18026   bfd_elf32_slurp_symbol_table,
18027   bfd_elf32_swap_dyn_in,
18028   bfd_elf32_swap_dyn_out,
18029   bfd_elf32_swap_reloc_in,
18030   bfd_elf32_swap_reloc_out,
18031   bfd_elf32_swap_reloca_in,
18032   bfd_elf32_swap_reloca_out
18033 };
18034 
18035 static bfd_vma
18036 read_code32 (const bfd *abfd, const bfd_byte *addr)
18037 {
18038   /* V7 BE8 code is always little endian.  */
18039   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
18040     return bfd_getl32 (addr);
18041 
18042   return bfd_get_32 (abfd, addr);
18043 }
18044 
18045 static bfd_vma
18046 read_code16 (const bfd *abfd, const bfd_byte *addr)
18047 {
18048   /* V7 BE8 code is always little endian.  */
18049   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
18050     return bfd_getl16 (addr);
18051 
18052   return bfd_get_16 (abfd, addr);
18053 }
18054 
18055 /* Return size of plt0 entry starting at ADDR
18056    or (bfd_vma) -1 if size can not be determined.  */
18057 
18058 static bfd_vma
18059 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
18060 {
18061   bfd_vma first_word;
18062   bfd_vma plt0_size;
18063 
18064   first_word = read_code32 (abfd, addr);
18065 
18066   if (first_word == elf32_arm_plt0_entry[0])
18067     plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
18068   else if (first_word == elf32_thumb2_plt0_entry[0])
18069     plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
18070   else
18071     /* We don't yet handle this PLT format.  */
18072     return (bfd_vma) -1;
18073 
18074   return plt0_size;
18075 }
18076 
18077 /* Return size of plt entry starting at offset OFFSET
18078    of plt section located at address START
18079    or (bfd_vma) -1 if size can not be determined.  */
18080 
18081 static bfd_vma
18082 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
18083 {
18084   bfd_vma first_insn;
18085   bfd_vma plt_size = 0;
18086   const bfd_byte *addr = start + offset;
18087 
18088   /* PLT entry size if fixed on Thumb-only platforms.  */
18089   if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
18090       return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
18091 
18092   /* Respect Thumb stub if necessary.  */
18093   if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
18094     {
18095       plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
18096     }
18097 
18098   /* Strip immediate from first add.  */
18099   first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
18100 
18101 #ifdef FOUR_WORD_PLT
18102   if (first_insn == elf32_arm_plt_entry[0])
18103     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
18104 #else
18105   if (first_insn == elf32_arm_plt_entry_long[0])
18106     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
18107   else if (first_insn == elf32_arm_plt_entry_short[0])
18108     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
18109 #endif
18110   else
18111     /* We don't yet handle this PLT format.  */
18112     return (bfd_vma) -1;
18113 
18114   return plt_size;
18115 }
18116 
18117 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
18118 
18119 static long
18120 elf32_arm_get_synthetic_symtab (bfd *abfd,
18121 			       long symcount ATTRIBUTE_UNUSED,
18122 			       asymbol **syms ATTRIBUTE_UNUSED,
18123 			       long dynsymcount,
18124 			       asymbol **dynsyms,
18125 			       asymbol **ret)
18126 {
18127   asection *relplt;
18128   asymbol *s;
18129   arelent *p;
18130   long count, i, n;
18131   size_t size;
18132   Elf_Internal_Shdr *hdr;
18133   char *names;
18134   asection *plt;
18135   bfd_vma offset;
18136   bfd_byte *data;
18137 
18138   *ret = NULL;
18139 
18140   if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
18141     return 0;
18142 
18143   if (dynsymcount <= 0)
18144     return 0;
18145 
18146   relplt = bfd_get_section_by_name (abfd, ".rel.plt");
18147   if (relplt == NULL)
18148     return 0;
18149 
18150   hdr = &elf_section_data (relplt)->this_hdr;
18151   if (hdr->sh_link != elf_dynsymtab (abfd)
18152       || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
18153     return 0;
18154 
18155   plt = bfd_get_section_by_name (abfd, ".plt");
18156   if (plt == NULL)
18157     return 0;
18158 
18159   if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
18160     return -1;
18161 
18162   data = plt->contents;
18163   if (data == NULL)
18164     {
18165       if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
18166 	return -1;
18167       bfd_cache_section_contents((asection *) plt, data);
18168     }
18169 
18170   count = relplt->size / hdr->sh_entsize;
18171   size = count * sizeof (asymbol);
18172   p = relplt->relocation;
18173   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
18174     {
18175       size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
18176       if (p->addend != 0)
18177 	size += sizeof ("+0x") - 1 + 8;
18178     }
18179 
18180   s = *ret = (asymbol *) bfd_malloc (size);
18181   if (s == NULL)
18182     return -1;
18183 
18184   offset = elf32_arm_plt0_size (abfd, data);
18185   if (offset == (bfd_vma) -1)
18186     return -1;
18187 
18188   names = (char *) (s + count);
18189   p = relplt->relocation;
18190   n = 0;
18191   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
18192     {
18193       size_t len;
18194 
18195       bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
18196       if (plt_size == (bfd_vma) -1)
18197 	break;
18198 
18199       *s = **p->sym_ptr_ptr;
18200       /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
18201 	 we are defining a symbol, ensure one of them is set.  */
18202       if ((s->flags & BSF_LOCAL) == 0)
18203 	s->flags |= BSF_GLOBAL;
18204       s->flags |= BSF_SYNTHETIC;
18205       s->section = plt;
18206       s->value = offset;
18207       s->name = names;
18208       s->udata.p = NULL;
18209       len = strlen ((*p->sym_ptr_ptr)->name);
18210       memcpy (names, (*p->sym_ptr_ptr)->name, len);
18211       names += len;
18212       if (p->addend != 0)
18213 	{
18214 	  char buf[30], *a;
18215 
18216 	  memcpy (names, "+0x", sizeof ("+0x") - 1);
18217 	  names += sizeof ("+0x") - 1;
18218 	  bfd_sprintf_vma (abfd, buf, p->addend);
18219 	  for (a = buf; *a == '0'; ++a)
18220 	    ;
18221 	  len = strlen (a);
18222 	  memcpy (names, a, len);
18223 	  names += len;
18224 	}
18225       memcpy (names, "@plt", sizeof ("@plt"));
18226       names += sizeof ("@plt");
18227       ++s, ++n;
18228       offset += plt_size;
18229     }
18230 
18231   return n;
18232 }
18233 
18234 static bfd_boolean
18235 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
18236 {
18237   if (hdr->sh_flags & SHF_ARM_PURECODE)
18238     *flags |= SEC_ELF_PURECODE;
18239   return TRUE;
18240 }
18241 
18242 static flagword
18243 elf32_arm_lookup_section_flags (char *flag_name)
18244 {
18245   if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
18246     return SHF_ARM_PURECODE;
18247 
18248   return SEC_NO_FLAGS;
18249 }
18250 
18251 static unsigned int
18252 elf32_arm_count_additional_relocs (asection *sec)
18253 {
18254   struct _arm_elf_section_data *arm_data;
18255   arm_data = get_arm_elf_section_data (sec);
18256   return arm_data->additional_reloc_count;
18257 }
18258 
18259 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
18260    has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
18261    FALSE otherwise.  ISECTION is the best guess matching section from the
18262    input bfd IBFD, but it might be NULL.  */
18263 
18264 static bfd_boolean
18265 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
18266 				       bfd *obfd ATTRIBUTE_UNUSED,
18267 				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
18268 				       Elf_Internal_Shdr *osection)
18269 {
18270   switch (osection->sh_type)
18271     {
18272     case SHT_ARM_EXIDX:
18273       {
18274 	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
18275 	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
18276 	unsigned i = 0;
18277 
18278 	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
18279 	osection->sh_info = 0;
18280 
18281 	/* The sh_link field must be set to the text section associated with
18282 	   this index section.  Unfortunately the ARM EHABI does not specify
18283 	   exactly how to determine this association.  Our caller does try
18284 	   to match up OSECTION with its corresponding input section however
18285 	   so that is a good first guess.  */
18286 	if (isection != NULL
18287 	    && osection->bfd_section != NULL
18288 	    && isection->bfd_section != NULL
18289 	    && isection->bfd_section->output_section != NULL
18290 	    && isection->bfd_section->output_section == osection->bfd_section
18291 	    && iheaders != NULL
18292 	    && isection->sh_link > 0
18293 	    && isection->sh_link < elf_numsections (ibfd)
18294 	    && iheaders[isection->sh_link]->bfd_section != NULL
18295 	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
18296 	    )
18297 	  {
18298 	    for (i = elf_numsections (obfd); i-- > 0;)
18299 	      if (oheaders[i]->bfd_section
18300 		  == iheaders[isection->sh_link]->bfd_section->output_section)
18301 		break;
18302 	  }
18303 
18304 	if (i == 0)
18305 	  {
18306 	    /* Failing that we have to find a matching section ourselves.  If
18307 	       we had the output section name available we could compare that
18308 	       with input section names.  Unfortunately we don't.  So instead
18309 	       we use a simple heuristic and look for the nearest executable
18310 	       section before this one.  */
18311 	    for (i = elf_numsections (obfd); i-- > 0;)
18312 	      if (oheaders[i] == osection)
18313 		break;
18314 	    if (i == 0)
18315 	      break;
18316 
18317 	    while (i-- > 0)
18318 	      if (oheaders[i]->sh_type == SHT_PROGBITS
18319 		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
18320 		  == (SHF_ALLOC | SHF_EXECINSTR))
18321 		break;
18322 	  }
18323 
18324 	if (i)
18325 	  {
18326 	    osection->sh_link = i;
18327 	    /* If the text section was part of a group
18328 	       then the index section should be too.  */
18329 	    if (oheaders[i]->sh_flags & SHF_GROUP)
18330 	      osection->sh_flags |= SHF_GROUP;
18331 	    return TRUE;
18332 	  }
18333       }
18334       break;
18335 
18336     case SHT_ARM_PREEMPTMAP:
18337       osection->sh_flags = SHF_ALLOC;
18338       break;
18339 
18340     case SHT_ARM_ATTRIBUTES:
18341     case SHT_ARM_DEBUGOVERLAY:
18342     case SHT_ARM_OVERLAYSECTION:
18343     default:
18344       break;
18345     }
18346 
18347   return FALSE;
18348 }
18349 
18350 /* Returns TRUE if NAME is an ARM mapping symbol.
18351    Traditionally the symbols $a, $d and $t have been used.
18352    The ARM ELF standard also defines $x (for A64 code).  It also allows a
18353    period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
18354    Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
18355    not support them here.  $t.x indicates the start of ThumbEE instructions.  */
18356 
18357 static bfd_boolean
18358 is_arm_mapping_symbol (const char * name)
18359 {
18360   return name != NULL /* Paranoia.  */
18361     && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
18362 			 the mapping symbols could have acquired a prefix.
18363 			 We do not support this here, since such symbols no
18364 			 longer conform to the ARM ELF ABI.  */
18365     && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
18366     && (name[2] == 0 || name[2] == '.');
18367   /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
18368      any characters that follow the period are legal characters for the body
18369      of a symbol's name.  For now we just assume that this is the case.  */
18370 }
18371 
18372 /* Make sure that mapping symbols in object files are not removed via the
18373    "strip --strip-unneeded" tool.  These symbols are needed in order to
18374    correctly generate interworking veneers, and for byte swapping code
18375    regions.  Once an object file has been linked, it is safe to remove the
18376    symbols as they will no longer be needed.  */
18377 
18378 static void
18379 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
18380 {
18381   if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
18382       && sym->section != bfd_abs_section_ptr
18383       && is_arm_mapping_symbol (sym->name))
18384     sym->flags |= BSF_KEEP;
18385 }
18386 
18387 #undef  elf_backend_copy_special_section_fields
18388 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18389 
18390 #define ELF_ARCH			bfd_arch_arm
18391 #define ELF_TARGET_ID			ARM_ELF_DATA
18392 #define ELF_MACHINE_CODE		EM_ARM
18393 #ifdef __QNXTARGET__
18394 #define ELF_MAXPAGESIZE			0x1000
18395 #else
18396 #define ELF_MAXPAGESIZE			0x10000
18397 #endif
18398 #define ELF_MINPAGESIZE			0x1000
18399 #define ELF_COMMONPAGESIZE		0x1000
18400 
18401 #define bfd_elf32_mkobject		        elf32_arm_mkobject
18402 
18403 #define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
18404 #define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
18405 #define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
18406 #define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
18407 #define bfd_elf32_bfd_link_hash_table_create    elf32_arm_link_hash_table_create
18408 #define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
18409 #define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
18410 #define bfd_elf32_find_nearest_line	        elf32_arm_find_nearest_line
18411 #define bfd_elf32_find_inliner_info	        elf32_arm_find_inliner_info
18412 #define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
18413 #define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
18414 #define bfd_elf32_bfd_final_link		elf32_arm_final_link
18415 #define bfd_elf32_get_synthetic_symtab  elf32_arm_get_synthetic_symtab
18416 
18417 #define elf_backend_get_symbol_type             elf32_arm_get_symbol_type
18418 #define elf_backend_gc_mark_hook                elf32_arm_gc_mark_hook
18419 #define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
18420 #define elf_backend_gc_sweep_hook               elf32_arm_gc_sweep_hook
18421 #define elf_backend_check_relocs                elf32_arm_check_relocs
18422 #define elf_backend_relocate_section		elf32_arm_relocate_section
18423 #define elf_backend_write_section		elf32_arm_write_section
18424 #define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
18425 #define elf_backend_create_dynamic_sections     elf32_arm_create_dynamic_sections
18426 #define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
18427 #define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
18428 #define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
18429 #define elf_backend_always_size_sections	elf32_arm_always_size_sections
18430 #define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
18431 #define elf_backend_post_process_headers	elf32_arm_post_process_headers
18432 #define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
18433 #define elf_backend_object_p			elf32_arm_object_p
18434 #define elf_backend_fake_sections  		elf32_arm_fake_sections
18435 #define elf_backend_section_from_shdr  		elf32_arm_section_from_shdr
18436 #define elf_backend_final_write_processing      elf32_arm_final_write_processing
18437 #define elf_backend_copy_indirect_symbol        elf32_arm_copy_indirect_symbol
18438 #define elf_backend_size_info			elf32_arm_size_info
18439 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
18440 #define elf_backend_additional_program_headers  elf32_arm_additional_program_headers
18441 #define elf_backend_output_arch_local_syms      elf32_arm_output_arch_local_syms
18442 #define elf_backend_begin_write_processing      elf32_arm_begin_write_processing
18443 #define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
18444 #define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
18445 #define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing
18446 
18447 #define elf_backend_can_refcount       1
18448 #define elf_backend_can_gc_sections    1
18449 #define elf_backend_plt_readonly       1
18450 #define elf_backend_want_got_plt       1
18451 #define elf_backend_want_plt_sym       0
18452 #define elf_backend_may_use_rel_p      1
18453 #define elf_backend_may_use_rela_p     0
18454 #define elf_backend_default_use_rela_p 0
18455 
18456 #define elf_backend_got_header_size	12
18457 #define elf_backend_extern_protected_data 1
18458 
18459 #undef  elf_backend_obj_attrs_vendor
18460 #define elf_backend_obj_attrs_vendor		"aeabi"
18461 #undef  elf_backend_obj_attrs_section
18462 #define elf_backend_obj_attrs_section		".ARM.attributes"
18463 #undef  elf_backend_obj_attrs_arg_type
18464 #define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
18465 #undef  elf_backend_obj_attrs_section_type
18466 #define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
18467 #define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
18468 #define elf_backend_obj_attrs_handle_unknown 	elf32_arm_obj_attrs_handle_unknown
18469 
18470 #undef elf_backend_section_flags
18471 #define elf_backend_section_flags		elf32_arm_section_flags
18472 #undef elf_backend_lookup_section_flags_hook
18473 #define elf_backend_lookup_section_flags_hook   elf32_arm_lookup_section_flags
18474 
18475 #include "elf32-target.h"
18476 
18477 /* Native Client targets.  */
18478 
18479 #undef	TARGET_LITTLE_SYM
18480 #define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
18481 #undef	TARGET_LITTLE_NAME
18482 #define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
18483 #undef	TARGET_BIG_SYM
18484 #define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
18485 #undef	TARGET_BIG_NAME
18486 #define TARGET_BIG_NAME			"elf32-bigarm-nacl"
18487 
18488 /* Like elf32_arm_link_hash_table_create -- but overrides
18489    appropriately for NaCl.  */
18490 
18491 static struct bfd_link_hash_table *
18492 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18493 {
18494   struct bfd_link_hash_table *ret;
18495 
18496   ret = elf32_arm_link_hash_table_create (abfd);
18497   if (ret)
18498     {
18499       struct elf32_arm_link_hash_table *htab
18500 	= (struct elf32_arm_link_hash_table *) ret;
18501 
18502       htab->nacl_p = 1;
18503 
18504       htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18505       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18506     }
18507   return ret;
18508 }
18509 
18510 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18511    really need to use elf32_arm_modify_segment_map.  But we do it
18512    anyway just to reduce gratuitous differences with the stock ARM backend.  */
18513 
18514 static bfd_boolean
18515 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18516 {
18517   return (elf32_arm_modify_segment_map (abfd, info)
18518 	  && nacl_modify_segment_map (abfd, info));
18519 }
18520 
18521 static void
18522 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
18523 {
18524   elf32_arm_final_write_processing (abfd, linker);
18525   nacl_final_write_processing (abfd, linker);
18526 }
18527 
18528 static bfd_vma
18529 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18530 			    const arelent *rel ATTRIBUTE_UNUSED)
18531 {
18532   return plt->vma
18533     + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18534 	   i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18535 }
18536 
18537 #undef	elf32_bed
18538 #define elf32_bed				elf32_arm_nacl_bed
18539 #undef  bfd_elf32_bfd_link_hash_table_create
18540 #define bfd_elf32_bfd_link_hash_table_create	\
18541   elf32_arm_nacl_link_hash_table_create
18542 #undef	elf_backend_plt_alignment
18543 #define elf_backend_plt_alignment		4
18544 #undef	elf_backend_modify_segment_map
18545 #define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
18546 #undef	elf_backend_modify_program_headers
18547 #define	elf_backend_modify_program_headers	nacl_modify_program_headers
18548 #undef  elf_backend_final_write_processing
18549 #define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
18550 #undef bfd_elf32_get_synthetic_symtab
18551 #undef  elf_backend_plt_sym_val
18552 #define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
18553 #undef  elf_backend_copy_special_section_fields
18554 
18555 #undef	ELF_MINPAGESIZE
18556 #undef	ELF_COMMONPAGESIZE
18557 
18558 
18559 #include "elf32-target.h"
18560 
18561 /* Reset to defaults.  */
18562 #undef	elf_backend_plt_alignment
18563 #undef	elf_backend_modify_segment_map
18564 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
18565 #undef	elf_backend_modify_program_headers
18566 #undef  elf_backend_final_write_processing
18567 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
18568 #undef	ELF_MINPAGESIZE
18569 #define ELF_MINPAGESIZE			0x1000
18570 #undef	ELF_COMMONPAGESIZE
18571 #define ELF_COMMONPAGESIZE		0x1000
18572 
18573 
18574 /* VxWorks Targets.  */
18575 
18576 #undef  TARGET_LITTLE_SYM
18577 #define TARGET_LITTLE_SYM               arm_elf32_vxworks_le_vec
18578 #undef  TARGET_LITTLE_NAME
18579 #define TARGET_LITTLE_NAME              "elf32-littlearm-vxworks"
18580 #undef  TARGET_BIG_SYM
18581 #define TARGET_BIG_SYM                  arm_elf32_vxworks_be_vec
18582 #undef  TARGET_BIG_NAME
18583 #define TARGET_BIG_NAME                 "elf32-bigarm-vxworks"
18584 
18585 /* Like elf32_arm_link_hash_table_create -- but overrides
18586    appropriately for VxWorks.  */
18587 
18588 static struct bfd_link_hash_table *
18589 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18590 {
18591   struct bfd_link_hash_table *ret;
18592 
18593   ret = elf32_arm_link_hash_table_create (abfd);
18594   if (ret)
18595     {
18596       struct elf32_arm_link_hash_table *htab
18597 	= (struct elf32_arm_link_hash_table *) ret;
18598       htab->use_rel = 0;
18599       htab->vxworks_p = 1;
18600     }
18601   return ret;
18602 }
18603 
18604 static void
18605 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
18606 {
18607   elf32_arm_final_write_processing (abfd, linker);
18608   elf_vxworks_final_write_processing (abfd, linker);
18609 }
18610 
18611 #undef  elf32_bed
18612 #define elf32_bed elf32_arm_vxworks_bed
18613 
18614 #undef  bfd_elf32_bfd_link_hash_table_create
18615 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
18616 #undef  elf_backend_final_write_processing
18617 #define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
18618 #undef  elf_backend_emit_relocs
18619 #define elf_backend_emit_relocs			elf_vxworks_emit_relocs
18620 
18621 #undef  elf_backend_may_use_rel_p
18622 #define elf_backend_may_use_rel_p	0
18623 #undef  elf_backend_may_use_rela_p
18624 #define elf_backend_may_use_rela_p	1
18625 #undef  elf_backend_default_use_rela_p
18626 #define elf_backend_default_use_rela_p	1
18627 #undef  elf_backend_want_plt_sym
18628 #define elf_backend_want_plt_sym	1
18629 #undef  ELF_MAXPAGESIZE
18630 #define ELF_MAXPAGESIZE			0x1000
18631 
18632 #include "elf32-target.h"
18633 
18634 
18635 /* Merge backend specific data from an object file to the output
18636    object file when linking.  */
18637 
18638 static bfd_boolean
18639 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
18640 {
18641   flagword out_flags;
18642   flagword in_flags;
18643   bfd_boolean flags_compatible = TRUE;
18644   asection *sec;
18645 
18646   /* Check if we have the same endianness.  */
18647   if (! _bfd_generic_verify_endian_match (ibfd, obfd))
18648     return FALSE;
18649 
18650   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
18651     return TRUE;
18652 
18653   if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
18654     return FALSE;
18655 
18656   /* The input BFD must have had its flags initialised.  */
18657   /* The following seems bogus to me -- The flags are initialized in
18658      the assembler but I don't think an elf_flags_init field is
18659      written into the object.  */
18660   /* BFD_ASSERT (elf_flags_init (ibfd)); */
18661 
18662   in_flags  = elf_elfheader (ibfd)->e_flags;
18663   out_flags = elf_elfheader (obfd)->e_flags;
18664 
18665   /* In theory there is no reason why we couldn't handle this.  However
18666      in practice it isn't even close to working and there is no real
18667      reason to want it.  */
18668   if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
18669       && !(ibfd->flags & DYNAMIC)
18670       && (in_flags & EF_ARM_BE8))
18671     {
18672       _bfd_error_handler (_("error: %B is already in final BE8 format"),
18673 			  ibfd);
18674       return FALSE;
18675     }
18676 
18677   if (!elf_flags_init (obfd))
18678     {
18679       /* If the input is the default architecture and had the default
18680 	 flags then do not bother setting the flags for the output
18681 	 architecture, instead allow future merges to do this.  If no
18682 	 future merges ever set these flags then they will retain their
18683 	 uninitialised values, which surprise surprise, correspond
18684 	 to the default values.  */
18685       if (bfd_get_arch_info (ibfd)->the_default
18686 	  && elf_elfheader (ibfd)->e_flags == 0)
18687 	return TRUE;
18688 
18689       elf_flags_init (obfd) = TRUE;
18690       elf_elfheader (obfd)->e_flags = in_flags;
18691 
18692       if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
18693 	  && bfd_get_arch_info (obfd)->the_default)
18694 	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
18695 
18696       return TRUE;
18697     }
18698 
18699   /* Determine what should happen if the input ARM architecture
18700      does not match the output ARM architecture.  */
18701   if (! bfd_arm_merge_machines (ibfd, obfd))
18702     return FALSE;
18703 
18704   /* Identical flags must be compatible.  */
18705   if (in_flags == out_flags)
18706     return TRUE;
18707 
18708   /* Check to see if the input BFD actually contains any sections.  If
18709      not, its flags may not have been initialised either, but it
18710      cannot actually cause any incompatiblity.  Do not short-circuit
18711      dynamic objects; their section list may be emptied by
18712     elf_link_add_object_symbols.
18713 
18714     Also check to see if there are no code sections in the input.
18715     In this case there is no need to check for code specific flags.
18716     XXX - do we need to worry about floating-point format compatability
18717     in data sections ?  */
18718   if (!(ibfd->flags & DYNAMIC))
18719     {
18720       bfd_boolean null_input_bfd = TRUE;
18721       bfd_boolean only_data_sections = TRUE;
18722 
18723       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
18724 	{
18725 	  /* Ignore synthetic glue sections.  */
18726 	  if (strcmp (sec->name, ".glue_7")
18727 	      && strcmp (sec->name, ".glue_7t"))
18728 	    {
18729 	      if ((bfd_get_section_flags (ibfd, sec)
18730 		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18731 		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18732 		only_data_sections = FALSE;
18733 
18734 	      null_input_bfd = FALSE;
18735 	      break;
18736 	    }
18737 	}
18738 
18739       if (null_input_bfd || only_data_sections)
18740 	return TRUE;
18741     }
18742 
18743   /* Complain about various flag mismatches.  */
18744   if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
18745 				      EF_ARM_EABI_VERSION (out_flags)))
18746     {
18747       _bfd_error_handler
18748 	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
18749 	 ibfd, obfd,
18750 	 (in_flags & EF_ARM_EABIMASK) >> 24,
18751 	 (out_flags & EF_ARM_EABIMASK) >> 24);
18752       return FALSE;
18753     }
18754 
18755   /* Not sure what needs to be checked for EABI versions >= 1.  */
18756   /* VxWorks libraries do not use these flags.  */
18757   if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
18758       && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
18759       && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
18760     {
18761       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
18762 	{
18763 	  _bfd_error_handler
18764 	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
18765 	     ibfd, obfd,
18766 	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
18767 	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
18768 	  flags_compatible = FALSE;
18769 	}
18770 
18771       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
18772 	{
18773 	  if (in_flags & EF_ARM_APCS_FLOAT)
18774 	    _bfd_error_handler
18775 	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
18776 	       ibfd, obfd);
18777 	  else
18778 	    _bfd_error_handler
18779 	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
18780 	       ibfd, obfd);
18781 
18782 	  flags_compatible = FALSE;
18783 	}
18784 
18785       if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
18786 	{
18787 	  if (in_flags & EF_ARM_VFP_FLOAT)
18788 	    _bfd_error_handler
18789 	      (_("error: %B uses VFP instructions, whereas %B does not"),
18790 	       ibfd, obfd);
18791 	  else
18792 	    _bfd_error_handler
18793 	      (_("error: %B uses FPA instructions, whereas %B does not"),
18794 	       ibfd, obfd);
18795 
18796 	  flags_compatible = FALSE;
18797 	}
18798 
18799       if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
18800 	{
18801 	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
18802 	    _bfd_error_handler
18803 	      (_("error: %B uses Maverick instructions, whereas %B does not"),
18804 	       ibfd, obfd);
18805 	  else
18806 	    _bfd_error_handler
18807 	      (_("error: %B does not use Maverick instructions, whereas %B does"),
18808 	       ibfd, obfd);
18809 
18810 	  flags_compatible = FALSE;
18811 	}
18812 
18813 #ifdef EF_ARM_SOFT_FLOAT
18814       if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
18815 	{
18816 	  /* We can allow interworking between code that is VFP format
18817 	     layout, and uses either soft float or integer regs for
18818 	     passing floating point arguments and results.  We already
18819 	     know that the APCS_FLOAT flags match; similarly for VFP
18820 	     flags.  */
18821 	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
18822 	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
18823 	    {
18824 	      if (in_flags & EF_ARM_SOFT_FLOAT)
18825 		_bfd_error_handler
18826 		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
18827 		   ibfd, obfd);
18828 	      else
18829 		_bfd_error_handler
18830 		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
18831 		   ibfd, obfd);
18832 
18833 	      flags_compatible = FALSE;
18834 	    }
18835 	}
18836 #endif
18837 
18838       /* Interworking mismatch is only a warning.  */
18839       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
18840 	{
18841 	  if (in_flags & EF_ARM_INTERWORK)
18842 	    {
18843 	      _bfd_error_handler
18844 		(_("Warning: %B supports interworking, whereas %B does not"),
18845 		 ibfd, obfd);
18846 	    }
18847 	  else
18848 	    {
18849 	      _bfd_error_handler
18850 		(_("Warning: %B does not support interworking, whereas %B does"),
18851 		 ibfd, obfd);
18852 	    }
18853 	}
18854     }
18855 
18856   return flags_compatible;
18857 }
18858 
18859 
18860 /* Symbian OS Targets.  */
18861 
18862 #undef  TARGET_LITTLE_SYM
18863 #define TARGET_LITTLE_SYM               arm_elf32_symbian_le_vec
18864 #undef  TARGET_LITTLE_NAME
18865 #define TARGET_LITTLE_NAME              "elf32-littlearm-symbian"
18866 #undef  TARGET_BIG_SYM
18867 #define TARGET_BIG_SYM                  arm_elf32_symbian_be_vec
18868 #undef  TARGET_BIG_NAME
18869 #define TARGET_BIG_NAME                 "elf32-bigarm-symbian"
18870 
18871 /* Like elf32_arm_link_hash_table_create -- but overrides
18872    appropriately for Symbian OS.  */
18873 
18874 static struct bfd_link_hash_table *
18875 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18876 {
18877   struct bfd_link_hash_table *ret;
18878 
18879   ret = elf32_arm_link_hash_table_create (abfd);
18880   if (ret)
18881     {
18882       struct elf32_arm_link_hash_table *htab
18883 	= (struct elf32_arm_link_hash_table *)ret;
18884       /* There is no PLT header for Symbian OS.  */
18885       htab->plt_header_size = 0;
18886       /* The PLT entries are each one instruction and one word.  */
18887       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18888       htab->symbian_p = 1;
18889       /* Symbian uses armv5t or above, so use_blx is always true.  */
18890       htab->use_blx = 1;
18891       htab->root.is_relocatable_executable = 1;
18892     }
18893   return ret;
18894 }
18895 
18896 static const struct bfd_elf_special_section
18897 elf32_arm_symbian_special_sections[] =
18898 {
18899   /* In a BPABI executable, the dynamic linking sections do not go in
18900      the loadable read-only segment.  The post-linker may wish to
18901      refer to these sections, but they are not part of the final
18902      program image.  */
18903   { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
18904   { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
18905   { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
18906   { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
18907   { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
18908   /* These sections do not need to be writable as the SymbianOS
18909      postlinker will arrange things so that no dynamic relocation is
18910      required.  */
18911   { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
18912   { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
18913   { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
18914   { NULL,                             0, 0, 0,                 0 }
18915 };
18916 
18917 static void
18918 elf32_arm_symbian_begin_write_processing (bfd *abfd,
18919 					  struct bfd_link_info *link_info)
18920 {
18921   /* BPABI objects are never loaded directly by an OS kernel; they are
18922      processed by a postlinker first, into an OS-specific format.  If
18923      the D_PAGED bit is set on the file, BFD will align segments on
18924      page boundaries, so that an OS can directly map the file.  With
18925      BPABI objects, that just results in wasted space.  In addition,
18926      because we clear the D_PAGED bit, map_sections_to_segments will
18927      recognize that the program headers should not be mapped into any
18928      loadable segment.  */
18929   abfd->flags &= ~D_PAGED;
18930   elf32_arm_begin_write_processing (abfd, link_info);
18931 }
18932 
18933 static bfd_boolean
18934 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18935 				      struct bfd_link_info *info)
18936 {
18937   struct elf_segment_map *m;
18938   asection *dynsec;
18939 
18940   /* BPABI shared libraries and executables should have a PT_DYNAMIC
18941      segment.  However, because the .dynamic section is not marked
18942      with SEC_LOAD, the generic ELF code will not create such a
18943      segment.  */
18944   dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18945   if (dynsec)
18946     {
18947       for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18948 	if (m->p_type == PT_DYNAMIC)
18949 	  break;
18950 
18951       if (m == NULL)
18952 	{
18953 	  m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18954 	  m->next = elf_seg_map (abfd);
18955 	  elf_seg_map (abfd) = m;
18956 	}
18957     }
18958 
18959   /* Also call the generic arm routine.  */
18960   return elf32_arm_modify_segment_map (abfd, info);
18961 }
18962 
18963 /* Return address for Ith PLT stub in section PLT, for relocation REL
18964    or (bfd_vma) -1 if it should not be included.  */
18965 
18966 static bfd_vma
18967 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18968 			       const arelent *rel ATTRIBUTE_UNUSED)
18969 {
18970   return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18971 }
18972 
18973 #undef  elf32_bed
18974 #define elf32_bed elf32_arm_symbian_bed
18975 
18976 /* The dynamic sections are not allocated on SymbianOS; the postlinker
18977    will process them and then discard them.  */
18978 #undef  ELF_DYNAMIC_SEC_FLAGS
18979 #define ELF_DYNAMIC_SEC_FLAGS \
18980   (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
18981 
18982 #undef elf_backend_emit_relocs
18983 
18984 #undef  bfd_elf32_bfd_link_hash_table_create
18985 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
18986 #undef  elf_backend_special_sections
18987 #define elf_backend_special_sections 		elf32_arm_symbian_special_sections
18988 #undef  elf_backend_begin_write_processing
18989 #define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
18990 #undef  elf_backend_final_write_processing
18991 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
18992 
18993 #undef  elf_backend_modify_segment_map
18994 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
18995 
18996 /* There is no .got section for BPABI objects, and hence no header.  */
18997 #undef  elf_backend_got_header_size
18998 #define elf_backend_got_header_size 0
18999 
19000 /* Similarly, there is no .got.plt section.  */
19001 #undef  elf_backend_want_got_plt
19002 #define elf_backend_want_got_plt 0
19003 
19004 #undef  elf_backend_plt_sym_val
19005 #define elf_backend_plt_sym_val		elf32_arm_symbian_plt_sym_val
19006 
19007 #undef  elf_backend_may_use_rel_p
19008 #define elf_backend_may_use_rel_p	1
19009 #undef  elf_backend_may_use_rela_p
19010 #define elf_backend_may_use_rela_p	0
19011 #undef  elf_backend_default_use_rela_p
19012 #define elf_backend_default_use_rela_p	0
19013 #undef  elf_backend_want_plt_sym
19014 #define elf_backend_want_plt_sym	0
19015 #undef  ELF_MAXPAGESIZE
19016 #define ELF_MAXPAGESIZE			0x8000
19017 
19018 #include "elf32-target.h"
19019