1 /* 32-bit ELF support for ARM
2    Copyright (C) 1998-2019 Free Software Foundation, Inc.
3 
4    This file is part of BFD, the Binary File Descriptor library.
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3 of the License, or
9    (at your option) any later version.
10 
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with this program; if not, write to the Free Software
18    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19    MA 02110-1301, USA.  */
20 
21 #include "sysdep.h"
22 #include <limits.h>
23 
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 
32 /* Return the relocation section associated with NAME.  HTAB is the
33    bfd's elf32_arm_link_hash_table.  */
34 #define RELOC_SECTION(HTAB, NAME) \
35   ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
36 
37 /* Return size of a relocation entry.  HTAB is the bfd's
38    elf32_arm_link_hash_table.  */
39 #define RELOC_SIZE(HTAB) \
40   ((HTAB)->use_rel \
41    ? sizeof (Elf32_External_Rel) \
42    : sizeof (Elf32_External_Rela))
43 
44 /* Return function to swap relocations in.  HTAB is the bfd's
45    elf32_arm_link_hash_table.  */
46 #define SWAP_RELOC_IN(HTAB) \
47   ((HTAB)->use_rel \
48    ? bfd_elf32_swap_reloc_in \
49    : bfd_elf32_swap_reloca_in)
50 
51 /* Return function to swap relocations out.  HTAB is the bfd's
52    elf32_arm_link_hash_table.  */
53 #define SWAP_RELOC_OUT(HTAB) \
54   ((HTAB)->use_rel \
55    ? bfd_elf32_swap_reloc_out \
56    : bfd_elf32_swap_reloca_out)
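
/* Illustrative sketch, not upstream code: the four helpers above are
   meant to be used together, keyed off the hash table's use_rel flag.
   Assuming HTAB is the ARM hash table for the link and SRELOC is the
   dynamic relocation section picked via RELOC_SECTION (htab, sreloc,
   output_bfd and rel are placeholder names here), emitting one entry
   would look roughly like:

     Elf_Internal_Rela rel;
     bfd_byte *loc = sreloc->contents
		     + sreloc->reloc_count++ * RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);

   With use_rel set this stores an Elf32_External_Rel in a .rel.*
   section; otherwise an Elf32_External_Rela in the matching .rela.*
   section, which is why the entry size and swap routine must always
   be selected through the same HTAB.  */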
57 
58 #define elf_info_to_howto		NULL
59 #define elf_info_to_howto_rel		elf32_arm_info_to_howto
60 
61 #define ARM_ELF_ABI_VERSION		0
62 #define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
63 
64 /* The Adjusted Place, as defined by AAELF.  */
65 #define Pa(X) ((X) & 0xfffffffc)
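
/* For example, Pa (0x8006) == Pa (0x8007) == 0x8004: the Adjusted
   Place is the place rounded down to a 4-byte boundary, matching the
   Align(PC, 4) base that Thumb PC-relative arithmetic (e.g. BLX and
   literal loads) is defined against.  */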
66 
67 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
68 					    struct bfd_link_info *link_info,
69 					    asection *sec,
70 					    bfd_byte *contents);
71 
72 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
73    R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
74    in that slot.  */
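
/* A hypothetical sanity check of that invariant (illustration only,
   not upstream code):

     BFD_ASSERT (elf32_arm_howto_table_1[R_ARM_PC24].type == R_ARM_PC24);

   i.e. slot N must describe relocation number N, which is why unused
   numbers further down are filled with EMPTY_HOWTO markers.  */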
75 
76 static reloc_howto_type elf32_arm_howto_table_1[] =
77 {
78   /* No relocation.  */
79   HOWTO (R_ARM_NONE,		/* type */
80 	 0,			/* rightshift */
81 	 3,			/* size (0 = byte, 1 = short, 2 = long) */
82 	 0,			/* bitsize */
83 	 FALSE,			/* pc_relative */
84 	 0,			/* bitpos */
85 	 complain_overflow_dont,/* complain_on_overflow */
86 	 bfd_elf_generic_reloc,	/* special_function */
87 	 "R_ARM_NONE",		/* name */
88 	 FALSE,			/* partial_inplace */
89 	 0,			/* src_mask */
90 	 0,			/* dst_mask */
91 	 FALSE),		/* pcrel_offset */
92 
93   HOWTO (R_ARM_PC24,		/* type */
94 	 2,			/* rightshift */
95 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
96 	 24,			/* bitsize */
97 	 TRUE,			/* pc_relative */
98 	 0,			/* bitpos */
99 	 complain_overflow_signed,/* complain_on_overflow */
100 	 bfd_elf_generic_reloc,	/* special_function */
101 	 "R_ARM_PC24",		/* name */
102 	 FALSE,			/* partial_inplace */
103 	 0x00ffffff,		/* src_mask */
104 	 0x00ffffff,		/* dst_mask */
105 	 TRUE),			/* pcrel_offset */
106 
107   /* 32 bit absolute */
108   HOWTO (R_ARM_ABS32,		/* type */
109 	 0,			/* rightshift */
110 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
111 	 32,			/* bitsize */
112 	 FALSE,			/* pc_relative */
113 	 0,			/* bitpos */
114 	 complain_overflow_bitfield,/* complain_on_overflow */
115 	 bfd_elf_generic_reloc,	/* special_function */
116 	 "R_ARM_ABS32",		/* name */
117 	 FALSE,			/* partial_inplace */
118 	 0xffffffff,		/* src_mask */
119 	 0xffffffff,		/* dst_mask */
120 	 FALSE),		/* pcrel_offset */
121 
122   /* standard 32bit pc-relative reloc */
123   HOWTO (R_ARM_REL32,		/* type */
124 	 0,			/* rightshift */
125 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
126 	 32,			/* bitsize */
127 	 TRUE,			/* pc_relative */
128 	 0,			/* bitpos */
129 	 complain_overflow_bitfield,/* complain_on_overflow */
130 	 bfd_elf_generic_reloc,	/* special_function */
131 	 "R_ARM_REL32",		/* name */
132 	 FALSE,			/* partial_inplace */
133 	 0xffffffff,		/* src_mask */
134 	 0xffffffff,		/* dst_mask */
135 	 TRUE),			/* pcrel_offset */
136 
137   /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
138   HOWTO (R_ARM_LDR_PC_G0,	/* type */
139 	 0,			/* rightshift */
140 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
141 	 32,			/* bitsize */
142 	 TRUE,			/* pc_relative */
143 	 0,			/* bitpos */
144 	 complain_overflow_dont,/* complain_on_overflow */
145 	 bfd_elf_generic_reloc,	/* special_function */
146 	 "R_ARM_LDR_PC_G0",     /* name */
147 	 FALSE,			/* partial_inplace */
148 	 0xffffffff,		/* src_mask */
149 	 0xffffffff,		/* dst_mask */
150 	 TRUE),			/* pcrel_offset */
151 
152    /* 16 bit absolute */
153   HOWTO (R_ARM_ABS16,		/* type */
154 	 0,			/* rightshift */
155 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
156 	 16,			/* bitsize */
157 	 FALSE,			/* pc_relative */
158 	 0,			/* bitpos */
159 	 complain_overflow_bitfield,/* complain_on_overflow */
160 	 bfd_elf_generic_reloc,	/* special_function */
161 	 "R_ARM_ABS16",		/* name */
162 	 FALSE,			/* partial_inplace */
163 	 0x0000ffff,		/* src_mask */
164 	 0x0000ffff,		/* dst_mask */
165 	 FALSE),		/* pcrel_offset */
166 
167   /* 12 bit absolute */
168   HOWTO (R_ARM_ABS12,		/* type */
169 	 0,			/* rightshift */
170 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
171 	 12,			/* bitsize */
172 	 FALSE,			/* pc_relative */
173 	 0,			/* bitpos */
174 	 complain_overflow_bitfield,/* complain_on_overflow */
175 	 bfd_elf_generic_reloc,	/* special_function */
176 	 "R_ARM_ABS12",		/* name */
177 	 FALSE,			/* partial_inplace */
178 	 0x00000fff,		/* src_mask */
179 	 0x00000fff,		/* dst_mask */
180 	 FALSE),		/* pcrel_offset */
181 
182   HOWTO (R_ARM_THM_ABS5,	/* type */
183 	 6,			/* rightshift */
184 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
185 	 5,			/* bitsize */
186 	 FALSE,			/* pc_relative */
187 	 0,			/* bitpos */
188 	 complain_overflow_bitfield,/* complain_on_overflow */
189 	 bfd_elf_generic_reloc,	/* special_function */
190 	 "R_ARM_THM_ABS5",	/* name */
191 	 FALSE,			/* partial_inplace */
192 	 0x000007e0,		/* src_mask */
193 	 0x000007e0,		/* dst_mask */
194 	 FALSE),		/* pcrel_offset */
195 
196   /* 8 bit absolute */
197   HOWTO (R_ARM_ABS8,		/* type */
198 	 0,			/* rightshift */
199 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
200 	 8,			/* bitsize */
201 	 FALSE,			/* pc_relative */
202 	 0,			/* bitpos */
203 	 complain_overflow_bitfield,/* complain_on_overflow */
204 	 bfd_elf_generic_reloc,	/* special_function */
205 	 "R_ARM_ABS8",		/* name */
206 	 FALSE,			/* partial_inplace */
207 	 0x000000ff,		/* src_mask */
208 	 0x000000ff,		/* dst_mask */
209 	 FALSE),		/* pcrel_offset */
210 
211   HOWTO (R_ARM_SBREL32,		/* type */
212 	 0,			/* rightshift */
213 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
214 	 32,			/* bitsize */
215 	 FALSE,			/* pc_relative */
216 	 0,			/* bitpos */
217 	 complain_overflow_dont,/* complain_on_overflow */
218 	 bfd_elf_generic_reloc,	/* special_function */
219 	 "R_ARM_SBREL32",	/* name */
220 	 FALSE,			/* partial_inplace */
221 	 0xffffffff,		/* src_mask */
222 	 0xffffffff,		/* dst_mask */
223 	 FALSE),		/* pcrel_offset */
224 
225   HOWTO (R_ARM_THM_CALL,	/* type */
226 	 1,			/* rightshift */
227 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
228 	 24,			/* bitsize */
229 	 TRUE,			/* pc_relative */
230 	 0,			/* bitpos */
231 	 complain_overflow_signed,/* complain_on_overflow */
232 	 bfd_elf_generic_reloc,	/* special_function */
233 	 "R_ARM_THM_CALL",	/* name */
234 	 FALSE,			/* partial_inplace */
235 	 0x07ff2fff,		/* src_mask */
236 	 0x07ff2fff,		/* dst_mask */
237 	 TRUE),			/* pcrel_offset */
238 
239   HOWTO (R_ARM_THM_PC8,		/* type */
240 	 1,			/* rightshift */
241 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
242 	 8,			/* bitsize */
243 	 TRUE,			/* pc_relative */
244 	 0,			/* bitpos */
245 	 complain_overflow_signed,/* complain_on_overflow */
246 	 bfd_elf_generic_reloc,	/* special_function */
247 	 "R_ARM_THM_PC8",	/* name */
248 	 FALSE,			/* partial_inplace */
249 	 0x000000ff,		/* src_mask */
250 	 0x000000ff,		/* dst_mask */
251 	 TRUE),			/* pcrel_offset */
252 
253   HOWTO (R_ARM_BREL_ADJ,	/* type */
254 	 1,			/* rightshift */
255 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
256 	 32,			/* bitsize */
257 	 FALSE,			/* pc_relative */
258 	 0,			/* bitpos */
259 	 complain_overflow_signed,/* complain_on_overflow */
260 	 bfd_elf_generic_reloc,	/* special_function */
261 	 "R_ARM_BREL_ADJ",	/* name */
262 	 FALSE,			/* partial_inplace */
263 	 0xffffffff,		/* src_mask */
264 	 0xffffffff,		/* dst_mask */
265 	 FALSE),		/* pcrel_offset */
266 
267   HOWTO (R_ARM_TLS_DESC,	/* type */
268 	 0,			/* rightshift */
269 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
270 	 32,			/* bitsize */
271 	 FALSE,			/* pc_relative */
272 	 0,			/* bitpos */
273 	 complain_overflow_bitfield,/* complain_on_overflow */
274 	 bfd_elf_generic_reloc,	/* special_function */
275 	 "R_ARM_TLS_DESC",	/* name */
276 	 FALSE,			/* partial_inplace */
277 	 0xffffffff,		/* src_mask */
278 	 0xffffffff,		/* dst_mask */
279 	 FALSE),		/* pcrel_offset */
280 
281   HOWTO (R_ARM_THM_SWI8,	/* type */
282 	 0,			/* rightshift */
283 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
284 	 0,			/* bitsize */
285 	 FALSE,			/* pc_relative */
286 	 0,			/* bitpos */
287 	 complain_overflow_signed,/* complain_on_overflow */
288 	 bfd_elf_generic_reloc,	/* special_function */
289 	 "R_ARM_SWI8",		/* name */
290 	 FALSE,			/* partial_inplace */
291 	 0x00000000,		/* src_mask */
292 	 0x00000000,		/* dst_mask */
293 	 FALSE),		/* pcrel_offset */
294 
295   /* BLX instruction for the ARM.  */
296   HOWTO (R_ARM_XPC25,		/* type */
297 	 2,			/* rightshift */
298 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
299 	 24,			/* bitsize */
300 	 TRUE,			/* pc_relative */
301 	 0,			/* bitpos */
302 	 complain_overflow_signed,/* complain_on_overflow */
303 	 bfd_elf_generic_reloc,	/* special_function */
304 	 "R_ARM_XPC25",		/* name */
305 	 FALSE,			/* partial_inplace */
306 	 0x00ffffff,		/* src_mask */
307 	 0x00ffffff,		/* dst_mask */
308 	 TRUE),			/* pcrel_offset */
309 
310   /* BLX instruction for the Thumb.  */
311   HOWTO (R_ARM_THM_XPC22,	/* type */
312 	 2,			/* rightshift */
313 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
314 	 24,			/* bitsize */
315 	 TRUE,			/* pc_relative */
316 	 0,			/* bitpos */
317 	 complain_overflow_signed,/* complain_on_overflow */
318 	 bfd_elf_generic_reloc,	/* special_function */
319 	 "R_ARM_THM_XPC22",	/* name */
320 	 FALSE,			/* partial_inplace */
321 	 0x07ff2fff,		/* src_mask */
322 	 0x07ff2fff,		/* dst_mask */
323 	 TRUE),			/* pcrel_offset */
324 
325   /* Dynamic TLS relocations.  */
326 
327   HOWTO (R_ARM_TLS_DTPMOD32,	/* type */
328 	 0,			/* rightshift */
329 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
330 	 32,			/* bitsize */
331 	 FALSE,			/* pc_relative */
332 	 0,			/* bitpos */
333 	 complain_overflow_bitfield,/* complain_on_overflow */
334 	 bfd_elf_generic_reloc, /* special_function */
335 	 "R_ARM_TLS_DTPMOD32",	/* name */
336 	 TRUE,			/* partial_inplace */
337 	 0xffffffff,		/* src_mask */
338 	 0xffffffff,		/* dst_mask */
339 	 FALSE),		/* pcrel_offset */
340 
341   HOWTO (R_ARM_TLS_DTPOFF32,	/* type */
342 	 0,			/* rightshift */
343 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
344 	 32,			/* bitsize */
345 	 FALSE,			/* pc_relative */
346 	 0,			/* bitpos */
347 	 complain_overflow_bitfield,/* complain_on_overflow */
348 	 bfd_elf_generic_reloc, /* special_function */
349 	 "R_ARM_TLS_DTPOFF32",	/* name */
350 	 TRUE,			/* partial_inplace */
351 	 0xffffffff,		/* src_mask */
352 	 0xffffffff,		/* dst_mask */
353 	 FALSE),		/* pcrel_offset */
354 
355   HOWTO (R_ARM_TLS_TPOFF32,	/* type */
356 	 0,			/* rightshift */
357 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
358 	 32,			/* bitsize */
359 	 FALSE,			/* pc_relative */
360 	 0,			/* bitpos */
361 	 complain_overflow_bitfield,/* complain_on_overflow */
362 	 bfd_elf_generic_reloc, /* special_function */
363 	 "R_ARM_TLS_TPOFF32",	/* name */
364 	 TRUE,			/* partial_inplace */
365 	 0xffffffff,		/* src_mask */
366 	 0xffffffff,		/* dst_mask */
367 	 FALSE),		/* pcrel_offset */
368 
369   /* Relocs used in ARM Linux */
370 
371   HOWTO (R_ARM_COPY,		/* type */
372 	 0,			/* rightshift */
373 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
374 	 32,			/* bitsize */
375 	 FALSE,			/* pc_relative */
376 	 0,			/* bitpos */
377 	 complain_overflow_bitfield,/* complain_on_overflow */
378 	 bfd_elf_generic_reloc, /* special_function */
379 	 "R_ARM_COPY",		/* name */
380 	 TRUE,			/* partial_inplace */
381 	 0xffffffff,		/* src_mask */
382 	 0xffffffff,		/* dst_mask */
383 	 FALSE),		/* pcrel_offset */
384 
385   HOWTO (R_ARM_GLOB_DAT,	/* type */
386 	 0,			/* rightshift */
387 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
388 	 32,			/* bitsize */
389 	 FALSE,			/* pc_relative */
390 	 0,			/* bitpos */
391 	 complain_overflow_bitfield,/* complain_on_overflow */
392 	 bfd_elf_generic_reloc, /* special_function */
393 	 "R_ARM_GLOB_DAT",	/* name */
394 	 TRUE,			/* partial_inplace */
395 	 0xffffffff,		/* src_mask */
396 	 0xffffffff,		/* dst_mask */
397 	 FALSE),		/* pcrel_offset */
398 
399   HOWTO (R_ARM_JUMP_SLOT,	/* type */
400 	 0,			/* rightshift */
401 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
402 	 32,			/* bitsize */
403 	 FALSE,			/* pc_relative */
404 	 0,			/* bitpos */
405 	 complain_overflow_bitfield,/* complain_on_overflow */
406 	 bfd_elf_generic_reloc, /* special_function */
407 	 "R_ARM_JUMP_SLOT",	/* name */
408 	 TRUE,			/* partial_inplace */
409 	 0xffffffff,		/* src_mask */
410 	 0xffffffff,		/* dst_mask */
411 	 FALSE),		/* pcrel_offset */
412 
413   HOWTO (R_ARM_RELATIVE,	/* type */
414 	 0,			/* rightshift */
415 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
416 	 32,			/* bitsize */
417 	 FALSE,			/* pc_relative */
418 	 0,			/* bitpos */
419 	 complain_overflow_bitfield,/* complain_on_overflow */
420 	 bfd_elf_generic_reloc, /* special_function */
421 	 "R_ARM_RELATIVE",	/* name */
422 	 TRUE,			/* partial_inplace */
423 	 0xffffffff,		/* src_mask */
424 	 0xffffffff,		/* dst_mask */
425 	 FALSE),		/* pcrel_offset */
426 
427   HOWTO (R_ARM_GOTOFF32,	/* type */
428 	 0,			/* rightshift */
429 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
430 	 32,			/* bitsize */
431 	 FALSE,			/* pc_relative */
432 	 0,			/* bitpos */
433 	 complain_overflow_bitfield,/* complain_on_overflow */
434 	 bfd_elf_generic_reloc, /* special_function */
435 	 "R_ARM_GOTOFF32",	/* name */
436 	 TRUE,			/* partial_inplace */
437 	 0xffffffff,		/* src_mask */
438 	 0xffffffff,		/* dst_mask */
439 	 FALSE),		/* pcrel_offset */
440 
441   HOWTO (R_ARM_GOTPC,		/* type */
442 	 0,			/* rightshift */
443 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
444 	 32,			/* bitsize */
445 	 TRUE,			/* pc_relative */
446 	 0,			/* bitpos */
447 	 complain_overflow_bitfield,/* complain_on_overflow */
448 	 bfd_elf_generic_reloc, /* special_function */
449 	 "R_ARM_GOTPC",		/* name */
450 	 TRUE,			/* partial_inplace */
451 	 0xffffffff,		/* src_mask */
452 	 0xffffffff,		/* dst_mask */
453 	 TRUE),			/* pcrel_offset */
454 
455   HOWTO (R_ARM_GOT32,		/* type */
456 	 0,			/* rightshift */
457 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
458 	 32,			/* bitsize */
459 	 FALSE,			/* pc_relative */
460 	 0,			/* bitpos */
461 	 complain_overflow_bitfield,/* complain_on_overflow */
462 	 bfd_elf_generic_reloc, /* special_function */
463 	 "R_ARM_GOT32",		/* name */
464 	 TRUE,			/* partial_inplace */
465 	 0xffffffff,		/* src_mask */
466 	 0xffffffff,		/* dst_mask */
467 	 FALSE),		/* pcrel_offset */
468 
469   HOWTO (R_ARM_PLT32,		/* type */
470 	 2,			/* rightshift */
471 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
472 	 24,			/* bitsize */
473 	 TRUE,			/* pc_relative */
474 	 0,			/* bitpos */
475 	 complain_overflow_bitfield,/* complain_on_overflow */
476 	 bfd_elf_generic_reloc, /* special_function */
477 	 "R_ARM_PLT32",		/* name */
478 	 FALSE,			/* partial_inplace */
479 	 0x00ffffff,		/* src_mask */
480 	 0x00ffffff,		/* dst_mask */
481 	 TRUE),			/* pcrel_offset */
482 
483   HOWTO (R_ARM_CALL,		/* type */
484 	 2,			/* rightshift */
485 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
486 	 24,			/* bitsize */
487 	 TRUE,			/* pc_relative */
488 	 0,			/* bitpos */
489 	 complain_overflow_signed,/* complain_on_overflow */
490 	 bfd_elf_generic_reloc,	/* special_function */
491 	 "R_ARM_CALL",		/* name */
492 	 FALSE,			/* partial_inplace */
493 	 0x00ffffff,		/* src_mask */
494 	 0x00ffffff,		/* dst_mask */
495 	 TRUE),			/* pcrel_offset */
496 
497   HOWTO (R_ARM_JUMP24,		/* type */
498 	 2,			/* rightshift */
499 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
500 	 24,			/* bitsize */
501 	 TRUE,			/* pc_relative */
502 	 0,			/* bitpos */
503 	 complain_overflow_signed,/* complain_on_overflow */
504 	 bfd_elf_generic_reloc,	/* special_function */
505 	 "R_ARM_JUMP24",	/* name */
506 	 FALSE,			/* partial_inplace */
507 	 0x00ffffff,		/* src_mask */
508 	 0x00ffffff,		/* dst_mask */
509 	 TRUE),			/* pcrel_offset */
510 
511   HOWTO (R_ARM_THM_JUMP24,	/* type */
512 	 1,			/* rightshift */
513 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
514 	 24,			/* bitsize */
515 	 TRUE,			/* pc_relative */
516 	 0,			/* bitpos */
517 	 complain_overflow_signed,/* complain_on_overflow */
518 	 bfd_elf_generic_reloc,	/* special_function */
519 	 "R_ARM_THM_JUMP24",	/* name */
520 	 FALSE,			/* partial_inplace */
521 	 0x07ff2fff,		/* src_mask */
522 	 0x07ff2fff,		/* dst_mask */
523 	 TRUE),			/* pcrel_offset */
524 
525   HOWTO (R_ARM_BASE_ABS,	/* type */
526 	 0,			/* rightshift */
527 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
528 	 32,			/* bitsize */
529 	 FALSE,			/* pc_relative */
530 	 0,			/* bitpos */
531 	 complain_overflow_dont,/* complain_on_overflow */
532 	 bfd_elf_generic_reloc,	/* special_function */
533 	 "R_ARM_BASE_ABS",	/* name */
534 	 FALSE,			/* partial_inplace */
535 	 0xffffffff,		/* src_mask */
536 	 0xffffffff,		/* dst_mask */
537 	 FALSE),		/* pcrel_offset */
538 
539   HOWTO (R_ARM_ALU_PCREL7_0,	/* type */
540 	 0,			/* rightshift */
541 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
542 	 12,			/* bitsize */
543 	 TRUE,			/* pc_relative */
544 	 0,			/* bitpos */
545 	 complain_overflow_dont,/* complain_on_overflow */
546 	 bfd_elf_generic_reloc,	/* special_function */
547 	 "R_ARM_ALU_PCREL_7_0",	/* name */
548 	 FALSE,			/* partial_inplace */
549 	 0x00000fff,		/* src_mask */
550 	 0x00000fff,		/* dst_mask */
551 	 TRUE),			/* pcrel_offset */
552 
553   HOWTO (R_ARM_ALU_PCREL15_8,	/* type */
554 	 0,			/* rightshift */
555 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
556 	 12,			/* bitsize */
557 	 TRUE,			/* pc_relative */
558 	 8,			/* bitpos */
559 	 complain_overflow_dont,/* complain_on_overflow */
560 	 bfd_elf_generic_reloc,	/* special_function */
561 	 "R_ARM_ALU_PCREL_15_8",/* name */
562 	 FALSE,			/* partial_inplace */
563 	 0x00000fff,		/* src_mask */
564 	 0x00000fff,		/* dst_mask */
565 	 TRUE),			/* pcrel_offset */
566 
567   HOWTO (R_ARM_ALU_PCREL23_15,	/* type */
568 	 0,			/* rightshift */
569 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
570 	 12,			/* bitsize */
571 	 TRUE,			/* pc_relative */
572 	 16,			/* bitpos */
573 	 complain_overflow_dont,/* complain_on_overflow */
574 	 bfd_elf_generic_reloc,	/* special_function */
575 	 "R_ARM_ALU_PCREL_23_15",/* name */
576 	 FALSE,			/* partial_inplace */
577 	 0x00000fff,		/* src_mask */
578 	 0x00000fff,		/* dst_mask */
579 	 TRUE),			/* pcrel_offset */
580 
581   HOWTO (R_ARM_LDR_SBREL_11_0,	/* type */
582 	 0,			/* rightshift */
583 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
584 	 12,			/* bitsize */
585 	 FALSE,			/* pc_relative */
586 	 0,			/* bitpos */
587 	 complain_overflow_dont,/* complain_on_overflow */
588 	 bfd_elf_generic_reloc,	/* special_function */
589 	 "R_ARM_LDR_SBREL_11_0",/* name */
590 	 FALSE,			/* partial_inplace */
591 	 0x00000fff,		/* src_mask */
592 	 0x00000fff,		/* dst_mask */
593 	 FALSE),		/* pcrel_offset */
594 
595   HOWTO (R_ARM_ALU_SBREL_19_12,	/* type */
596 	 0,			/* rightshift */
597 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
598 	 8,			/* bitsize */
599 	 FALSE,			/* pc_relative */
600 	 12,			/* bitpos */
601 	 complain_overflow_dont,/* complain_on_overflow */
602 	 bfd_elf_generic_reloc,	/* special_function */
603 	 "R_ARM_ALU_SBREL_19_12",/* name */
604 	 FALSE,			/* partial_inplace */
605 	 0x000ff000,		/* src_mask */
606 	 0x000ff000,		/* dst_mask */
607 	 FALSE),		/* pcrel_offset */
608 
609   HOWTO (R_ARM_ALU_SBREL_27_20,	/* type */
610 	 0,			/* rightshift */
611 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
612 	 8,			/* bitsize */
613 	 FALSE,			/* pc_relative */
614 	 20,			/* bitpos */
615 	 complain_overflow_dont,/* complain_on_overflow */
616 	 bfd_elf_generic_reloc,	/* special_function */
617 	 "R_ARM_ALU_SBREL_27_20",/* name */
618 	 FALSE,			/* partial_inplace */
619 	 0x0ff00000,		/* src_mask */
620 	 0x0ff00000,		/* dst_mask */
621 	 FALSE),		/* pcrel_offset */
622 
623   HOWTO (R_ARM_TARGET1,		/* type */
624 	 0,			/* rightshift */
625 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
626 	 32,			/* bitsize */
627 	 FALSE,			/* pc_relative */
628 	 0,			/* bitpos */
629 	 complain_overflow_dont,/* complain_on_overflow */
630 	 bfd_elf_generic_reloc,	/* special_function */
631 	 "R_ARM_TARGET1",	/* name */
632 	 FALSE,			/* partial_inplace */
633 	 0xffffffff,		/* src_mask */
634 	 0xffffffff,		/* dst_mask */
635 	 FALSE),		/* pcrel_offset */
636 
637   HOWTO (R_ARM_ROSEGREL32,	/* type */
638 	 0,			/* rightshift */
639 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
640 	 32,			/* bitsize */
641 	 FALSE,			/* pc_relative */
642 	 0,			/* bitpos */
643 	 complain_overflow_dont,/* complain_on_overflow */
644 	 bfd_elf_generic_reloc,	/* special_function */
645 	 "R_ARM_ROSEGREL32",	/* name */
646 	 FALSE,			/* partial_inplace */
647 	 0xffffffff,		/* src_mask */
648 	 0xffffffff,		/* dst_mask */
649 	 FALSE),		/* pcrel_offset */
650 
651   HOWTO (R_ARM_V4BX,		/* type */
652 	 0,			/* rightshift */
653 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
654 	 32,			/* bitsize */
655 	 FALSE,			/* pc_relative */
656 	 0,			/* bitpos */
657 	 complain_overflow_dont,/* complain_on_overflow */
658 	 bfd_elf_generic_reloc,	/* special_function */
659 	 "R_ARM_V4BX",		/* name */
660 	 FALSE,			/* partial_inplace */
661 	 0xffffffff,		/* src_mask */
662 	 0xffffffff,		/* dst_mask */
663 	 FALSE),		/* pcrel_offset */
664 
665   HOWTO (R_ARM_TARGET2,		/* type */
666 	 0,			/* rightshift */
667 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
668 	 32,			/* bitsize */
669 	 FALSE,			/* pc_relative */
670 	 0,			/* bitpos */
671 	 complain_overflow_signed,/* complain_on_overflow */
672 	 bfd_elf_generic_reloc,	/* special_function */
673 	 "R_ARM_TARGET2",	/* name */
674 	 FALSE,			/* partial_inplace */
675 	 0xffffffff,		/* src_mask */
676 	 0xffffffff,		/* dst_mask */
677 	 TRUE),			/* pcrel_offset */
678 
679   HOWTO (R_ARM_PREL31,		/* type */
680 	 0,			/* rightshift */
681 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
682 	 31,			/* bitsize */
683 	 TRUE,			/* pc_relative */
684 	 0,			/* bitpos */
685 	 complain_overflow_signed,/* complain_on_overflow */
686 	 bfd_elf_generic_reloc,	/* special_function */
687 	 "R_ARM_PREL31",	/* name */
688 	 FALSE,			/* partial_inplace */
689 	 0x7fffffff,		/* src_mask */
690 	 0x7fffffff,		/* dst_mask */
691 	 TRUE),			/* pcrel_offset */
692 
693   HOWTO (R_ARM_MOVW_ABS_NC,	/* type */
694 	 0,			/* rightshift */
695 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
696 	 16,			/* bitsize */
697 	 FALSE,			/* pc_relative */
698 	 0,			/* bitpos */
699 	 complain_overflow_dont,/* complain_on_overflow */
700 	 bfd_elf_generic_reloc,	/* special_function */
701 	 "R_ARM_MOVW_ABS_NC",	/* name */
702 	 FALSE,			/* partial_inplace */
703 	 0x000f0fff,		/* src_mask */
704 	 0x000f0fff,		/* dst_mask */
705 	 FALSE),		/* pcrel_offset */
706 
707   HOWTO (R_ARM_MOVT_ABS,	/* type */
708 	 0,			/* rightshift */
709 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
710 	 16,			/* bitsize */
711 	 FALSE,			/* pc_relative */
712 	 0,			/* bitpos */
713 	 complain_overflow_bitfield,/* complain_on_overflow */
714 	 bfd_elf_generic_reloc,	/* special_function */
715 	 "R_ARM_MOVT_ABS",	/* name */
716 	 FALSE,			/* partial_inplace */
717 	 0x000f0fff,		/* src_mask */
718 	 0x000f0fff,		/* dst_mask */
719 	 FALSE),		/* pcrel_offset */
720 
721   HOWTO (R_ARM_MOVW_PREL_NC,	/* type */
722 	 0,			/* rightshift */
723 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
724 	 16,			/* bitsize */
725 	 TRUE,			/* pc_relative */
726 	 0,			/* bitpos */
727 	 complain_overflow_dont,/* complain_on_overflow */
728 	 bfd_elf_generic_reloc,	/* special_function */
729 	 "R_ARM_MOVW_PREL_NC",	/* name */
730 	 FALSE,			/* partial_inplace */
731 	 0x000f0fff,		/* src_mask */
732 	 0x000f0fff,		/* dst_mask */
733 	 TRUE),			/* pcrel_offset */
734 
735   HOWTO (R_ARM_MOVT_PREL,	/* type */
736 	 0,			/* rightshift */
737 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
738 	 16,			/* bitsize */
739 	 TRUE,			/* pc_relative */
740 	 0,			/* bitpos */
741 	 complain_overflow_bitfield,/* complain_on_overflow */
742 	 bfd_elf_generic_reloc,	/* special_function */
743 	 "R_ARM_MOVT_PREL",	/* name */
744 	 FALSE,			/* partial_inplace */
745 	 0x000f0fff,		/* src_mask */
746 	 0x000f0fff,		/* dst_mask */
747 	 TRUE),			/* pcrel_offset */
748 
749   HOWTO (R_ARM_THM_MOVW_ABS_NC,	/* type */
750 	 0,			/* rightshift */
751 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
752 	 16,			/* bitsize */
753 	 FALSE,			/* pc_relative */
754 	 0,			/* bitpos */
755 	 complain_overflow_dont,/* complain_on_overflow */
756 	 bfd_elf_generic_reloc,	/* special_function */
757 	 "R_ARM_THM_MOVW_ABS_NC",/* name */
758 	 FALSE,			/* partial_inplace */
759 	 0x040f70ff,		/* src_mask */
760 	 0x040f70ff,		/* dst_mask */
761 	 FALSE),		/* pcrel_offset */
762 
763   HOWTO (R_ARM_THM_MOVT_ABS,	/* type */
764 	 0,			/* rightshift */
765 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
766 	 16,			/* bitsize */
767 	 FALSE,			/* pc_relative */
768 	 0,			/* bitpos */
769 	 complain_overflow_bitfield,/* complain_on_overflow */
770 	 bfd_elf_generic_reloc,	/* special_function */
771 	 "R_ARM_THM_MOVT_ABS",	/* name */
772 	 FALSE,			/* partial_inplace */
773 	 0x040f70ff,		/* src_mask */
774 	 0x040f70ff,		/* dst_mask */
775 	 FALSE),		/* pcrel_offset */
776 
777   HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
778 	 0,			/* rightshift */
779 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
780 	 16,			/* bitsize */
781 	 TRUE,			/* pc_relative */
782 	 0,			/* bitpos */
783 	 complain_overflow_dont,/* complain_on_overflow */
784 	 bfd_elf_generic_reloc,	/* special_function */
785 	 "R_ARM_THM_MOVW_PREL_NC",/* name */
786 	 FALSE,			/* partial_inplace */
787 	 0x040f70ff,		/* src_mask */
788 	 0x040f70ff,		/* dst_mask */
789 	 TRUE),			/* pcrel_offset */
790 
791   HOWTO (R_ARM_THM_MOVT_PREL,	/* type */
792 	 0,			/* rightshift */
793 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
794 	 16,			/* bitsize */
795 	 TRUE,			/* pc_relative */
796 	 0,			/* bitpos */
797 	 complain_overflow_bitfield,/* complain_on_overflow */
798 	 bfd_elf_generic_reloc,	/* special_function */
799 	 "R_ARM_THM_MOVT_PREL",	/* name */
800 	 FALSE,			/* partial_inplace */
801 	 0x040f70ff,		/* src_mask */
802 	 0x040f70ff,		/* dst_mask */
803 	 TRUE),			/* pcrel_offset */
804 
805   HOWTO (R_ARM_THM_JUMP19,	/* type */
806 	 1,			/* rightshift */
807 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
808 	 19,			/* bitsize */
809 	 TRUE,			/* pc_relative */
810 	 0,			/* bitpos */
811 	 complain_overflow_signed,/* complain_on_overflow */
812 	 bfd_elf_generic_reloc, /* special_function */
813 	 "R_ARM_THM_JUMP19",	/* name */
814 	 FALSE,			/* partial_inplace */
815 	 0x043f2fff,		/* src_mask */
816 	 0x043f2fff,		/* dst_mask */
817 	 TRUE),			/* pcrel_offset */
818 
819   HOWTO (R_ARM_THM_JUMP6,	/* type */
820 	 1,			/* rightshift */
821 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
822 	 6,			/* bitsize */
823 	 TRUE,			/* pc_relative */
824 	 0,			/* bitpos */
825 	 complain_overflow_unsigned,/* complain_on_overflow */
826 	 bfd_elf_generic_reloc,	/* special_function */
827 	 "R_ARM_THM_JUMP6",	/* name */
828 	 FALSE,			/* partial_inplace */
829 	 0x02f8,		/* src_mask */
830 	 0x02f8,		/* dst_mask */
831 	 TRUE),			/* pcrel_offset */
832 
833   /* These are declared as 13-bit signed relocations because we can
834      address -4095 .. 4095(base) by altering ADDW to SUBW or vice
835      versa.  */
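  /* Worked example (illustration only): a computed value of -3 from
     Pa(PC) does not fit ADDW's unsigned 12-bit immediate, so the
     relocation is applied by flipping the encoding, e.g.

	addw  r0, pc, #0	becomes		subw  r0, pc, #3

     which gives the quoted -4095 .. 4095 range even though no single
     instruction carries a 13-bit field.  */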
836   HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
837 	 0,			/* rightshift */
838 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
839 	 13,			/* bitsize */
840 	 TRUE,			/* pc_relative */
841 	 0,			/* bitpos */
842 	 complain_overflow_dont,/* complain_on_overflow */
843 	 bfd_elf_generic_reloc,	/* special_function */
844 	 "R_ARM_THM_ALU_PREL_11_0",/* name */
845 	 FALSE,			/* partial_inplace */
846 	 0xffffffff,		/* src_mask */
847 	 0xffffffff,		/* dst_mask */
848 	 TRUE),			/* pcrel_offset */
849 
850   HOWTO (R_ARM_THM_PC12,	/* type */
851 	 0,			/* rightshift */
852 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
853 	 13,			/* bitsize */
854 	 TRUE,			/* pc_relative */
855 	 0,			/* bitpos */
856 	 complain_overflow_dont,/* complain_on_overflow */
857 	 bfd_elf_generic_reloc,	/* special_function */
858 	 "R_ARM_THM_PC12",	/* name */
859 	 FALSE,			/* partial_inplace */
860 	 0xffffffff,		/* src_mask */
861 	 0xffffffff,		/* dst_mask */
862 	 TRUE),			/* pcrel_offset */
863 
864   HOWTO (R_ARM_ABS32_NOI,	/* type */
865 	 0,			/* rightshift */
866 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
867 	 32,			/* bitsize */
868 	 FALSE,			/* pc_relative */
869 	 0,			/* bitpos */
870 	 complain_overflow_dont,/* complain_on_overflow */
871 	 bfd_elf_generic_reloc,	/* special_function */
872 	 "R_ARM_ABS32_NOI",	/* name */
873 	 FALSE,			/* partial_inplace */
874 	 0xffffffff,		/* src_mask */
875 	 0xffffffff,		/* dst_mask */
876 	 FALSE),		/* pcrel_offset */
877 
878   HOWTO (R_ARM_REL32_NOI,	/* type */
879 	 0,			/* rightshift */
880 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
881 	 32,			/* bitsize */
882 	 TRUE,			/* pc_relative */
883 	 0,			/* bitpos */
884 	 complain_overflow_dont,/* complain_on_overflow */
885 	 bfd_elf_generic_reloc,	/* special_function */
886 	 "R_ARM_REL32_NOI",	/* name */
887 	 FALSE,			/* partial_inplace */
888 	 0xffffffff,		/* src_mask */
889 	 0xffffffff,		/* dst_mask */
890 	 FALSE),		/* pcrel_offset */
891 
892   /* Group relocations.  */
893 
894   HOWTO (R_ARM_ALU_PC_G0_NC,	/* type */
895 	 0,			/* rightshift */
896 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
897 	 32,			/* bitsize */
898 	 TRUE,			/* pc_relative */
899 	 0,			/* bitpos */
900 	 complain_overflow_dont,/* complain_on_overflow */
901 	 bfd_elf_generic_reloc,	/* special_function */
902 	 "R_ARM_ALU_PC_G0_NC",	/* name */
903 	 FALSE,			/* partial_inplace */
904 	 0xffffffff,		/* src_mask */
905 	 0xffffffff,		/* dst_mask */
906 	 TRUE),			/* pcrel_offset */
907 
908   HOWTO (R_ARM_ALU_PC_G0,	/* type */
909 	 0,			/* rightshift */
910 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
911 	 32,			/* bitsize */
912 	 TRUE,			/* pc_relative */
913 	 0,			/* bitpos */
914 	 complain_overflow_dont,/* complain_on_overflow */
915 	 bfd_elf_generic_reloc,	/* special_function */
916 	 "R_ARM_ALU_PC_G0",	/* name */
917 	 FALSE,			/* partial_inplace */
918 	 0xffffffff,		/* src_mask */
919 	 0xffffffff,		/* dst_mask */
920 	 TRUE),			/* pcrel_offset */
921 
922   HOWTO (R_ARM_ALU_PC_G1_NC,	/* type */
923 	 0,			/* rightshift */
924 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
925 	 32,			/* bitsize */
926 	 TRUE,			/* pc_relative */
927 	 0,			/* bitpos */
928 	 complain_overflow_dont,/* complain_on_overflow */
929 	 bfd_elf_generic_reloc,	/* special_function */
930 	 "R_ARM_ALU_PC_G1_NC",	/* name */
931 	 FALSE,			/* partial_inplace */
932 	 0xffffffff,		/* src_mask */
933 	 0xffffffff,		/* dst_mask */
934 	 TRUE),			/* pcrel_offset */
935 
936   HOWTO (R_ARM_ALU_PC_G1,	/* type */
937 	 0,			/* rightshift */
938 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
939 	 32,			/* bitsize */
940 	 TRUE,			/* pc_relative */
941 	 0,			/* bitpos */
942 	 complain_overflow_dont,/* complain_on_overflow */
943 	 bfd_elf_generic_reloc,	/* special_function */
944 	 "R_ARM_ALU_PC_G1",	/* name */
945 	 FALSE,			/* partial_inplace */
946 	 0xffffffff,		/* src_mask */
947 	 0xffffffff,		/* dst_mask */
948 	 TRUE),			/* pcrel_offset */
949 
950   HOWTO (R_ARM_ALU_PC_G2,	/* type */
951 	 0,			/* rightshift */
952 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
953 	 32,			/* bitsize */
954 	 TRUE,			/* pc_relative */
955 	 0,			/* bitpos */
956 	 complain_overflow_dont,/* complain_on_overflow */
957 	 bfd_elf_generic_reloc,	/* special_function */
958 	 "R_ARM_ALU_PC_G2",	/* name */
959 	 FALSE,			/* partial_inplace */
960 	 0xffffffff,		/* src_mask */
961 	 0xffffffff,		/* dst_mask */
962 	 TRUE),			/* pcrel_offset */
963 
964   HOWTO (R_ARM_LDR_PC_G1,	/* type */
965 	 0,			/* rightshift */
966 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
967 	 32,			/* bitsize */
968 	 TRUE,			/* pc_relative */
969 	 0,			/* bitpos */
970 	 complain_overflow_dont,/* complain_on_overflow */
971 	 bfd_elf_generic_reloc,	/* special_function */
972 	 "R_ARM_LDR_PC_G1",	/* name */
973 	 FALSE,			/* partial_inplace */
974 	 0xffffffff,		/* src_mask */
975 	 0xffffffff,		/* dst_mask */
976 	 TRUE),			/* pcrel_offset */
977 
978   HOWTO (R_ARM_LDR_PC_G2,	/* type */
979 	 0,			/* rightshift */
980 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
981 	 32,			/* bitsize */
982 	 TRUE,			/* pc_relative */
983 	 0,			/* bitpos */
984 	 complain_overflow_dont,/* complain_on_overflow */
985 	 bfd_elf_generic_reloc,	/* special_function */
986 	 "R_ARM_LDR_PC_G2",	/* name */
987 	 FALSE,			/* partial_inplace */
988 	 0xffffffff,		/* src_mask */
989 	 0xffffffff,		/* dst_mask */
990 	 TRUE),			/* pcrel_offset */
991 
992   HOWTO (R_ARM_LDRS_PC_G0,	/* type */
993 	 0,			/* rightshift */
994 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
995 	 32,			/* bitsize */
996 	 TRUE,			/* pc_relative */
997 	 0,			/* bitpos */
998 	 complain_overflow_dont,/* complain_on_overflow */
999 	 bfd_elf_generic_reloc,	/* special_function */
1000 	 "R_ARM_LDRS_PC_G0",	/* name */
1001 	 FALSE,			/* partial_inplace */
1002 	 0xffffffff,		/* src_mask */
1003 	 0xffffffff,		/* dst_mask */
1004 	 TRUE),			/* pcrel_offset */
1005 
1006   HOWTO (R_ARM_LDRS_PC_G1,	/* type */
1007 	 0,			/* rightshift */
1008 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1009 	 32,			/* bitsize */
1010 	 TRUE,			/* pc_relative */
1011 	 0,			/* bitpos */
1012 	 complain_overflow_dont,/* complain_on_overflow */
1013 	 bfd_elf_generic_reloc,	/* special_function */
1014 	 "R_ARM_LDRS_PC_G1",	/* name */
1015 	 FALSE,			/* partial_inplace */
1016 	 0xffffffff,		/* src_mask */
1017 	 0xffffffff,		/* dst_mask */
1018 	 TRUE),			/* pcrel_offset */
1019 
1020   HOWTO (R_ARM_LDRS_PC_G2,	/* type */
1021 	 0,			/* rightshift */
1022 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1023 	 32,			/* bitsize */
1024 	 TRUE,			/* pc_relative */
1025 	 0,			/* bitpos */
1026 	 complain_overflow_dont,/* complain_on_overflow */
1027 	 bfd_elf_generic_reloc,	/* special_function */
1028 	 "R_ARM_LDRS_PC_G2",	/* name */
1029 	 FALSE,			/* partial_inplace */
1030 	 0xffffffff,		/* src_mask */
1031 	 0xffffffff,		/* dst_mask */
1032 	 TRUE),			/* pcrel_offset */
1033 
1034   HOWTO (R_ARM_LDC_PC_G0,	/* type */
1035 	 0,			/* rightshift */
1036 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1037 	 32,			/* bitsize */
1038 	 TRUE,			/* pc_relative */
1039 	 0,			/* bitpos */
1040 	 complain_overflow_dont,/* complain_on_overflow */
1041 	 bfd_elf_generic_reloc,	/* special_function */
1042 	 "R_ARM_LDC_PC_G0",	/* name */
1043 	 FALSE,			/* partial_inplace */
1044 	 0xffffffff,		/* src_mask */
1045 	 0xffffffff,		/* dst_mask */
1046 	 TRUE),			/* pcrel_offset */
1047 
1048   HOWTO (R_ARM_LDC_PC_G1,	/* type */
1049 	 0,			/* rightshift */
1050 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1051 	 32,			/* bitsize */
1052 	 TRUE,			/* pc_relative */
1053 	 0,			/* bitpos */
1054 	 complain_overflow_dont,/* complain_on_overflow */
1055 	 bfd_elf_generic_reloc,	/* special_function */
1056 	 "R_ARM_LDC_PC_G1",	/* name */
1057 	 FALSE,			/* partial_inplace */
1058 	 0xffffffff,		/* src_mask */
1059 	 0xffffffff,		/* dst_mask */
1060 	 TRUE),			/* pcrel_offset */
1061 
1062   HOWTO (R_ARM_LDC_PC_G2,	/* type */
1063 	 0,			/* rightshift */
1064 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1065 	 32,			/* bitsize */
1066 	 TRUE,			/* pc_relative */
1067 	 0,			/* bitpos */
1068 	 complain_overflow_dont,/* complain_on_overflow */
1069 	 bfd_elf_generic_reloc,	/* special_function */
1070 	 "R_ARM_LDC_PC_G2",	/* name */
1071 	 FALSE,			/* partial_inplace */
1072 	 0xffffffff,		/* src_mask */
1073 	 0xffffffff,		/* dst_mask */
1074 	 TRUE),			/* pcrel_offset */
1075 
1076   HOWTO (R_ARM_ALU_SB_G0_NC,	/* type */
1077 	 0,			/* rightshift */
1078 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1079 	 32,			/* bitsize */
1080 	 TRUE,			/* pc_relative */
1081 	 0,			/* bitpos */
1082 	 complain_overflow_dont,/* complain_on_overflow */
1083 	 bfd_elf_generic_reloc,	/* special_function */
1084 	 "R_ARM_ALU_SB_G0_NC",	/* name */
1085 	 FALSE,			/* partial_inplace */
1086 	 0xffffffff,		/* src_mask */
1087 	 0xffffffff,		/* dst_mask */
1088 	 TRUE),			/* pcrel_offset */
1089 
1090   HOWTO (R_ARM_ALU_SB_G0,	/* type */
1091 	 0,			/* rightshift */
1092 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1093 	 32,			/* bitsize */
1094 	 TRUE,			/* pc_relative */
1095 	 0,			/* bitpos */
1096 	 complain_overflow_dont,/* complain_on_overflow */
1097 	 bfd_elf_generic_reloc,	/* special_function */
1098 	 "R_ARM_ALU_SB_G0",	/* name */
1099 	 FALSE,			/* partial_inplace */
1100 	 0xffffffff,		/* src_mask */
1101 	 0xffffffff,		/* dst_mask */
1102 	 TRUE),			/* pcrel_offset */
1103 
1104   HOWTO (R_ARM_ALU_SB_G1_NC,	/* type */
1105 	 0,			/* rightshift */
1106 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1107 	 32,			/* bitsize */
1108 	 TRUE,			/* pc_relative */
1109 	 0,			/* bitpos */
1110 	 complain_overflow_dont,/* complain_on_overflow */
1111 	 bfd_elf_generic_reloc,	/* special_function */
1112 	 "R_ARM_ALU_SB_G1_NC",	/* name */
1113 	 FALSE,			/* partial_inplace */
1114 	 0xffffffff,		/* src_mask */
1115 	 0xffffffff,		/* dst_mask */
1116 	 TRUE),			/* pcrel_offset */
1117 
1118   HOWTO (R_ARM_ALU_SB_G1,	/* type */
1119 	 0,			/* rightshift */
1120 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1121 	 32,			/* bitsize */
1122 	 TRUE,			/* pc_relative */
1123 	 0,			/* bitpos */
1124 	 complain_overflow_dont,/* complain_on_overflow */
1125 	 bfd_elf_generic_reloc,	/* special_function */
1126 	 "R_ARM_ALU_SB_G1",	/* name */
1127 	 FALSE,			/* partial_inplace */
1128 	 0xffffffff,		/* src_mask */
1129 	 0xffffffff,		/* dst_mask */
1130 	 TRUE),			/* pcrel_offset */
1131 
1132   HOWTO (R_ARM_ALU_SB_G2,	/* type */
1133 	 0,			/* rightshift */
1134 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1135 	 32,			/* bitsize */
1136 	 TRUE,			/* pc_relative */
1137 	 0,			/* bitpos */
1138 	 complain_overflow_dont,/* complain_on_overflow */
1139 	 bfd_elf_generic_reloc,	/* special_function */
1140 	 "R_ARM_ALU_SB_G2",	/* name */
1141 	 FALSE,			/* partial_inplace */
1142 	 0xffffffff,		/* src_mask */
1143 	 0xffffffff,		/* dst_mask */
1144 	 TRUE),			/* pcrel_offset */
1145 
1146   HOWTO (R_ARM_LDR_SB_G0,	/* type */
1147 	 0,			/* rightshift */
1148 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1149 	 32,			/* bitsize */
1150 	 TRUE,			/* pc_relative */
1151 	 0,			/* bitpos */
1152 	 complain_overflow_dont,/* complain_on_overflow */
1153 	 bfd_elf_generic_reloc,	/* special_function */
1154 	 "R_ARM_LDR_SB_G0",	/* name */
1155 	 FALSE,			/* partial_inplace */
1156 	 0xffffffff,		/* src_mask */
1157 	 0xffffffff,		/* dst_mask */
1158 	 TRUE),			/* pcrel_offset */
1159 
1160   HOWTO (R_ARM_LDR_SB_G1,	/* type */
1161 	 0,			/* rightshift */
1162 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1163 	 32,			/* bitsize */
1164 	 TRUE,			/* pc_relative */
1165 	 0,			/* bitpos */
1166 	 complain_overflow_dont,/* complain_on_overflow */
1167 	 bfd_elf_generic_reloc,	/* special_function */
1168 	 "R_ARM_LDR_SB_G1",	/* name */
1169 	 FALSE,			/* partial_inplace */
1170 	 0xffffffff,		/* src_mask */
1171 	 0xffffffff,		/* dst_mask */
1172 	 TRUE),			/* pcrel_offset */
1173 
1174   HOWTO (R_ARM_LDR_SB_G2,	/* type */
1175 	 0,			/* rightshift */
1176 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1177 	 32,			/* bitsize */
1178 	 TRUE,			/* pc_relative */
1179 	 0,			/* bitpos */
1180 	 complain_overflow_dont,/* complain_on_overflow */
1181 	 bfd_elf_generic_reloc,	/* special_function */
1182 	 "R_ARM_LDR_SB_G2",	/* name */
1183 	 FALSE,			/* partial_inplace */
1184 	 0xffffffff,		/* src_mask */
1185 	 0xffffffff,		/* dst_mask */
1186 	 TRUE),			/* pcrel_offset */
1187 
1188   HOWTO (R_ARM_LDRS_SB_G0,	/* type */
1189 	 0,			/* rightshift */
1190 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1191 	 32,			/* bitsize */
1192 	 TRUE,			/* pc_relative */
1193 	 0,			/* bitpos */
1194 	 complain_overflow_dont,/* complain_on_overflow */
1195 	 bfd_elf_generic_reloc,	/* special_function */
1196 	 "R_ARM_LDRS_SB_G0",	/* name */
1197 	 FALSE,			/* partial_inplace */
1198 	 0xffffffff,		/* src_mask */
1199 	 0xffffffff,		/* dst_mask */
1200 	 TRUE),			/* pcrel_offset */
1201 
1202   HOWTO (R_ARM_LDRS_SB_G1,	/* type */
1203 	 0,			/* rightshift */
1204 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1205 	 32,			/* bitsize */
1206 	 TRUE,			/* pc_relative */
1207 	 0,			/* bitpos */
1208 	 complain_overflow_dont,/* complain_on_overflow */
1209 	 bfd_elf_generic_reloc,	/* special_function */
1210 	 "R_ARM_LDRS_SB_G1",	/* name */
1211 	 FALSE,			/* partial_inplace */
1212 	 0xffffffff,		/* src_mask */
1213 	 0xffffffff,		/* dst_mask */
1214 	 TRUE),			/* pcrel_offset */
1215 
1216   HOWTO (R_ARM_LDRS_SB_G2,	/* type */
1217 	 0,			/* rightshift */
1218 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1219 	 32,			/* bitsize */
1220 	 TRUE,			/* pc_relative */
1221 	 0,			/* bitpos */
1222 	 complain_overflow_dont,/* complain_on_overflow */
1223 	 bfd_elf_generic_reloc,	/* special_function */
1224 	 "R_ARM_LDRS_SB_G2",	/* name */
1225 	 FALSE,			/* partial_inplace */
1226 	 0xffffffff,		/* src_mask */
1227 	 0xffffffff,		/* dst_mask */
1228 	 TRUE),			/* pcrel_offset */
1229 
1230   HOWTO (R_ARM_LDC_SB_G0,	/* type */
1231 	 0,			/* rightshift */
1232 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1233 	 32,			/* bitsize */
1234 	 TRUE,			/* pc_relative */
1235 	 0,			/* bitpos */
1236 	 complain_overflow_dont,/* complain_on_overflow */
1237 	 bfd_elf_generic_reloc,	/* special_function */
1238 	 "R_ARM_LDC_SB_G0",	/* name */
1239 	 FALSE,			/* partial_inplace */
1240 	 0xffffffff,		/* src_mask */
1241 	 0xffffffff,		/* dst_mask */
1242 	 TRUE),			/* pcrel_offset */
1243 
1244   HOWTO (R_ARM_LDC_SB_G1,	/* type */
1245 	 0,			/* rightshift */
1246 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1247 	 32,			/* bitsize */
1248 	 TRUE,			/* pc_relative */
1249 	 0,			/* bitpos */
1250 	 complain_overflow_dont,/* complain_on_overflow */
1251 	 bfd_elf_generic_reloc,	/* special_function */
1252 	 "R_ARM_LDC_SB_G1",	/* name */
1253 	 FALSE,			/* partial_inplace */
1254 	 0xffffffff,		/* src_mask */
1255 	 0xffffffff,		/* dst_mask */
1256 	 TRUE),			/* pcrel_offset */
1257 
1258   HOWTO (R_ARM_LDC_SB_G2,	/* type */
1259 	 0,			/* rightshift */
1260 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1261 	 32,			/* bitsize */
1262 	 TRUE,			/* pc_relative */
1263 	 0,			/* bitpos */
1264 	 complain_overflow_dont,/* complain_on_overflow */
1265 	 bfd_elf_generic_reloc,	/* special_function */
1266 	 "R_ARM_LDC_SB_G2",	/* name */
1267 	 FALSE,			/* partial_inplace */
1268 	 0xffffffff,		/* src_mask */
1269 	 0xffffffff,		/* dst_mask */
1270 	 TRUE),			/* pcrel_offset */
1271 
1272   /* End of group relocations.  */
1273 
1274   HOWTO (R_ARM_MOVW_BREL_NC,	/* type */
1275 	 0,			/* rightshift */
1276 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1277 	 16,			/* bitsize */
1278 	 FALSE,			/* pc_relative */
1279 	 0,			/* bitpos */
1280 	 complain_overflow_dont,/* complain_on_overflow */
1281 	 bfd_elf_generic_reloc,	/* special_function */
1282 	 "R_ARM_MOVW_BREL_NC",	/* name */
1283 	 FALSE,			/* partial_inplace */
1284 	 0x0000ffff,		/* src_mask */
1285 	 0x0000ffff,		/* dst_mask */
1286 	 FALSE),		/* pcrel_offset */
1287 
1288   HOWTO (R_ARM_MOVT_BREL,	/* type */
1289 	 0,			/* rightshift */
1290 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1291 	 16,			/* bitsize */
1292 	 FALSE,			/* pc_relative */
1293 	 0,			/* bitpos */
1294 	 complain_overflow_bitfield,/* complain_on_overflow */
1295 	 bfd_elf_generic_reloc,	/* special_function */
1296 	 "R_ARM_MOVT_BREL",	/* name */
1297 	 FALSE,			/* partial_inplace */
1298 	 0x0000ffff,		/* src_mask */
1299 	 0x0000ffff,		/* dst_mask */
1300 	 FALSE),		/* pcrel_offset */
1301 
1302   HOWTO (R_ARM_MOVW_BREL,	/* type */
1303 	 0,			/* rightshift */
1304 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1305 	 16,			/* bitsize */
1306 	 FALSE,			/* pc_relative */
1307 	 0,			/* bitpos */
1308 	 complain_overflow_dont,/* complain_on_overflow */
1309 	 bfd_elf_generic_reloc,	/* special_function */
1310 	 "R_ARM_MOVW_BREL",	/* name */
1311 	 FALSE,			/* partial_inplace */
1312 	 0x0000ffff,		/* src_mask */
1313 	 0x0000ffff,		/* dst_mask */
1314 	 FALSE),		/* pcrel_offset */
1315 
1316   HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1317 	 0,			/* rightshift */
1318 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1319 	 16,			/* bitsize */
1320 	 FALSE,			/* pc_relative */
1321 	 0,			/* bitpos */
1322 	 complain_overflow_dont,/* complain_on_overflow */
1323 	 bfd_elf_generic_reloc,	/* special_function */
1324 	 "R_ARM_THM_MOVW_BREL_NC",/* name */
1325 	 FALSE,			/* partial_inplace */
1326 	 0x040f70ff,		/* src_mask */
1327 	 0x040f70ff,		/* dst_mask */
1328 	 FALSE),		/* pcrel_offset */
1329 
1330   HOWTO (R_ARM_THM_MOVT_BREL,	/* type */
1331 	 0,			/* rightshift */
1332 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1333 	 16,			/* bitsize */
1334 	 FALSE,			/* pc_relative */
1335 	 0,			/* bitpos */
1336 	 complain_overflow_bitfield,/* complain_on_overflow */
1337 	 bfd_elf_generic_reloc,	/* special_function */
1338 	 "R_ARM_THM_MOVT_BREL",	/* name */
1339 	 FALSE,			/* partial_inplace */
1340 	 0x040f70ff,		/* src_mask */
1341 	 0x040f70ff,		/* dst_mask */
1342 	 FALSE),		/* pcrel_offset */
1343 
1344   HOWTO (R_ARM_THM_MOVW_BREL,	/* type */
1345 	 0,			/* rightshift */
1346 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1347 	 16,			/* bitsize */
1348 	 FALSE,			/* pc_relative */
1349 	 0,			/* bitpos */
1350 	 complain_overflow_dont,/* complain_on_overflow */
1351 	 bfd_elf_generic_reloc,	/* special_function */
1352 	 "R_ARM_THM_MOVW_BREL",	/* name */
1353 	 FALSE,			/* partial_inplace */
1354 	 0x040f70ff,		/* src_mask */
1355 	 0x040f70ff,		/* dst_mask */
1356 	 FALSE),		/* pcrel_offset */
1357 
1358   HOWTO (R_ARM_TLS_GOTDESC,	/* type */
1359 	 0,			/* rightshift */
1360 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1361 	 32,			/* bitsize */
1362 	 FALSE,			/* pc_relative */
1363 	 0,			/* bitpos */
1364 	 complain_overflow_bitfield,/* complain_on_overflow */
1365 	 NULL,			/* special_function */
1366 	 "R_ARM_TLS_GOTDESC",	/* name */
1367 	 TRUE,			/* partial_inplace */
1368 	 0xffffffff,		/* src_mask */
1369 	 0xffffffff,		/* dst_mask */
1370 	 FALSE),		/* pcrel_offset */
1371 
1372   HOWTO (R_ARM_TLS_CALL,	/* type */
1373 	 0,			/* rightshift */
1374 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1375 	 24,			/* bitsize */
1376 	 FALSE,			/* pc_relative */
1377 	 0,			/* bitpos */
1378 	 complain_overflow_dont,/* complain_on_overflow */
1379 	 bfd_elf_generic_reloc,	/* special_function */
1380 	 "R_ARM_TLS_CALL",	/* name */
1381 	 FALSE,			/* partial_inplace */
1382 	 0x00ffffff,		/* src_mask */
1383 	 0x00ffffff,		/* dst_mask */
1384 	 FALSE),		/* pcrel_offset */
1385 
1386   HOWTO (R_ARM_TLS_DESCSEQ,	/* type */
1387 	 0,			/* rightshift */
1388 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1389 	 0,			/* bitsize */
1390 	 FALSE,			/* pc_relative */
1391 	 0,			/* bitpos */
1392 	 complain_overflow_bitfield,/* complain_on_overflow */
1393 	 bfd_elf_generic_reloc,	/* special_function */
1394 	 "R_ARM_TLS_DESCSEQ",	/* name */
1395 	 FALSE,			/* partial_inplace */
1396 	 0x00000000,		/* src_mask */
1397 	 0x00000000,		/* dst_mask */
1398 	 FALSE),		/* pcrel_offset */
1399 
1400   HOWTO (R_ARM_THM_TLS_CALL,	/* type */
1401 	 0,			/* rightshift */
1402 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1403 	 24,			/* bitsize */
1404 	 FALSE,			/* pc_relative */
1405 	 0,			/* bitpos */
1406 	 complain_overflow_dont,/* complain_on_overflow */
1407 	 bfd_elf_generic_reloc,	/* special_function */
1408 	 "R_ARM_THM_TLS_CALL",	/* name */
1409 	 FALSE,			/* partial_inplace */
1410 	 0x07ff07ff,		/* src_mask */
1411 	 0x07ff07ff,		/* dst_mask */
1412 	 FALSE),		/* pcrel_offset */
1413 
1414   HOWTO (R_ARM_PLT32_ABS,	/* type */
1415 	 0,			/* rightshift */
1416 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1417 	 32,			/* bitsize */
1418 	 FALSE,			/* pc_relative */
1419 	 0,			/* bitpos */
1420 	 complain_overflow_dont,/* complain_on_overflow */
1421 	 bfd_elf_generic_reloc,	/* special_function */
1422 	 "R_ARM_PLT32_ABS",	/* name */
1423 	 FALSE,			/* partial_inplace */
1424 	 0xffffffff,		/* src_mask */
1425 	 0xffffffff,		/* dst_mask */
1426 	 FALSE),		/* pcrel_offset */
1427 
1428   HOWTO (R_ARM_GOT_ABS,		/* type */
1429 	 0,			/* rightshift */
1430 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1431 	 32,			/* bitsize */
1432 	 FALSE,			/* pc_relative */
1433 	 0,			/* bitpos */
1434 	 complain_overflow_dont,/* complain_on_overflow */
1435 	 bfd_elf_generic_reloc,	/* special_function */
1436 	 "R_ARM_GOT_ABS",	/* name */
1437 	 FALSE,			/* partial_inplace */
1438 	 0xffffffff,		/* src_mask */
1439 	 0xffffffff,		/* dst_mask */
1440 	 FALSE),			/* pcrel_offset */
1441 
1442   HOWTO (R_ARM_GOT_PREL,	/* type */
1443 	 0,			/* rightshift */
1444 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1445 	 32,			/* bitsize */
1446 	 TRUE,			/* pc_relative */
1447 	 0,			/* bitpos */
1448 	 complain_overflow_dont,	/* complain_on_overflow */
1449 	 bfd_elf_generic_reloc,	/* special_function */
1450 	 "R_ARM_GOT_PREL",	/* name */
1451 	 FALSE,			/* partial_inplace */
1452 	 0xffffffff,		/* src_mask */
1453 	 0xffffffff,		/* dst_mask */
1454 	 TRUE),			/* pcrel_offset */
1455 
1456   HOWTO (R_ARM_GOT_BREL12,	/* type */
1457 	 0,			/* rightshift */
1458 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1459 	 12,			/* bitsize */
1460 	 FALSE,			/* pc_relative */
1461 	 0,			/* bitpos */
1462 	 complain_overflow_bitfield,/* complain_on_overflow */
1463 	 bfd_elf_generic_reloc,	/* special_function */
1464 	 "R_ARM_GOT_BREL12",	/* name */
1465 	 FALSE,			/* partial_inplace */
1466 	 0x00000fff,		/* src_mask */
1467 	 0x00000fff,		/* dst_mask */
1468 	 FALSE),		/* pcrel_offset */
1469 
1470   HOWTO (R_ARM_GOTOFF12,	/* type */
1471 	 0,			/* rightshift */
1472 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1473 	 12,			/* bitsize */
1474 	 FALSE,			/* pc_relative */
1475 	 0,			/* bitpos */
1476 	 complain_overflow_bitfield,/* complain_on_overflow */
1477 	 bfd_elf_generic_reloc,	/* special_function */
1478 	 "R_ARM_GOTOFF12",	/* name */
1479 	 FALSE,			/* partial_inplace */
1480 	 0x00000fff,		/* src_mask */
1481 	 0x00000fff,		/* dst_mask */
1482 	 FALSE),		/* pcrel_offset */
1483 
1484   EMPTY_HOWTO (R_ARM_GOTRELAX),	 /* reserved for future GOT-load optimizations */
1485 
1486   /* GNU extension to record C++ vtable member usage */
1487   HOWTO (R_ARM_GNU_VTENTRY,	/* type */
1488 	 0,			/* rightshift */
1489 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1490 	 0,			/* bitsize */
1491 	 FALSE,			/* pc_relative */
1492 	 0,			/* bitpos */
1493 	 complain_overflow_dont, /* complain_on_overflow */
1494 	 _bfd_elf_rel_vtable_reloc_fn,	/* special_function */
1495 	 "R_ARM_GNU_VTENTRY",	/* name */
1496 	 FALSE,			/* partial_inplace */
1497 	 0,			/* src_mask */
1498 	 0,			/* dst_mask */
1499 	 FALSE),		/* pcrel_offset */
1500 
1501   /* GNU extension to record C++ vtable hierarchy */
1502   HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1503 	 0,			/* rightshift */
1504 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1505 	 0,			/* bitsize */
1506 	 FALSE,			/* pc_relative */
1507 	 0,			/* bitpos */
1508 	 complain_overflow_dont, /* complain_on_overflow */
1509 	 NULL,			/* special_function */
1510 	 "R_ARM_GNU_VTINHERIT", /* name */
1511 	 FALSE,			/* partial_inplace */
1512 	 0,			/* src_mask */
1513 	 0,			/* dst_mask */
1514 	 FALSE),		/* pcrel_offset */
1515 
1516   HOWTO (R_ARM_THM_JUMP11,	/* type */
1517 	 1,			/* rightshift */
1518 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1519 	 11,			/* bitsize */
1520 	 TRUE,			/* pc_relative */
1521 	 0,			/* bitpos */
1522 	 complain_overflow_signed,	/* complain_on_overflow */
1523 	 bfd_elf_generic_reloc,	/* special_function */
1524 	 "R_ARM_THM_JUMP11",	/* name */
1525 	 FALSE,			/* partial_inplace */
1526 	 0x000007ff,		/* src_mask */
1527 	 0x000007ff,		/* dst_mask */
1528 	 TRUE),			/* pcrel_offset */
1529 
1530   HOWTO (R_ARM_THM_JUMP8,	/* type */
1531 	 1,			/* rightshift */
1532 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1533 	 8,			/* bitsize */
1534 	 TRUE,			/* pc_relative */
1535 	 0,			/* bitpos */
1536 	 complain_overflow_signed,	/* complain_on_overflow */
1537 	 bfd_elf_generic_reloc,	/* special_function */
1538 	 "R_ARM_THM_JUMP8",	/* name */
1539 	 FALSE,			/* partial_inplace */
1540 	 0x000000ff,		/* src_mask */
1541 	 0x000000ff,		/* dst_mask */
1542 	 TRUE),			/* pcrel_offset */
1543 
1544   /* TLS relocations */
1545   HOWTO (R_ARM_TLS_GD32,	/* type */
1546 	 0,			/* rightshift */
1547 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1548 	 32,			/* bitsize */
1549 	 FALSE,			/* pc_relative */
1550 	 0,			/* bitpos */
1551 	 complain_overflow_bitfield,/* complain_on_overflow */
1552 	 NULL,			/* special_function */
1553 	 "R_ARM_TLS_GD32",	/* name */
1554 	 TRUE,			/* partial_inplace */
1555 	 0xffffffff,		/* src_mask */
1556 	 0xffffffff,		/* dst_mask */
1557 	 FALSE),		/* pcrel_offset */
1558 
1559   HOWTO (R_ARM_TLS_LDM32,	/* type */
1560 	 0,			/* rightshift */
1561 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1562 	 32,			/* bitsize */
1563 	 FALSE,			/* pc_relative */
1564 	 0,			/* bitpos */
1565 	 complain_overflow_bitfield,/* complain_on_overflow */
1566 	 bfd_elf_generic_reloc, /* special_function */
1567 	 "R_ARM_TLS_LDM32",	/* name */
1568 	 TRUE,			/* partial_inplace */
1569 	 0xffffffff,		/* src_mask */
1570 	 0xffffffff,		/* dst_mask */
1571 	 FALSE),		/* pcrel_offset */
1572 
1573   HOWTO (R_ARM_TLS_LDO32,	/* type */
1574 	 0,			/* rightshift */
1575 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1576 	 32,			/* bitsize */
1577 	 FALSE,			/* pc_relative */
1578 	 0,			/* bitpos */
1579 	 complain_overflow_bitfield,/* complain_on_overflow */
1580 	 bfd_elf_generic_reloc, /* special_function */
1581 	 "R_ARM_TLS_LDO32",	/* name */
1582 	 TRUE,			/* partial_inplace */
1583 	 0xffffffff,		/* src_mask */
1584 	 0xffffffff,		/* dst_mask */
1585 	 FALSE),		/* pcrel_offset */
1586 
1587   HOWTO (R_ARM_TLS_IE32,	/* type */
1588 	 0,			/* rightshift */
1589 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1590 	 32,			/* bitsize */
1591 	 FALSE,			 /* pc_relative */
1592 	 0,			/* bitpos */
1593 	 complain_overflow_bitfield,/* complain_on_overflow */
1594 	 NULL,			/* special_function */
1595 	 "R_ARM_TLS_IE32",	/* name */
1596 	 TRUE,			/* partial_inplace */
1597 	 0xffffffff,		/* src_mask */
1598 	 0xffffffff,		/* dst_mask */
1599 	 FALSE),		/* pcrel_offset */
1600 
1601   HOWTO (R_ARM_TLS_LE32,	/* type */
1602 	 0,			/* rightshift */
1603 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1604 	 32,			/* bitsize */
1605 	 FALSE,			/* pc_relative */
1606 	 0,			/* bitpos */
1607 	 complain_overflow_bitfield,/* complain_on_overflow */
1608 	 NULL,			/* special_function */
1609 	 "R_ARM_TLS_LE32",	/* name */
1610 	 TRUE,			/* partial_inplace */
1611 	 0xffffffff,		/* src_mask */
1612 	 0xffffffff,		/* dst_mask */
1613 	 FALSE),		/* pcrel_offset */
1614 
1615   HOWTO (R_ARM_TLS_LDO12,	/* type */
1616 	 0,			/* rightshift */
1617 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1618 	 12,			/* bitsize */
1619 	 FALSE,			/* pc_relative */
1620 	 0,			/* bitpos */
1621 	 complain_overflow_bitfield,/* complain_on_overflow */
1622 	 bfd_elf_generic_reloc,	/* special_function */
1623 	 "R_ARM_TLS_LDO12",	/* name */
1624 	 FALSE,			/* partial_inplace */
1625 	 0x00000fff,		/* src_mask */
1626 	 0x00000fff,		/* dst_mask */
1627 	 FALSE),		/* pcrel_offset */
1628 
1629   HOWTO (R_ARM_TLS_LE12,	/* type */
1630 	 0,			/* rightshift */
1631 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1632 	 12,			/* bitsize */
1633 	 FALSE,			/* pc_relative */
1634 	 0,			/* bitpos */
1635 	 complain_overflow_bitfield,/* complain_on_overflow */
1636 	 bfd_elf_generic_reloc,	/* special_function */
1637 	 "R_ARM_TLS_LE12",	/* name */
1638 	 FALSE,			/* partial_inplace */
1639 	 0x00000fff,		/* src_mask */
1640 	 0x00000fff,		/* dst_mask */
1641 	 FALSE),		/* pcrel_offset */
1642 
1643   HOWTO (R_ARM_TLS_IE12GP,	/* type */
1644 	 0,			/* rightshift */
1645 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1646 	 12,			/* bitsize */
1647 	 FALSE,			/* pc_relative */
1648 	 0,			/* bitpos */
1649 	 complain_overflow_bitfield,/* complain_on_overflow */
1650 	 bfd_elf_generic_reloc,	/* special_function */
1651 	 "R_ARM_TLS_IE12GP",	/* name */
1652 	 FALSE,			/* partial_inplace */
1653 	 0x00000fff,		/* src_mask */
1654 	 0x00000fff,		/* dst_mask */
1655 	 FALSE),		/* pcrel_offset */
1656 
1657   /* 112-127 private relocations.  */
1658   EMPTY_HOWTO (112),
1659   EMPTY_HOWTO (113),
1660   EMPTY_HOWTO (114),
1661   EMPTY_HOWTO (115),
1662   EMPTY_HOWTO (116),
1663   EMPTY_HOWTO (117),
1664   EMPTY_HOWTO (118),
1665   EMPTY_HOWTO (119),
1666   EMPTY_HOWTO (120),
1667   EMPTY_HOWTO (121),
1668   EMPTY_HOWTO (122),
1669   EMPTY_HOWTO (123),
1670   EMPTY_HOWTO (124),
1671   EMPTY_HOWTO (125),
1672   EMPTY_HOWTO (126),
1673   EMPTY_HOWTO (127),
1674 
1675   /* R_ARM_ME_TOO, obsolete.  */
1676   EMPTY_HOWTO (128),
1677 
1678   HOWTO (R_ARM_THM_TLS_DESCSEQ,	/* type */
1679 	 0,			/* rightshift */
1680 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1681 	 0,			/* bitsize */
1682 	 FALSE,			/* pc_relative */
1683 	 0,			/* bitpos */
1684 	 complain_overflow_bitfield,/* complain_on_overflow */
1685 	 bfd_elf_generic_reloc,	/* special_function */
1686 	 "R_ARM_THM_TLS_DESCSEQ",/* name */
1687 	 FALSE,			/* partial_inplace */
1688 	 0x00000000,		/* src_mask */
1689 	 0x00000000,		/* dst_mask */
1690 	 FALSE),		/* pcrel_offset */
1691   EMPTY_HOWTO (130),
1692   EMPTY_HOWTO (131),
1693   HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type.  */
1694 	 0,			/* rightshift.  */
1695 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1696 	 16,			/* bitsize.  */
1697 	 FALSE,			/* pc_relative.  */
1698 	 0,			/* bitpos.  */
1699 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1700 	 bfd_elf_generic_reloc,	/* special_function.  */
1701 	 "R_ARM_THM_ALU_ABS_G0_NC",/* name.  */
1702 	 FALSE,			/* partial_inplace.  */
1703 	 0x00000000,		/* src_mask.  */
1704 	 0x00000000,		/* dst_mask.  */
1705 	 FALSE),		/* pcrel_offset.  */
1706   HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type.  */
1707 	 0,			/* rightshift.  */
1708 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1709 	 16,			/* bitsize.  */
1710 	 FALSE,			/* pc_relative.  */
1711 	 0,			/* bitpos.  */
1712 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1713 	 bfd_elf_generic_reloc,	/* special_function.  */
1714 	 "R_ARM_THM_ALU_ABS_G1_NC",/* name.  */
1715 	 FALSE,			/* partial_inplace.  */
1716 	 0x00000000,		/* src_mask.  */
1717 	 0x00000000,		/* dst_mask.  */
1718 	 FALSE),		/* pcrel_offset.  */
1719   HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type.  */
1720 	 0,			/* rightshift.  */
1721 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1722 	 16,			/* bitsize.  */
1723 	 FALSE,			/* pc_relative.  */
1724 	 0,			/* bitpos.  */
1725 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1726 	 bfd_elf_generic_reloc,	/* special_function.  */
1727 	 "R_ARM_THM_ALU_ABS_G2_NC",/* name.  */
1728 	 FALSE,			/* partial_inplace.  */
1729 	 0x00000000,		/* src_mask.  */
1730 	 0x00000000,		/* dst_mask.  */
1731 	 FALSE),		/* pcrel_offset.  */
1732   HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type.  */
1733 	 0,			/* rightshift.  */
1734 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1735 	 16,			/* bitsize.  */
1736 	 FALSE,			/* pc_relative.  */
1737 	 0,			/* bitpos.  */
1738 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1739 	 bfd_elf_generic_reloc,	/* special_function.  */
1740 	 "R_ARM_THM_ALU_ABS_G3_NC",/* name.  */
1741 	 FALSE,			/* partial_inplace.  */
1742 	 0x00000000,		/* src_mask.  */
1743 	 0x00000000,		/* dst_mask.  */
1744 	 FALSE),		/* pcrel_offset.  */
1745 };
1746 
1747 /* Relocation types 160 (R_ARM_IRELATIVE) onwards:  */
1748 static reloc_howto_type elf32_arm_howto_table_2[8] =
1749 {
1750   HOWTO (R_ARM_IRELATIVE,	/* type */
1751 	 0,			/* rightshift */
1752 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1753 	 32,			/* bitsize */
1754 	 FALSE,			/* pc_relative */
1755 	 0,			/* bitpos */
1756 	 complain_overflow_bitfield,/* complain_on_overflow */
1757 	 bfd_elf_generic_reloc, /* special_function */
1758 	 "R_ARM_IRELATIVE",	/* name */
1759 	 TRUE,			/* partial_inplace */
1760 	 0xffffffff,		/* src_mask */
1761 	 0xffffffff,		/* dst_mask */
1762 	 FALSE),		/* pcrel_offset */
1763   HOWTO (R_ARM_GOTFUNCDESC,	/* type */
1764 	 0,			/* rightshift */
1765 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1766 	 32,			/* bitsize */
1767 	 FALSE,			/* pc_relative */
1768 	 0,			/* bitpos */
1769 	 complain_overflow_bitfield,/* complain_on_overflow */
1770 	 bfd_elf_generic_reloc,	/* special_function */
1771 	 "R_ARM_GOTFUNCDESC",	/* name */
1772 	 FALSE,			/* partial_inplace */
1773 	 0,			/* src_mask */
1774 	 0xffffffff,		/* dst_mask */
1775 	 FALSE),		/* pcrel_offset */
1776   HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1777 	 0,			/* rightshift */
1778 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1779 	 32,			/* bitsize */
1780 	 FALSE,			/* pc_relative */
1781 	 0,			/* bitpos */
1782 	 complain_overflow_bitfield,/* complain_on_overflow */
1783 	 bfd_elf_generic_reloc,	/* special_function */
1784 	 "R_ARM_GOTOFFFUNCDESC",/* name */
1785 	 FALSE,			/* partial_inplace */
1786 	 0,			/* src_mask */
1787 	 0xffffffff,		/* dst_mask */
1788 	 FALSE),		/* pcrel_offset */
1789   HOWTO (R_ARM_FUNCDESC,	/* type */
1790 	 0,			/* rightshift */
1791 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1792 	 32,			/* bitsize */
1793 	 FALSE,			/* pc_relative */
1794 	 0,			/* bitpos */
1795 	 complain_overflow_bitfield,/* complain_on_overflow */
1796 	 bfd_elf_generic_reloc,	/* special_function */
1797 	 "R_ARM_FUNCDESC",	/* name */
1798 	 FALSE,			/* partial_inplace */
1799 	 0,			/* src_mask */
1800 	 0xffffffff,		/* dst_mask */
1801 	 FALSE),		/* pcrel_offset */
1802   HOWTO (R_ARM_FUNCDESC_VALUE,	/* type */
1803 	 0,			/* rightshift */
1804 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1805 	 64,			/* bitsize */
1806 	 FALSE,			/* pc_relative */
1807 	 0,			/* bitpos */
1808 	 complain_overflow_bitfield,/* complain_on_overflow */
1809 	 bfd_elf_generic_reloc,	/* special_function */
1810 	 "R_ARM_FUNCDESC_VALUE",/* name */
1811 	 FALSE,			/* partial_inplace */
1812 	 0,			/* src_mask */
1813 	 0xffffffff,		/* dst_mask */
1814 	 FALSE),		/* pcrel_offset */
1815   HOWTO (R_ARM_TLS_GD32_FDPIC,	/* type */
1816 	 0,			/* rightshift */
1817 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1818 	 32,			/* bitsize */
1819 	 FALSE,			/* pc_relative */
1820 	 0,			/* bitpos */
1821 	 complain_overflow_bitfield,/* complain_on_overflow */
1822 	 bfd_elf_generic_reloc,	/* special_function */
1823 	 "R_ARM_TLS_GD32_FDPIC",/* name */
1824 	 FALSE,			/* partial_inplace */
1825 	 0,			/* src_mask */
1826 	 0xffffffff,		/* dst_mask */
1827 	 FALSE),		/* pcrel_offset */
1828   HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
1829 	 0,			/* rightshift */
1830 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1831 	 32,			/* bitsize */
1832 	 FALSE,			/* pc_relative */
1833 	 0,			/* bitpos */
1834 	 complain_overflow_bitfield,/* complain_on_overflow */
1835 	 bfd_elf_generic_reloc,	/* special_function */
1836 	 "R_ARM_TLS_LDM32_FDPIC",/* name */
1837 	 FALSE,			/* partial_inplace */
1838 	 0,			/* src_mask */
1839 	 0xffffffff,		/* dst_mask */
1840 	 FALSE),		/* pcrel_offset */
1841   HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
1842 	 0,			/* rightshift */
1843 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1844 	 32,			/* bitsize */
1845 	 FALSE,			/* pc_relative */
1846 	 0,			/* bitpos */
1847 	 complain_overflow_bitfield,/* complain_on_overflow */
1848 	 bfd_elf_generic_reloc,	/* special_function */
1849 	 "R_ARM_TLS_IE32_FDPIC",/* name */
1850 	 FALSE,			/* partial_inplace */
1851 	 0,			/* src_mask */
1852 	 0xffffffff,		/* dst_mask */
1853 	 FALSE),		/* pcrel_offset */
1854 };
1855 
1856 /* 249-255 extended, currently unused, relocations:  */
1857 static reloc_howto_type elf32_arm_howto_table_3[4] =
1858 {
1859   HOWTO (R_ARM_RREL32,		/* type */
1860 	 0,			/* rightshift */
1861 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1862 	 0,			/* bitsize */
1863 	 FALSE,			/* pc_relative */
1864 	 0,			/* bitpos */
1865 	 complain_overflow_dont,/* complain_on_overflow */
1866 	 bfd_elf_generic_reloc,	/* special_function */
1867 	 "R_ARM_RREL32",	/* name */
1868 	 FALSE,			/* partial_inplace */
1869 	 0,			/* src_mask */
1870 	 0,			/* dst_mask */
1871 	 FALSE),		/* pcrel_offset */
1872 
1873   HOWTO (R_ARM_RABS32,		/* type */
1874 	 0,			/* rightshift */
1875 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1876 	 0,			/* bitsize */
1877 	 FALSE,			/* pc_relative */
1878 	 0,			/* bitpos */
1879 	 complain_overflow_dont,/* complain_on_overflow */
1880 	 bfd_elf_generic_reloc,	/* special_function */
1881 	 "R_ARM_RABS32",	/* name */
1882 	 FALSE,			/* partial_inplace */
1883 	 0,			/* src_mask */
1884 	 0,			/* dst_mask */
1885 	 FALSE),		/* pcrel_offset */
1886 
1887   HOWTO (R_ARM_RPC24,		/* type */
1888 	 0,			/* rightshift */
1889 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1890 	 0,			/* bitsize */
1891 	 FALSE,			/* pc_relative */
1892 	 0,			/* bitpos */
1893 	 complain_overflow_dont,/* complain_on_overflow */
1894 	 bfd_elf_generic_reloc,	/* special_function */
1895 	 "R_ARM_RPC24",		/* name */
1896 	 FALSE,			/* partial_inplace */
1897 	 0,			/* src_mask */
1898 	 0,			/* dst_mask */
1899 	 FALSE),		/* pcrel_offset */
1900 
1901   HOWTO (R_ARM_RBASE,		/* type */
1902 	 0,			/* rightshift */
1903 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1904 	 0,			/* bitsize */
1905 	 FALSE,			/* pc_relative */
1906 	 0,			/* bitpos */
1907 	 complain_overflow_dont,/* complain_on_overflow */
1908 	 bfd_elf_generic_reloc,	/* special_function */
1909 	 "R_ARM_RBASE",		/* name */
1910 	 FALSE,			/* partial_inplace */
1911 	 0,			/* src_mask */
1912 	 0,			/* dst_mask */
1913 	 FALSE)			/* pcrel_offset */
1914 };
1915 
1916 static reloc_howto_type *
1917 elf32_arm_howto_from_type (unsigned int r_type)
1918 {
1919   if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1920     return &elf32_arm_howto_table_1[r_type];
1921 
1922   if (r_type >= R_ARM_IRELATIVE
1923       && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1924     return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1925 
1926   if (r_type >= R_ARM_RREL32
1927       && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1928     return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1929 
1930   return NULL;
1931 }
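/* A minimal usage sketch of the lookup above (illustrative only, not part
   of the original sources): R_ARM_PC24 (type 1) indexes straight into
   elf32_arm_howto_table_1, while R_ARM_IRELATIVE (160) lands at index 0
   of elf32_arm_howto_table_2:

     reloc_howto_type *h = elf32_arm_howto_from_type (R_ARM_IRELATIVE);

   leaves h == &elf32_arm_howto_table_2[0].  An r_type that falls in a gap
   between the three tables yields NULL, which elf32_arm_info_to_howto
   below reports as an unsupported relocation.  */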
1932 
1933 static bfd_boolean
1934 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1935 			 Elf_Internal_Rela * elf_reloc)
1936 {
1937   unsigned int r_type;
1938 
1939   r_type = ELF32_R_TYPE (elf_reloc->r_info);
1940   if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1941     {
1942       /* xgettext:c-format */
1943       _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1944 			  abfd, r_type);
1945       bfd_set_error (bfd_error_bad_value);
1946       return FALSE;
1947     }
1948   return TRUE;
1949 }
1950 
1951 struct elf32_arm_reloc_map
1952   {
1953     bfd_reloc_code_real_type  bfd_reloc_val;
1954     unsigned char	      elf_reloc_val;
1955   };
1956 
1957 /* All entries in this list must also be present in elf32_arm_howto_table.  */
1958 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1959   {
1960     {BFD_RELOC_NONE,		     R_ARM_NONE},
1961     {BFD_RELOC_ARM_PCREL_BRANCH,     R_ARM_PC24},
1962     {BFD_RELOC_ARM_PCREL_CALL,	     R_ARM_CALL},
1963     {BFD_RELOC_ARM_PCREL_JUMP,	     R_ARM_JUMP24},
1964     {BFD_RELOC_ARM_PCREL_BLX,	     R_ARM_XPC25},
1965     {BFD_RELOC_THUMB_PCREL_BLX,	     R_ARM_THM_XPC22},
1966     {BFD_RELOC_32,		     R_ARM_ABS32},
1967     {BFD_RELOC_32_PCREL,	     R_ARM_REL32},
1968     {BFD_RELOC_8,		     R_ARM_ABS8},
1969     {BFD_RELOC_16,		     R_ARM_ABS16},
1970     {BFD_RELOC_ARM_OFFSET_IMM,	     R_ARM_ABS12},
1971     {BFD_RELOC_ARM_THUMB_OFFSET,     R_ARM_THM_ABS5},
1972     {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1973     {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1974     {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1975     {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1976     {BFD_RELOC_THUMB_PCREL_BRANCH9,  R_ARM_THM_JUMP8},
1977     {BFD_RELOC_THUMB_PCREL_BRANCH7,  R_ARM_THM_JUMP6},
1978     {BFD_RELOC_ARM_GLOB_DAT,	     R_ARM_GLOB_DAT},
1979     {BFD_RELOC_ARM_JUMP_SLOT,	     R_ARM_JUMP_SLOT},
1980     {BFD_RELOC_ARM_RELATIVE,	     R_ARM_RELATIVE},
1981     {BFD_RELOC_ARM_GOTOFF,	     R_ARM_GOTOFF32},
1982     {BFD_RELOC_ARM_GOTPC,	     R_ARM_GOTPC},
1983     {BFD_RELOC_ARM_GOT_PREL,	     R_ARM_GOT_PREL},
1984     {BFD_RELOC_ARM_GOT32,	     R_ARM_GOT32},
1985     {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
1986     {BFD_RELOC_ARM_TARGET1,	     R_ARM_TARGET1},
1987     {BFD_RELOC_ARM_ROSEGREL32,	     R_ARM_ROSEGREL32},
1988     {BFD_RELOC_ARM_SBREL32,	     R_ARM_SBREL32},
1989     {BFD_RELOC_ARM_PREL31,	     R_ARM_PREL31},
1990     {BFD_RELOC_ARM_TARGET2,	     R_ARM_TARGET2},
1991     {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
1992     {BFD_RELOC_ARM_TLS_GOTDESC,	     R_ARM_TLS_GOTDESC},
1993     {BFD_RELOC_ARM_TLS_CALL,	     R_ARM_TLS_CALL},
1994     {BFD_RELOC_ARM_THM_TLS_CALL,     R_ARM_THM_TLS_CALL},
1995     {BFD_RELOC_ARM_TLS_DESCSEQ,	     R_ARM_TLS_DESCSEQ},
1996     {BFD_RELOC_ARM_THM_TLS_DESCSEQ,  R_ARM_THM_TLS_DESCSEQ},
1997     {BFD_RELOC_ARM_TLS_DESC,	     R_ARM_TLS_DESC},
1998     {BFD_RELOC_ARM_TLS_GD32,	     R_ARM_TLS_GD32},
1999     {BFD_RELOC_ARM_TLS_LDO32,	     R_ARM_TLS_LDO32},
2000     {BFD_RELOC_ARM_TLS_LDM32,	     R_ARM_TLS_LDM32},
2001     {BFD_RELOC_ARM_TLS_DTPMOD32,     R_ARM_TLS_DTPMOD32},
2002     {BFD_RELOC_ARM_TLS_DTPOFF32,     R_ARM_TLS_DTPOFF32},
2003     {BFD_RELOC_ARM_TLS_TPOFF32,	     R_ARM_TLS_TPOFF32},
2004     {BFD_RELOC_ARM_TLS_IE32,	     R_ARM_TLS_IE32},
2005     {BFD_RELOC_ARM_TLS_LE32,	     R_ARM_TLS_LE32},
2006     {BFD_RELOC_ARM_IRELATIVE,	     R_ARM_IRELATIVE},
2007     {BFD_RELOC_ARM_GOTFUNCDESC,      R_ARM_GOTFUNCDESC},
2008     {BFD_RELOC_ARM_GOTOFFFUNCDESC,   R_ARM_GOTOFFFUNCDESC},
2009     {BFD_RELOC_ARM_FUNCDESC,         R_ARM_FUNCDESC},
2010     {BFD_RELOC_ARM_FUNCDESC_VALUE,   R_ARM_FUNCDESC_VALUE},
2011     {BFD_RELOC_ARM_TLS_GD32_FDPIC,   R_ARM_TLS_GD32_FDPIC},
2012     {BFD_RELOC_ARM_TLS_LDM32_FDPIC,  R_ARM_TLS_LDM32_FDPIC},
2013     {BFD_RELOC_ARM_TLS_IE32_FDPIC,   R_ARM_TLS_IE32_FDPIC},
2014     {BFD_RELOC_VTABLE_INHERIT,	     R_ARM_GNU_VTINHERIT},
2015     {BFD_RELOC_VTABLE_ENTRY,	     R_ARM_GNU_VTENTRY},
2016     {BFD_RELOC_ARM_MOVW,	     R_ARM_MOVW_ABS_NC},
2017     {BFD_RELOC_ARM_MOVT,	     R_ARM_MOVT_ABS},
2018     {BFD_RELOC_ARM_MOVW_PCREL,	     R_ARM_MOVW_PREL_NC},
2019     {BFD_RELOC_ARM_MOVT_PCREL,	     R_ARM_MOVT_PREL},
2020     {BFD_RELOC_ARM_THUMB_MOVW,	     R_ARM_THM_MOVW_ABS_NC},
2021     {BFD_RELOC_ARM_THUMB_MOVT,	     R_ARM_THM_MOVT_ABS},
2022     {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2023     {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2024     {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2025     {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2026     {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2027     {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2028     {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2029     {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2030     {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2031     {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2032     {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2033     {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2034     {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2035     {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2036     {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2037     {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2038     {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2039     {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2040     {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2041     {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2042     {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2043     {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2044     {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2045     {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2046     {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2047     {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2048     {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2049     {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2050     {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2051     {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2052     {BFD_RELOC_ARM_V4BX,	     R_ARM_V4BX},
2053     {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2054     {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2055     {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2056     {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
2057   };
2058 
2059 static reloc_howto_type *
2060 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2061 			     bfd_reloc_code_real_type code)
2062 {
2063   unsigned int i;
2064 
2065   for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2066     if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2067       return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2068 
2069   return NULL;
2070 }
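/* Illustrative sketch (not part of the original sources): generic BFD code
   normally reaches the function above through the public
   bfd_reloc_type_lookup wrapper rather than calling it directly, e.g.

     reloc_howto_type *howto = bfd_reloc_type_lookup (abfd, BFD_RELOC_32);

   which, for an ARM ELF bfd, resolves via elf32_arm_reloc_map to the
   R_ARM_ABS32 HOWTO; a code with no mapping returns NULL.  The linear scan
   is assumed to be cheap enough for a table of this size that no hashing
   is needed.  */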
2071 
2072 static reloc_howto_type *
2073 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2074 			     const char *r_name)
2075 {
2076   unsigned int i;
2077 
2078   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2079     if (elf32_arm_howto_table_1[i].name != NULL
2080 	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2081       return &elf32_arm_howto_table_1[i];
2082 
2083   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2084     if (elf32_arm_howto_table_2[i].name != NULL
2085 	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2086       return &elf32_arm_howto_table_2[i];
2087 
2088   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2089     if (elf32_arm_howto_table_3[i].name != NULL
2090 	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2091       return &elf32_arm_howto_table_3[i];
2092 
2093   return NULL;
2094 }
2095 
2096 /* Support for core dump NOTE sections.  */
2097 
2098 static bfd_boolean
2099 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2100 {
2101   int offset;
2102   size_t size;
2103 
2104   switch (note->descsz)
2105     {
2106       default:
2107 	return FALSE;
2108 
2109       case 148:		/* Linux/ARM 32-bit.  */
2110 	/* pr_cursig */
2111 	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2112 
2113 	/* pr_pid */
2114 	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2115 
2116 	/* pr_reg */
2117 	offset = 72;
2118 	size = 72;
2119 
2120 	break;
2121     }
2122 
2123   /* Make a ".reg/999" section.  */
2124   return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2125 					  size, note->descpos + offset);
2126 }
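/* For reference, the offsets used above (assumed from the Linux/ARM
   struct elf_prstatus layout, not re-derived here): within the 148-byte
   note, pr_cursig sits at offset 12, pr_pid at offset 24 and pr_reg at
   offset 72.  The register block is 18 32-bit words (r0-r15, cpsr and
   orig_r0), hence size == 72 and offset + size == 144 <= 148.  */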
2127 
2128 static bfd_boolean
2129 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2130 {
2131   switch (note->descsz)
2132     {
2133       default:
2134 	return FALSE;
2135 
2136       case 124:		/* Linux/ARM elf_prpsinfo.  */
2137 	elf_tdata (abfd)->core->pid
2138 	 = bfd_get_32 (abfd, note->descdata + 12);
2139 	elf_tdata (abfd)->core->program
2140 	 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2141 	elf_tdata (abfd)->core->command
2142 	 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2143     }
2144 
2145   /* Note that, for some reason, some implementations (at least one,
2146      anyway) tack a spurious space onto the end of the args, so strip
2147      it off if it exists.  */
2148   {
2149     char *command = elf_tdata (abfd)->core->command;
2150     int n = strlen (command);
2151 
2152     if (0 < n && command[n - 1] == ' ')
2153       command[n - 1] = '\0';
2154   }
2155 
2156   return TRUE;
2157 }
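/* Likewise for reference (offsets assumed from the 124-byte Linux/ARM
   elf_prpsinfo handled above): pr_pid sits at offset 12, the 16-byte
   pr_fname at offset 28 and the 80-byte pr_psargs at offset 44, which is
   why elf32_arm_nabi_write_core_note below fills its NT_PRPSINFO buffer
   at exactly those offsets.  */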
2158 
2159 static char *
2160 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2161 				int note_type, ...)
2162 {
2163   switch (note_type)
2164     {
2165     default:
2166       return NULL;
2167 
2168     case NT_PRPSINFO:
2169       {
2170 	char data[124] ATTRIBUTE_NONSTRING;
2171 	va_list ap;
2172 
2173 	va_start (ap, note_type);
2174 	memset (data, 0, sizeof (data));
2175 	strncpy (data + 28, va_arg (ap, const char *), 16);
2176 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2177 	DIAGNOSTIC_PUSH;
2178 	/* GCC 8.0 and 8.1 warn that the bound of 80 equals the destination
2179 	   size with -Wstringop-truncation:
2180 	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2181 	 */
2182 	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2183 #endif
2184 	strncpy (data + 44, va_arg (ap, const char *), 80);
2185 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2186 	DIAGNOSTIC_POP;
2187 #endif
2188 	va_end (ap);
2189 
2190 	return elfcore_write_note (abfd, buf, bufsiz,
2191 				   "CORE", note_type, data, sizeof (data));
2192       }
2193 
2194     case NT_PRSTATUS:
2195       {
2196 	char data[148];
2197 	va_list ap;
2198 	long pid;
2199 	int cursig;
2200 	const void *greg;
2201 
2202 	va_start (ap, note_type);
2203 	memset (data, 0, sizeof (data));
2204 	pid = va_arg (ap, long);
2205 	bfd_put_32 (abfd, pid, data + 24);
2206 	cursig = va_arg (ap, int);
2207 	bfd_put_16 (abfd, cursig, data + 12);
2208 	greg = va_arg (ap, const void *);
2209 	memcpy (data + 72, greg, 72);
2210 	va_end (ap);
2211 
2212 	return elfcore_write_note (abfd, buf, bufsiz,
2213 				   "CORE", note_type, data, sizeof (data));
2214       }
2215     }
2216 }
2217 
2218 #define TARGET_LITTLE_SYM		arm_elf32_le_vec
2219 #define TARGET_LITTLE_NAME		"elf32-littlearm"
2220 #define TARGET_BIG_SYM			arm_elf32_be_vec
2221 #define TARGET_BIG_NAME			"elf32-bigarm"
2222 
2223 #define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
2224 #define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
2225 #define elf_backend_write_core_note	elf32_arm_nabi_write_core_note
2226 
2227 typedef unsigned long int insn32;
2228 typedef unsigned short int insn16;
2229 
2230 /* In lieu of proper flags, assume all EABIv4 or later objects are
2231    interworkable.  */
2232 #define INTERWORK_FLAG(abfd)  \
2233   (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2234   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2235   || ((abfd)->flags & BFD_LINKER_CREATED))
2236 
2237 /* The linker script knows the section names for placement.
2238    The entry_names are used to do simple name mangling on the stubs.
2239    Given a function name and its type, the stub can be found.  The
2240    name can be changed.  The only requirement is that the %s be present.  */
2241 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2242 #define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"
2243 
2244 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2245 #define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"
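/* Illustrative example of the mangling described above (the formatting
   itself happens elsewhere in this file; the buffer name below is made
   up): for a function "foo" called from Thumb code, the Thumb-to-ARM glue
   symbol would be built roughly as

     char glue_name[64];
     sprintf (glue_name, THUMB2ARM_GLUE_ENTRY_NAME, "foo");

   giving "__foo_from_thumb" in the .glue_7t section, and correspondingly
   "__foo_from_arm" in .glue_7 for calls in the other direction.  */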
2246 
2247 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2248 #define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"
2249 
2250 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2251 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"
2252 
2253 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2254 #define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"
2255 
2256 #define STUB_ENTRY_NAME   "__%s_veneer"
2257 
2258 #define CMSE_PREFIX "__acle_se_"
2259 
2260 /* The name of the dynamic interpreter.  This is put in the .interp
2261    section.  */
2262 #define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2263 
2264 /* FDPIC default stack size.  */
2265 #define DEFAULT_STACK_SIZE 0x8000
2266 
2267 static const unsigned long tls_trampoline [] =
2268 {
2269   0xe08e0000,		/* add r0, lr, r0 */
2270   0xe5901004,		/* ldr r1, [r0,#4] */
2271   0xe12fff11,		/* bx  r1 */
2272 };
2273 
2274 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2275 {
2276   0xe52d2004, /*	push    {r2}			*/
2277   0xe59f200c, /*      ldr     r2, [pc, #3f - . - 8]	*/
2278   0xe59f100c, /*      ldr     r1, [pc, #4f - . - 8]	*/
2279   0xe79f2002, /* 1:   ldr     r2, [pc, r2]		*/
2280   0xe081100f, /* 2:   add     r1, pc			*/
2281   0xe12fff12, /*      bx      r2			*/
2282   0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
2283 				+ dl_tlsdesc_lazy_resolver(GOT)   */
2284   0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2285 };
2286 
2287 /* ARM FDPIC PLT entry.  */
2288 /* The last 5 words contain PLT lazy fragment code and data.  */
2289 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2290   {
2291     0xe59fc008,    /* ldr     r12, .L1 */
2292     0xe08cc009,    /* add     r12, r12, r9 */
2293     0xe59c9004,    /* ldr     r9, [r12, #4] */
2294     0xe59cf000,    /* ldr     pc, [r12] */
2295     0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
2296     0x00000000,    /* L2.     .word   foo(funcdesc_value_reloc_offset) */
2297     0xe51fc00c,    /* ldr     r12, [pc, #-12] */
2298     0xe92d1000,    /* push    {r12} */
2299     0xe599c004,    /* ldr     r12, [r9, #4] */
2300     0xe599f000,    /* ldr     pc, [r9] */
2301   };
2302 
2303 /* Thumb FDPIC PLT entry.  */
2304 /* The last 5 words contain PLT lazy fragment code and data.  */
2305 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2306   {
2307     0xc00cf8df,    /* ldr.w   r12, .L1 */
2308     0x0c09eb0c,    /* add.w   r12, r12, r9 */
2309     0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
2310     0xf000f8dc,    /* ldr.w   pc, [r12] */
2311     0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
2312     0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
2313     0xc008f85f,    /* ldr.w   r12, .L2 */
2314     0xcd04f84d,    /* push    {r12} */
2315     0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
2316     0xf000f8d9,    /* ldr.w   pc, [r9] */
2317   };
2318 
2319 #ifdef FOUR_WORD_PLT
2320 
2321 /* The first entry in a procedure linkage table looks like
2322    this.  It is set up so that any shared library function that is
2323    called before the relocation has been set up calls the dynamic
2324    linker first.  */
2325 static const bfd_vma elf32_arm_plt0_entry [] =
2326 {
2327   0xe52de004,		/* str   lr, [sp, #-4]! */
2328   0xe59fe010,		/* ldr   lr, [pc, #16]  */
2329   0xe08fe00e,		/* add   lr, pc, lr     */
2330   0xe5bef008,		/* ldr   pc, [lr, #8]!  */
2331 };
2332 
2333 /* Subsequent entries in a procedure linkage table look like
2334    this.  */
2335 static const bfd_vma elf32_arm_plt_entry [] =
2336 {
2337   0xe28fc600,		/* add   ip, pc, #NN	*/
2338   0xe28cca00,		/* add	 ip, ip, #NN	*/
2339   0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
2340   0x00000000,		/* unused		*/
2341 };
2342 
2343 #else /* not FOUR_WORD_PLT */
2344 
2345 /* The first entry in a procedure linkage table looks like
2346    this.  It is set up so that any shared library function that is
2347    called before the relocation has been set up calls the dynamic
2348    linker first.  */
2349 static const bfd_vma elf32_arm_plt0_entry [] =
2350 {
2351   0xe52de004,		/* str	 lr, [sp, #-4]! */
2352   0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
2353   0xe08fe00e,		/* add	 lr, pc, lr	*/
2354   0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
2355   0x00000000,		/* &GOT[0] - .		*/
2356 };
2357 
2358 /* By default, subsequent entries in a procedure linkage table look like
2359    this.  Offsets that don't fit into 28 bits will cause a link error.  */
2360 static const bfd_vma elf32_arm_plt_entry_short [] =
2361 {
2362   0xe28fc600,		/* add   ip, pc, #0xNN00000 */
2363   0xe28cca00,		/* add	 ip, ip, #0xNN000   */
2364   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
2365 };
2366 
2367 /* When explicitly asked, we'll use this "long" entry format
2368    which can cope with arbitrary displacements.  */
2369 static const bfd_vma elf32_arm_plt_entry_long [] =
2370 {
2371   0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
2372   0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
2373   0xe28cca00,		/* add	 ip, ip, #0xNN000    */
2374   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
2375 };
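/* Worked example of the immediate split sketched in the comments above
   (illustrative only): the short entry covers 8 + 8 + 12 == 28 bits of
   displacement, the long entry a full 4 + 8 + 8 + 12 == 32 bits.  A
   displacement of 0x12345678 would be encoded in the long form as

     add  ip, pc, #0x10000000
     add  ip, ip, #0x02300000
     add  ip, ip, #0x00045000
     ldr  pc, [ip, #0x678]!

   since 0x10000000 + 0x02300000 + 0x00045000 + 0x678 == 0x12345678.  */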
2376 
2377 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2378 
2379 #endif /* not FOUR_WORD_PLT */
2380 
2381 /* The first entry in a procedure linkage table looks like this.
2382    It is set up so that any shared library function that is called before the
2383    relocation has been set up calls the dynamic linker first.  */
2384 static const bfd_vma elf32_thumb2_plt0_entry [] =
2385 {
2386   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2387      an instruction may be encoded in one or two array elements.  */
2388   0xf8dfb500,		/* push	   {lr}		 */
2389   0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
2390 			/* add	   lr, pc	 */
2391   0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
2392   0x00000000,		/* &GOT[0] - .		 */
2393 };
2394 
2395 /* Subsequent entries in a procedure linkage table for a Thumb-only
2396    target look like this.  */
2397 static const bfd_vma elf32_thumb2_plt_entry [] =
2398 {
2399   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2400      an instruction may be encoded in one or two array elements.  */
2401   0x0c00f240,		/* movw	   ip, #0xNNNN	  */
2402   0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
2403   0xf8dc44fc,		/* add	   ip, pc	  */
2404   0xbf00f000		/* ldr.w   pc, [ip]	  */
2405 			/* nop			  */
2406 };
2407 
2408 /* The format of the first entry in the procedure linkage table
2409    for a VxWorks executable.  */
2410 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2411 {
2412   0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
2413   0xe59fc000,		/* ldr	  ip,[pc]			*/
2414   0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
2415   0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
2416 };
2417 
2418 /* The format of subsequent entries in a VxWorks executable.  */
2419 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2420 {
2421   0xe59fc000,	      /* ldr	ip,[pc]			*/
2422   0xe59cf000,	      /* ldr	pc,[ip]			*/
2423   0x00000000,	      /* .long	@got				*/
2424   0xe59fc000,	      /* ldr	ip,[pc]			*/
2425   0xea000000,	      /* b	_PLT				*/
2426   0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela)	*/
2427 };
2428 
2429 /* The format of entries in a VxWorks shared library.  */
2430 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2431 {
2432   0xe59fc000,	      /* ldr	ip,[pc]			*/
2433   0xe79cf009,	      /* ldr	pc,[ip,r9]			*/
2434   0x00000000,	      /* .long	@got				*/
2435   0xe59fc000,	      /* ldr	ip,[pc]			*/
2436   0xe599f008,	      /* ldr	pc,[r9,#8]			*/
2437   0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela)	*/
2438 };
2439 
2440 /* An initial stub used if the PLT entry is referenced from Thumb code.  */
2441 #define PLT_THUMB_STUB_SIZE 4
2442 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2443 {
2444   0x4778,		/* bx pc */
2445   0x46c0		/* nop   */
2446 };
2447 
2448 /* The entries in a PLT when using a DLL-based target with multiple
2449    address spaces.  */
2450 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2451 {
2452   0xe51ff004,	      /* ldr   pc, [pc, #-4] */
2453   0x00000000,	      /* dcd   R_ARM_GLOB_DAT(X) */
2454 };
2455 
2456 /* The first entry in a procedure linkage table looks like
2457    this.  It is set up so that any shared library function that is
2458    called before the relocation has been set up calls the dynamic
2459    linker first.  */
2460 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2461 {
2462   /* First bundle: */
2463   0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
2464   0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
2465   0xe08cc00f,		/* add	ip, ip, pc			*/
2466   0xe52dc008,		/* str	ip, [sp, #-8]!			*/
2467   /* Second bundle: */
2468   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2469   0xe59cc000,		/* ldr	ip, [ip]			*/
2470   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2471   0xe12fff1c,		/* bx	ip				*/
2472   /* Third bundle: */
2473   0xe320f000,		/* nop					*/
2474   0xe320f000,		/* nop					*/
2475   0xe320f000,		/* nop					*/
2476   /* .Lplt_tail: */
2477   0xe50dc004,		/* str	ip, [sp, #-4]			*/
2478   /* Fourth bundle: */
2479   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2480   0xe59cc000,		/* ldr	ip, [ip]			*/
2481   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2482   0xe12fff1c,		/* bx	ip				*/
2483 };
2484 #define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)
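/* I.e. the byte offset of the ".Lplt_tail:" word above: words 0-10 are the
   first two bundles plus the three nops, so the shared tail starts at word
   index 11, which is 11 * 4 == 44 bytes into the entry.  */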
2485 
2486 /* Subsequent entries in a procedure linkage table look like this.  */
2487 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2488 {
2489   0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
2490   0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
2491   0xe08cc00f,		/* add	ip, ip, pc			*/
2492   0xea000000,		/* b	.Lplt_tail			*/
2493 };
2494 
2495 #define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
2496 #define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
2497 #define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
2498 #define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
2499 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2500 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2501 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2502 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
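/* For reference, the numeric ranges the macros above work out to (derived
   from the 24-bit ARM and 22/24-bit Thumb branch immediates plus the
   pipeline offset; sketch only): ARM_MAX_FWD_BRANCH_OFFSET is
   ((1 << 23) - 1) * 4 + 8 == 0x2000004 and ARM_MAX_BWD_BRANCH_OFFSET is
   -(1 << 25) + 8 == -0x1fffff8, i.e. roughly +/-32MB; the Thumb-2
   unconditional range is roughly +/-16MB and the Thumb-1 range roughly
   +/-4MB.  */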
2503 
2504 enum stub_insn_type
2505 {
2506   THUMB16_TYPE = 1,
2507   THUMB32_TYPE,
2508   ARM_TYPE,
2509   DATA_TYPE
2510 };
2511 
2512 #define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
2513 /* A bit of a hack.  A Thumb conditional branch, in which the proper condition
2514    is inserted in arm_build_one_stub().  */
2515 #define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
2516 #define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
2517 #define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2518 #define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2519 #define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2520 #define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
2521 #define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2522 #define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}
2523 
2524 typedef struct
2525 {
2526   bfd_vma	       data;
2527   enum stub_insn_type  type;
2528   unsigned int	       r_type;
2529   int		       reloc_addend;
2530 }  insn_sequence;
2531 
2532 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2533    to reach the stub if necessary.  */
2534 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2535 {
2536   ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
2537   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2538 };
2539 
2540 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2541    available.  */
2542 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2543 {
2544   ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
2545   ARM_INSN (0xe12fff1c),	    /* bx    ip */
2546   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2547 };
2548 
2549 /* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
2550 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2551 {
2552   THUMB16_INSN (0xb401),	     /* push {r0} */
2553   THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
2554   THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
2555   THUMB16_INSN (0xbc01),	     /* pop  {r0} */
2556   THUMB16_INSN (0x4760),	     /* bx   ip */
2557   THUMB16_INSN (0xbf00),	     /* nop */
2558   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2559 };
2560 
2561 /* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
2562 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2563 {
2564   THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
2565   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(x) */
2566 };
2567 
2568 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2569    M-profile architectures.  */
2570 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2571 {
2572   THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
2573   THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
2574   THUMB16_INSN (0x4760),	     /* bx   ip */
2575 };
2576 
2577 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2578    allowed.  */
2579 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2580 {
2581   THUMB16_INSN (0x4778),	     /* bx   pc */
2582   THUMB16_INSN (0x46c0),	     /* nop */
2583   ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
2584   ARM_INSN (0xe12fff1c),	     /* bx   ip */
2585   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2586 };
2587 
2588 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2589    available.  */
2590 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2591 {
2592   THUMB16_INSN (0x4778),	     /* bx   pc */
2593   THUMB16_INSN (0x46c0),	     /* nop   */
2594   ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
2595   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
2596 };
2597 
2598 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2599    one, when the destination is close enough.  */
2600 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2601 {
2602   THUMB16_INSN (0x4778),	     /* bx   pc */
2603   THUMB16_INSN (0x46c0),	     /* nop   */
2604   ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
2605 };
2606 
2607 /* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
2608    blx to reach the stub if necessary.  */
2609 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2610 {
2611   ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
2612   ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
2613   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2614 };
2615 
2616 /* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
2617    blx to reach the stub if necessary.  We cannot add into pc;
2618    it is not guaranteed to mode switch (the behaviour differs between
2619    ARMv6 and ARMv7).  */
2620 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2621 {
2622   ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
2623   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2624   ARM_INSN (0xe12fff1c),	     /* bx    ip */
2625   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2626 };
2627 
2628 /* V4T ARM -> ARM long branch stub, PIC.  */
2629 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2630 {
2631   ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
2632   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2633   ARM_INSN (0xe12fff1c),	     /* bx    ip */
2634   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2635 };
2636 
2637 /* V4T Thumb -> ARM long branch stub, PIC.  */
2638 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2639 {
2640   THUMB16_INSN (0x4778),	     /* bx   pc */
2641   THUMB16_INSN (0x46c0),	     /* nop  */
2642   ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
2643   ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
2644   DATA_WORD (0, R_ARM_REL32, -4),     /* dcd  R_ARM_REL32(X-4) */
2645 };
2646 
2647 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2648    architectures.  */
2649 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2650 {
2651   THUMB16_INSN (0xb401),	     /* push {r0} */
2652   THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
2653   THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
2654   THUMB16_INSN (0x4484),	     /* add  ip, r0 */
2655   THUMB16_INSN (0xbc01),	     /* pop  {r0} */
2656   THUMB16_INSN (0x4760),	     /* bx   ip */
2657   DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X+4) */
2658 };
2659 
2660 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2661    allowed.  */
2662 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2663 {
2664   THUMB16_INSN (0x4778),	     /* bx   pc */
2665   THUMB16_INSN (0x46c0),	     /* nop */
2666   ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
2667   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2668   ARM_INSN (0xe12fff1c),	     /* bx   ip */
2669   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
2670 };
2671 
2672 /* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
2673    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2674 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2675 {
2676   ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
2677   ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
2678   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2679 };
2680 
2681 /* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
2682    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2683 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2684 {
2685   THUMB16_INSN (0x4778),	     /* bx   pc */
2686   THUMB16_INSN (0x46c0),	     /* nop */
2687   ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
2688   ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
2689   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
2690 };
2691 
2692 /* NaCl ARM -> ARM long branch stub.  */
2693 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2694 {
2695   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2696   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2697   ARM_INSN (0xe12fff1c),		/* bx	ip */
2698   ARM_INSN (0xe320f000),		/* nop */
2699   ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
2700   DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
2701   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2702   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2703 };
2704 
2705 /* NaCl ARM -> ARM long branch stub, PIC.  */
2706 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2707 {
2708   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2709   ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
2710   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2711   ARM_INSN (0xe12fff1c),		/* bx	ip */
2712   ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
2713   DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
2714   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2715   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2716 };
2717 
2718 /* Stub used for transition to secure state (aka SG veneer).  */
2719 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2720 {
2721   THUMB32_INSN (0xe97fe97f),		/* sg.  */
2722   THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
2723 };
2724 
2725 
2726 /* Cortex-A8 erratum-workaround stubs.  */
2727 
2728 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2729    can't use a conditional branch to reach this stub).  */
2730 
2731 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2732 {
2733   THUMB16_BCOND_INSN (0xd001),	       /* b<cond>.n true.  */
2734   THUMB32_B_INSN (0xf000b800, -4),     /* b.w insn_after_original_branch.  */
2735   THUMB32_B_INSN (0xf000b800, -4)      /* true: b.w original_branch_dest.  */
2736 };
2737 
2738 /* Stub used for b.w and bl.w instructions.  */
2739 
2740 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2741 {
2742   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2743 };
2744 
2745 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2746 {
2747   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2748 };
2749 
2750 /* Stub used for Thumb-2 blx.w instructions.  The original blx.w instruction
2751    (which switches to ARM mode) is modified to point to this stub, which then
2752    jumps to the real destination using an ARM-mode branch.  */
2753 
2754 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2755 {
2756   ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
2757 };
2758 
2759 /* For each section group there can be a specially created linker section
2760    to hold the stubs for that group.  The name of the stub section is based
2761    upon the name of another section within that group with the suffix below
2762    applied.
2763 
2764    PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2765    create what appeared to be a linker stub section when it actually
2766    contained user code/data.  For example, consider this fragment:
2767 
2768      const char * stubborn_problems[] = { "np" };
2769 
2770    If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2771    section called:
2772 
2773      .data.rel.local.stubborn_problems
2774 
2775    This then causes problems in elf32_arm_build_stubs() as it triggers:
2776 
2777       // Ignore non-stub sections.
2778       if (!strstr (stub_sec->name, STUB_SUFFIX))
2779 	continue;
2780 
2781    And so the section would be ignored instead of being processed.  Hence
2782    the change in definition of STUB_SUFFIX to a name that cannot be a valid
2783    C identifier.  */
2784 #define STUB_SUFFIX ".__stub"
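/* So, for example, stubs grouped with an input section named ".text" would
   typically be placed in a section named ".text.__stub" (the exact name
   formation is assumed from the description above).  */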
2785 
2786 /* One entry per long/short branch stub defined above.  */
2787 #define DEF_STUBS \
2788   DEF_STUB(long_branch_any_any)	\
2789   DEF_STUB(long_branch_v4t_arm_thumb) \
2790   DEF_STUB(long_branch_thumb_only) \
2791   DEF_STUB(long_branch_v4t_thumb_thumb)	\
2792   DEF_STUB(long_branch_v4t_thumb_arm) \
2793   DEF_STUB(short_branch_v4t_thumb_arm) \
2794   DEF_STUB(long_branch_any_arm_pic) \
2795   DEF_STUB(long_branch_any_thumb_pic) \
2796   DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2797   DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2798   DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2799   DEF_STUB(long_branch_thumb_only_pic) \
2800   DEF_STUB(long_branch_any_tls_pic) \
2801   DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2802   DEF_STUB(long_branch_arm_nacl) \
2803   DEF_STUB(long_branch_arm_nacl_pic) \
2804   DEF_STUB(cmse_branch_thumb_only) \
2805   DEF_STUB(a8_veneer_b_cond) \
2806   DEF_STUB(a8_veneer_b) \
2807   DEF_STUB(a8_veneer_bl) \
2808   DEF_STUB(a8_veneer_blx) \
2809   DEF_STUB(long_branch_thumb2_only) \
2810   DEF_STUB(long_branch_thumb2_only_pure)
2811 
2812 #define DEF_STUB(x) arm_stub_##x,
2813 enum elf32_arm_stub_type
2814 {
2815   arm_stub_none,
2816   DEF_STUBS
2817   max_stub_type
2818 };
2819 #undef DEF_STUB
2820 
2821 /* Note the first a8_veneer type.  */
2822 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2823 
2824 typedef struct
2825 {
2826   const insn_sequence* template_sequence;
2827   int template_size;
2828 } stub_def;
2829 
2830 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2831 static const stub_def stub_definitions[] =
2832 {
2833   {NULL, 0},
2834   DEF_STUBS
2835 };
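/* Illustrative expansion of the DEF_STUBS X-macro (sketch only, showing
   just the first entry): with DEF_STUB(x) defined as "arm_stub_##x," the
   enum gains arm_stub_long_branch_any_any and friends, and with the
   definition above the table becomes

     static const stub_def stub_definitions[] =
     {
       {NULL, 0},
       {elf32_arm_stub_long_branch_any_any,
	ARRAY_SIZE (elf32_arm_stub_long_branch_any_any)},
       ...
     };

   so stub_definitions[stub_type] pairs each enum value with its template
   and template length, with arm_stub_none mapping to the {NULL, 0}
   placeholder.  */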
2836 
2837 struct elf32_arm_stub_hash_entry
2838 {
2839   /* Base hash table entry structure.  */
2840   struct bfd_hash_entry root;
2841 
2842   /* The stub section.  */
2843   asection *stub_sec;
2844 
2845   /* Offset within stub_sec of the beginning of this stub.  */
2846   bfd_vma stub_offset;
2847 
2848   /* Given the symbol's value and its section we can determine its final
2849      value when building the stubs (so the stub knows where to jump).  */
2850   bfd_vma target_value;
2851   asection *target_section;
2852 
2853   /* Same as above but for the source of the branch to the stub.  Used by
2854      the Cortex-A8 erratum workaround to patch the branch so that it targets
2855      the stub.  The source section does not need to be recorded, since
2856      Cortex-A8 erratum workaround stubs are only generated when both source
2857      and target are in the same section.  */
2858   bfd_vma source_value;
2859 
2860   /* The instruction which caused this stub to be generated (only valid for
2861      Cortex-A8 erratum workaround stubs at present).  */
2862   unsigned long orig_insn;
2863 
2864   /* The stub type.  */
2865   enum elf32_arm_stub_type stub_type;
2866   /* Its encoding size in bytes.  */
2867   int stub_size;
2868   /* Its template.  */
2869   const insn_sequence *stub_template;
2870   /* The size of the template (number of entries).  */
2871   int stub_template_size;
2872 
2873   /* The symbol table entry, if any, that this was derived from.  */
2874   struct elf32_arm_link_hash_entry *h;
2875 
2876   /* Type of branch.  */
2877   enum arm_st_branch_type branch_type;
2878 
2879   /* Where this stub is being called from, or, in the case of combined
2880      stub sections, the first input section in the group.  */
2881   asection *id_sec;
2882 
2883   /* The name for the local symbol at the start of this stub.  The
2884      stub name in the hash table has to be unique; this one does not,
2885      so it can be friendlier.  */
2886   char *output_name;
2887 };
2888 
2889 /* Used to build a map of a section.  This is required for mixed-endian
2890    code/data.  */
2891 
2892 typedef struct elf32_elf_section_map
2893 {
2894   bfd_vma vma;
2895   char type;
2896 }
2897 elf32_arm_section_map;
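/* For reference (convention assumed from the AAELF mapping symbols that
   populate this map): TYPE holds the mapping-symbol class character,
   i.e. 'a' for ARM code, 't' for Thumb code and 'd' for literal data, so a
   VMA-sorted array of these entries records where a section switches
   between code and data and hence how to byte-swap it.  */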
2898 
2899 /* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */
2900 
2901 typedef enum
2902 {
2903   VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2904   VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2905   VFP11_ERRATUM_ARM_VENEER,
2906   VFP11_ERRATUM_THUMB_VENEER
2907 }
2908 elf32_vfp11_erratum_type;
2909 
2910 typedef struct elf32_vfp11_erratum_list
2911 {
2912   struct elf32_vfp11_erratum_list *next;
2913   bfd_vma vma;
2914   union
2915   {
2916     struct
2917     {
2918       struct elf32_vfp11_erratum_list *veneer;
2919       unsigned int vfp_insn;
2920     } b;
2921     struct
2922     {
2923       struct elf32_vfp11_erratum_list *branch;
2924       unsigned int id;
2925     } v;
2926   } u;
2927   elf32_vfp11_erratum_type type;
2928 }
2929 elf32_vfp11_erratum_list;
2930 
2931 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2932    veneer.  */
2933 typedef enum
2934 {
2935   STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2936   STM32L4XX_ERRATUM_VENEER
2937 }
2938 elf32_stm32l4xx_erratum_type;
2939 
2940 typedef struct elf32_stm32l4xx_erratum_list
2941 {
2942   struct elf32_stm32l4xx_erratum_list *next;
2943   bfd_vma vma;
2944   union
2945   {
2946     struct
2947     {
2948       struct elf32_stm32l4xx_erratum_list *veneer;
2949       unsigned int insn;
2950     } b;
2951     struct
2952     {
2953       struct elf32_stm32l4xx_erratum_list *branch;
2954       unsigned int id;
2955     } v;
2956   } u;
2957   elf32_stm32l4xx_erratum_type type;
2958 }
2959 elf32_stm32l4xx_erratum_list;
2960 
2961 typedef enum
2962 {
2963   DELETE_EXIDX_ENTRY,
2964   INSERT_EXIDX_CANTUNWIND_AT_END
2965 }
2966 arm_unwind_edit_type;
2967 
2968 /* A (sorted) list of edits to apply to an unwind table.  */
2969 typedef struct arm_unwind_table_edit
2970 {
2971   arm_unwind_edit_type type;
2972   /* Note: we sometimes want to insert an unwind entry corresponding to a
2973      section different from the one we're currently writing out, so record the
2974      (text) section this edit relates to here.  */
2975   asection *linked_section;
2976   unsigned int index;
2977   struct arm_unwind_table_edit *next;
2978 }
2979 arm_unwind_table_edit;
2980 
2981 typedef struct _arm_elf_section_data
2982 {
2983   /* Information about mapping symbols.  */
2984   struct bfd_elf_section_data elf;
2985   unsigned int mapcount;
2986   unsigned int mapsize;
2987   elf32_arm_section_map *map;
2988   /* Information about CPU errata.  */
2989   unsigned int erratumcount;
2990   elf32_vfp11_erratum_list *erratumlist;
2991   unsigned int stm32l4xx_erratumcount;
2992   elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2993   unsigned int additional_reloc_count;
2994   /* Information about unwind tables.  */
2995   union
2996   {
2997     /* Unwind info attached to a text section.  */
2998     struct
2999     {
3000       asection *arm_exidx_sec;
3001     } text;
3002 
3003     /* Unwind info attached to an .ARM.exidx section.  */
3004     struct
3005     {
3006       arm_unwind_table_edit *unwind_edit_list;
3007       arm_unwind_table_edit *unwind_edit_tail;
3008     } exidx;
3009   } u;
3010 }
3011 _arm_elf_section_data;
3012 
3013 #define elf32_arm_section_data(sec) \
3014   ((_arm_elf_section_data *) elf_section_data (sec))
3015 
3016 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
3017    These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
3018    so may be created multiple times: whilst relaxing we use an array of these
3019    entries, which we can refresh easily, then create stubs for each potentially
3020    erratum-triggering instruction once we have settled on a solution.  */
3021 
3022 struct a8_erratum_fix
3023 {
3024   bfd *input_bfd;
3025   asection *section;
3026   bfd_vma offset;
3027   bfd_vma target_offset;
3028   unsigned long orig_insn;
3029   char *stub_name;
3030   enum elf32_arm_stub_type stub_type;
3031   enum arm_st_branch_type branch_type;
3032 };
3033 
3034 /* A table of relocs applied to branches which might trigger Cortex-A8
3035    erratum.  */
3036 
3037 struct a8_erratum_reloc
3038 {
3039   bfd_vma from;
3040   bfd_vma destination;
3041   struct elf32_arm_link_hash_entry *hash;
3042   const char *sym_name;
3043   unsigned int r_type;
3044   enum arm_st_branch_type branch_type;
3045   bfd_boolean non_a8_stub;
3046 };
3047 
3048 /* The size of the thread control block.  */
3049 #define TCB_SIZE	8
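/* Illustrative sketch of how TCB_SIZE enters the TLS offset calculation
   (assumed from the ARM "variant 1" TLS layout, not quoted from the code
   below): a local-exec offset is computed roughly as

     tpoff = TCB_SIZE (rounded up to the TLS segment alignment)
	     + offset of the symbol within the TLS block

   so the TLS block starts 8 bytes above the thread pointer, leaving room
   for the two-word thread control block.  */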
3050 
3051 /* ARM-specific information about a PLT entry, over and above the usual
3052    gotplt_union.  */
3053 struct arm_plt_info
3054 {
3055   /* We reference count Thumb references to a PLT entry separately,
3056      so that we can emit the Thumb trampoline only if needed.  */
3057   bfd_signed_vma thumb_refcount;
3058 
3059   /* Some references from Thumb code may be eliminated by BL->BLX
3060      conversion, so record them separately.  */
3061   bfd_signed_vma maybe_thumb_refcount;
3062 
3063   /* How many of the recorded PLT accesses were from non-call relocations.
3064      This information is useful when deciding whether anything takes the
3065      address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
3066      non-call references to the function should resolve directly to the
3067      real runtime target.  */
3068   unsigned int noncall_refcount;
3069 
3070   /* Since PLT entries have variable size if the Thumb prologue is
3071      used, we need to record the index into .got.plt instead of
3072      recomputing it from the PLT offset.  */
3073   bfd_signed_vma got_offset;
3074 };
3075 
3076 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
3077 struct arm_local_iplt_info
3078 {
3079   /* The information that is usually found in the generic ELF part of
3080      the hash table entry.  */
3081   union gotplt_union root;
3082 
3083   /* The information that is usually found in the ARM-specific part of
3084      the hash table entry.  */
3085   struct arm_plt_info arm;
3086 
3087   /* A list of all potential dynamic relocations against this symbol.  */
3088   struct elf_dyn_relocs *dyn_relocs;
3089 };
3090 
3091 /* Structure to handle FDPIC support for local functions.  */
3092 struct fdpic_local {
3093   unsigned int funcdesc_cnt;
3094   unsigned int gotofffuncdesc_cnt;
3095   int funcdesc_offset;
3096 };
3097 
3098 struct elf_arm_obj_tdata
3099 {
3100   struct elf_obj_tdata root;
3101 
3102   /* tls_type for each local got entry.  */
3103   char *local_got_tls_type;
3104 
3105   /* GOTPLT entries for TLS descriptors.  */
3106   bfd_vma *local_tlsdesc_gotent;
3107 
3108   /* Information for local symbols that need entries in .iplt.  */
3109   struct arm_local_iplt_info **local_iplt;
3110 
3111   /* Zero to warn when linking objects with incompatible enum sizes.  */
3112   int no_enum_size_warning;
3113 
3114   /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
3115   int no_wchar_size_warning;
3116 
3117   /* Maintains FDPIC counters and funcdesc info.  */
3118   struct fdpic_local *local_fdpic_cnts;
3119 };
3120 
3121 #define elf_arm_tdata(bfd) \
3122   ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3123 
3124 #define elf32_arm_local_got_tls_type(bfd) \
3125   (elf_arm_tdata (bfd)->local_got_tls_type)
3126 
3127 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3128   (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3129 
3130 #define elf32_arm_local_iplt(bfd) \
3131   (elf_arm_tdata (bfd)->local_iplt)
3132 
3133 #define elf32_arm_local_fdpic_cnts(bfd) \
3134   (elf_arm_tdata (bfd)->local_fdpic_cnts)
3135 
3136 #define is_arm_elf(bfd) \
3137   (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3138    && elf_tdata (bfd) != NULL \
3139    && elf_object_id (bfd) == ARM_ELF_DATA)
3140 
3141 static bfd_boolean
3142 elf32_arm_mkobject (bfd *abfd)
3143 {
3144   return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3145 				  ARM_ELF_DATA);
3146 }
3147 
3148 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3149 
3150 /* Structure to handle FDPIC support for extern functions.  */
3151 struct fdpic_global {
3152   unsigned int gotofffuncdesc_cnt;
3153   unsigned int gotfuncdesc_cnt;
3154   unsigned int funcdesc_cnt;
3155   int funcdesc_offset;
3156   int gotfuncdesc_offset;
3157 };
3158 
3159 /* Arm ELF linker hash entry.  */
3160 struct elf32_arm_link_hash_entry
3161 {
3162   struct elf_link_hash_entry root;
3163 
3164   /* Track dynamic relocs copied for this symbol.  */
3165   struct elf_dyn_relocs *dyn_relocs;
3166 
3167   /* ARM-specific PLT information.  */
3168   struct arm_plt_info plt;
3169 
3170 #define GOT_UNKNOWN	0
3171 #define GOT_NORMAL	1
3172 #define GOT_TLS_GD	2
3173 #define GOT_TLS_IE	4
3174 #define GOT_TLS_GDESC	8
3175 #define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
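  /* Note that the GOT_TLS_* values are bit flags and can be combined; for
     instance a symbol referenced by both a global-dynamic and a descriptor
     relocation ends up with (GOT_TLS_GD | GOT_TLS_GDESC), which is why the
     macro above tests the bits rather than comparing for equality.  */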
3176   unsigned int tls_type : 8;
3177 
3178   /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
3179   unsigned int is_iplt : 1;
3180 
3181   unsigned int unused : 23;
3182 
3183   /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3184      starting at the end of the jump table.  */
3185   bfd_vma tlsdesc_got;
3186 
3187   /* The symbol marking the real symbol location for exported thumb
3188      symbols with Arm stubs.  */
3189   struct elf_link_hash_entry *export_glue;
3190 
3191   /* A pointer to the most recently used stub hash entry against this
3192      symbol.  */
3193   struct elf32_arm_stub_hash_entry *stub_cache;
3194 
3195   /* Counter for FDPIC relocations against this symbol.  */
3196   struct fdpic_global fdpic_cnts;
3197 };
3198 
3199 /* Traverse an arm ELF linker hash table.  */
3200 #define elf32_arm_link_hash_traverse(table, func, info)			\
3201   (elf_link_hash_traverse						\
3202    (&(table)->root,							\
3203     (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
3204     (info)))
3205 
3206 /* Get the ARM elf linker hash table from a link_info structure.  */
3207 #define elf32_arm_hash_table(info) \
3208   (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3209   == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3210 
3211 #define arm_stub_hash_lookup(table, string, create, copy) \
3212   ((struct elf32_arm_stub_hash_entry *) \
3213    bfd_hash_lookup ((table), (string), (create), (copy)))
3214 
3215 /* Array to keep track of which stub sections have been created, and
3216    information on stub grouping.  */
3217 struct map_stub
3218 {
3219   /* This is the section to which stubs in the group will be
3220      attached.  */
3221   asection *link_sec;
3222   /* The stub section.  */
3223   asection *stub_sec;
3224 };
3225 
3226 #define elf32_arm_compute_jump_table_size(htab) \
3227   ((htab)->next_tls_desc_index * 4)
3228 
3229 /* ARM ELF linker hash table.  */
3230 struct elf32_arm_link_hash_table
3231 {
3232   /* The main hash table.  */
3233   struct elf_link_hash_table root;
3234 
3235   /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
3236   bfd_size_type thumb_glue_size;
3237 
3238   /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
3239   bfd_size_type arm_glue_size;
3240 
3241   /* The size in bytes of section containing the ARMv4 BX veneers.  */
3242   bfd_size_type bx_glue_size;
3243 
3244   /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
3245      veneer has been populated.  */
3246   bfd_vma bx_glue_offset[15];
3247 
3248   /* The size in bytes of the section containing glue for VFP11 erratum
3249      veneers.  */
3250   bfd_size_type vfp11_erratum_glue_size;
3251 
3252   /* The size in bytes of the section containing glue for STM32L4XX erratum
3253      veneers.  */
3254   bfd_size_type stm32l4xx_erratum_glue_size;
3255 
3256   /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
3257      holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3258      elf32_arm_write_section().  */
3259   struct a8_erratum_fix *a8_erratum_fixes;
3260   unsigned int num_a8_erratum_fixes;
3261 
3262   /* An arbitrary input BFD chosen to hold the glue sections.  */
3263   bfd * bfd_of_glue_owner;
3264 
3265   /* Nonzero to output a BE8 image.  */
3266   int byteswap_code;
3267 
3268   /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3269      Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
3270   int target1_is_rel;
3271 
3272   /* The relocation to use for R_ARM_TARGET2 relocations.  */
3273   int target2_reloc;
3274 
3275   /* 0 = Ignore R_ARM_V4BX.
3276      1 = Convert BX to MOV PC.
3277      2 = Generate v4 interworking stubs.  */
3278   int fix_v4bx;
3279 
3280   /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
3281   int fix_cortex_a8;
3282 
3283   /* Whether we should fix the ARM1176 BLX immediate issue.  */
3284   int fix_arm1176;
3285 
3286   /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
3287   int use_blx;
3288 
3289   /* What sort of code sequences we should look for which may trigger the
3290      VFP11 denorm erratum.  */
3291   bfd_arm_vfp11_fix vfp11_fix;
3292 
3293   /* Global counter for the number of fixes we have emitted.  */
3294   int num_vfp11_fixes;
3295 
3296   /* What sort of code sequences we should look for which may trigger the
3297      STM32L4XX erratum.  */
3298   bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3299 
3300   /* Global counter for the number of fixes we have emitted.  */
3301   int num_stm32l4xx_fixes;
3302 
3303   /* Nonzero to force PIC branch veneers.  */
3304   int pic_veneer;
3305 
3306   /* The number of bytes in the initial entry in the PLT.  */
3307   bfd_size_type plt_header_size;
3308 
3309   /* The number of bytes in the subsequent PLT entries.  */
3310   bfd_size_type plt_entry_size;
3311 
3312   /* True if the target system is VxWorks.  */
3313   int vxworks_p;
3314 
3315   /* True if the target system is Symbian OS.  */
3316   int symbian_p;
3317 
3318   /* True if the target system is Native Client.  */
3319   int nacl_p;
3320 
3321   /* True if the target uses REL relocations.  */
3322   bfd_boolean use_rel;
3323 
3324   /* Nonzero if import library must be a secure gateway import library
3325      as per ARMv8-M Security Extensions.  */
3326   int cmse_implib;
3327 
3328   /* The import library whose symbols' address must remain stable in
3329      the import library generated.  */
3330   bfd *in_implib_bfd;
3331 
3332   /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
3333   bfd_vma next_tls_desc_index;
3334 
3335   /* How many R_ARM_TLS_DESC relocations were generated so far.  */
3336   bfd_vma num_tls_desc;
3337 
3338   /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
3339   asection *srelplt2;
3340 
3341   /* The offset into splt of the PLT entry for the TLS descriptor
3342      resolver.  Special values are 0, if not necessary (or not found
3343      to be necessary yet), and -1 if needed but not determined
3344      yet.  */
3345   bfd_vma dt_tlsdesc_plt;
3346 
3347   /* The offset into sgot of the GOT entry used by the PLT entry
3348      above.  */
3349   bfd_vma dt_tlsdesc_got;
3350 
3351   /* Offset in .plt section of tls_arm_trampoline.  */
3352   bfd_vma tls_trampoline;
3353 
3354   /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
3355   union
3356   {
3357     bfd_signed_vma refcount;
3358     bfd_vma offset;
3359   } tls_ldm_got;
3360 
3361   /* Small local sym cache.  */
3362   struct sym_cache sym_cache;
3363 
3364   /* For convenience in allocate_dynrelocs.  */
3365   bfd * obfd;
3366 
3367   /* The amount of space used by the reserved portion of the sgotplt
3368      section, plus whatever space is used by the jump slots.  */
3369   bfd_vma sgotplt_jump_table_size;
3370 
3371   /* The stub hash table.  */
3372   struct bfd_hash_table stub_hash_table;
3373 
3374   /* Linker stub bfd.  */
3375   bfd *stub_bfd;
3376 
3377   /* Linker call-backs.  */
3378   asection * (*add_stub_section) (const char *, asection *, asection *,
3379 				  unsigned int);
3380   void (*layout_sections_again) (void);
3381 
3382   /* Array to keep track of which stub sections have been created, and
3383      information on stub grouping.  */
3384   struct map_stub *stub_group;
3385 
3386   /* Input stub section holding secure gateway veneers.  */
3387   asection *cmse_stub_sec;
3388 
3389   /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3390      start to be allocated.  */
3391   bfd_vma new_cmse_stub_offset;
3392 
3393   /* Number of elements in stub_group.  */
3394   unsigned int top_id;
3395 
3396   /* Assorted information used by elf32_arm_size_stubs.  */
3397   unsigned int bfd_count;
3398   unsigned int top_index;
3399   asection **input_list;
3400 
3401   /* True if the target system uses FDPIC. */
3402   int fdpic_p;
3403 
3404   /* Fixup section. Used for FDPIC.  */
3405   asection *srofixup;
3406 };
3407 
3408 /* Add an FDPIC read-only fixup.  */
3409 static void
3410 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3411 {
3412   bfd_vma fixup_offset;
3413 
3414   fixup_offset = srofixup->reloc_count++ * 4;
3415   BFD_ASSERT (fixup_offset < srofixup->size);
3416   bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3417 }
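
/* (Each entry written above is just the run-time address of a word that the
   FDPIC loader must adjust by the load offset; no relocation record is
   needed, which is the point of the .rofixup table.  This is a summary of
   the FDPIC convention rather than of any particular loader.)  */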
3418 
3419 static inline int
3420 ctz (unsigned int mask)
3421 {
3422 #if GCC_VERSION >= 3004
3423   return __builtin_ctz (mask);
3424 #else
3425   unsigned int i;
3426 
3427   for (i = 0; i < 8 * sizeof (mask); i++)
3428     {
3429       if (mask & 0x1)
3430 	break;
3431       mask = (mask >> 1);
3432     }
3433   return i;
3434 #endif
3435 }
3436 
3437 static inline int
3438 elf32_arm_popcount (unsigned int mask)
3439 {
3440 #if GCC_VERSION >= 3004
3441   return __builtin_popcount (mask);
3442 #else
3443   unsigned int i;
3444   int sum = 0;
3445 
3446   for (i = 0; i < 8 * sizeof (mask); i++)
3447     {
3448       if (mask & 0x1)
3449 	sum++;
3450       mask = (mask >> 1);
3451     }
3452   return sum;
3453 #endif
3454 }
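
/* Note on the two helpers above: __builtin_ctz is undefined for a zero
   argument, whereas the portable loop in ctz returns 32 for a zero 32-bit
   mask, so callers should only pass a non-zero mask.  For example,
   ctz (0x28) is 3 and elf32_arm_popcount (0x28) is 2.  */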
3455 
3456 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3457 				    asection *sreloc, Elf_Internal_Rela *rel);
3458 
3459 static void
3460 arm_elf_fill_funcdesc(bfd *output_bfd,
3461 		      struct bfd_link_info *info,
3462 		      int *funcdesc_offset,
3463 		      int dynindx,
3464 		      int offset,
3465 		      bfd_vma addr,
3466 		      bfd_vma dynreloc_value,
3467 		      bfd_vma seg)
3468 {
3469   if ((*funcdesc_offset & 1) == 0)
3470     {
3471       struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3472       asection *sgot = globals->root.sgot;
3473 
3474       if (bfd_link_pic(info))
3475 	{
3476 	  asection *srelgot = globals->root.srelgot;
3477 	  Elf_Internal_Rela outrel;
3478 
3479 	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3480 	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3481 	  outrel.r_addend = 0;
3482 
3483 	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3484 	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3485 	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3486 	}
3487       else
3488 	{
3489 	  struct elf_link_hash_entry *hgot = globals->root.hgot;
3490 	  bfd_vma got_value = hgot->root.u.def.value
3491 	    + hgot->root.u.def.section->output_section->vma
3492 	    + hgot->root.u.def.section->output_offset;
3493 
3494 	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
3495 			      sgot->output_section->vma + sgot->output_offset
3496 			      + offset);
3497 	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
3498 			      sgot->output_section->vma + sgot->output_offset
3499 			      + offset + 4);
3500 	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3501 	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3502 	}
3503       *funcdesc_offset |= 1;
3504     }
3505 }
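
/* Roughly, the code above lays out an FDPIC function descriptor as two
   consecutive GOT words: the first receives the function's address and the
   second the value to load into the PIC register (or, when linking PIC, an
   R_ARM_FUNCDESC_VALUE relocation is emitted to resolve the pair at load
   time).  The low bit of *funcdesc_offset serves only as an "already
   written" flag so that the descriptor is emitted at most once.  */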
3506 
3507 /* Create an entry in an ARM ELF linker hash table.  */
3508 
3509 static struct bfd_hash_entry *
3510 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3511 			     struct bfd_hash_table * table,
3512 			     const char * string)
3513 {
3514   struct elf32_arm_link_hash_entry * ret =
3515     (struct elf32_arm_link_hash_entry *) entry;
3516 
3517   /* Allocate the structure if it has not already been allocated by a
3518      subclass.  */
3519   if (ret == NULL)
3520     ret = (struct elf32_arm_link_hash_entry *)
3521 	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3522   if (ret == NULL)
3523     return (struct bfd_hash_entry *) ret;
3524 
3525   /* Call the allocation method of the superclass.  */
3526   ret = ((struct elf32_arm_link_hash_entry *)
3527 	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3528 				     table, string));
3529   if (ret != NULL)
3530     {
3531       ret->dyn_relocs = NULL;
3532       ret->tls_type = GOT_UNKNOWN;
3533       ret->tlsdesc_got = (bfd_vma) -1;
3534       ret->plt.thumb_refcount = 0;
3535       ret->plt.maybe_thumb_refcount = 0;
3536       ret->plt.noncall_refcount = 0;
3537       ret->plt.got_offset = -1;
3538       ret->is_iplt = FALSE;
3539       ret->export_glue = NULL;
3540 
3541       ret->stub_cache = NULL;
3542 
3543       ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3544       ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3545       ret->fdpic_cnts.funcdesc_cnt = 0;
3546       ret->fdpic_cnts.funcdesc_offset = -1;
3547       ret->fdpic_cnts.gotfuncdesc_offset = -1;
3548     }
3549 
3550   return (struct bfd_hash_entry *) ret;
3551 }
3552 
3553 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3554    symbols.  */
3555 
3556 static bfd_boolean
3557 elf32_arm_allocate_local_sym_info (bfd *abfd)
3558 {
3559   if (elf_local_got_refcounts (abfd) == NULL)
3560     {
3561       bfd_size_type num_syms;
3562       bfd_size_type size;
3563       char *data;
3564 
3565       num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3566       size = num_syms * (sizeof (bfd_signed_vma)
3567 			 + sizeof (struct arm_local_iplt_info *)
3568 			 + sizeof (bfd_vma)
3569 			 + sizeof (char)
3570 			 + sizeof (struct fdpic_local));
3571       data = bfd_zalloc (abfd, size);
3572       if (data == NULL)
3573 	return FALSE;
3574 
3575       elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3576       data += num_syms * sizeof (struct fdpic_local);
3577 
3578       elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3579       data += num_syms * sizeof (bfd_signed_vma);
3580 
3581       elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3582       data += num_syms * sizeof (struct arm_local_iplt_info *);
3583 
3584       elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3585       data += num_syms * sizeof (bfd_vma);
3586 
3587       elf32_arm_local_got_tls_type (abfd) = data;
3588     }
3589   return TRUE;
3590 }
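
/* (A note on the allocation above: a single bfd_zalloc is carved into five
   parallel arrays, all indexed by local symbol number -- FDPIC counters, GOT
   reference counts, .iplt information, TLS descriptor GOT offsets and the
   per-symbol TLS type bytes -- with elf_local_got_refcounts pointing into
   the middle of that block.)  */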
3591 
3592 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3593    to input bfd ABFD.  Create the information if it doesn't already exist.
3594    Return null if an allocation fails.  */
3595 
3596 static struct arm_local_iplt_info *
3597 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3598 {
3599   struct arm_local_iplt_info **ptr;
3600 
3601   if (!elf32_arm_allocate_local_sym_info (abfd))
3602     return NULL;
3603 
3604   BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3605   ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3606   if (*ptr == NULL)
3607     *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3608   return *ptr;
3609 }
3610 
3611 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3612    in ABFD's symbol table.  If the symbol is global, H points to its
3613    hash table entry, otherwise H is null.
3614 
3615    Return true if the symbol does have PLT information.  When returning
3616    true, point *ROOT_PLT at the target-independent reference count/offset
3617    union and *ARM_PLT at the ARM-specific information.  */
3618 
3619 static bfd_boolean
3620 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3621 			struct elf32_arm_link_hash_entry *h,
3622 			unsigned long r_symndx, union gotplt_union **root_plt,
3623 			struct arm_plt_info **arm_plt)
3624 {
3625   struct arm_local_iplt_info *local_iplt;
3626 
3627   if (globals->root.splt == NULL && globals->root.iplt == NULL)
3628     return FALSE;
3629 
3630   if (h != NULL)
3631     {
3632       *root_plt = &h->root.plt;
3633       *arm_plt = &h->plt;
3634       return TRUE;
3635     }
3636 
3637   if (elf32_arm_local_iplt (abfd) == NULL)
3638     return FALSE;
3639 
3640   local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3641   if (local_iplt == NULL)
3642     return FALSE;
3643 
3644   *root_plt = &local_iplt->root;
3645   *arm_plt = &local_iplt->arm;
3646   return TRUE;
3647 }
3648 
3649 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3650 
3651 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3652    before it.  */
3653 
3654 static bfd_boolean
3655 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3656 				  struct arm_plt_info *arm_plt)
3657 {
3658   struct elf32_arm_link_hash_table *htab;
3659 
3660   htab = elf32_arm_hash_table (info);
3661 
3662   return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3663 	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3664 }
3665 
3666 /* Return a pointer to the head of the dynamic reloc list that should
3667    be used for local symbol ISYM, which is symbol number R_SYMNDX in
3668    ABFD's symbol table.  Return null if an error occurs.  */
3669 
3670 static struct elf_dyn_relocs **
3671 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3672 				   Elf_Internal_Sym *isym)
3673 {
3674   if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3675     {
3676       struct arm_local_iplt_info *local_iplt;
3677 
3678       local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3679       if (local_iplt == NULL)
3680 	return NULL;
3681       return &local_iplt->dyn_relocs;
3682     }
3683   else
3684     {
3685       /* Track dynamic relocs needed for local syms too.
3686 	 We really need local syms available to do this
3687 	 easily.  Oh well.  */
3688       asection *s;
3689       void *vpp;
3690 
3691       s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3692       if (s == NULL)
3693 	abort ();
3694 
3695       vpp = &elf_section_data (s)->local_dynrel;
3696       return (struct elf_dyn_relocs **) vpp;
3697     }
3698 }
3699 
3700 /* Initialize an entry in the stub hash table.  */
3701 
3702 static struct bfd_hash_entry *
3703 stub_hash_newfunc (struct bfd_hash_entry *entry,
3704 		   struct bfd_hash_table *table,
3705 		   const char *string)
3706 {
3707   /* Allocate the structure if it has not already been allocated by a
3708      subclass.  */
3709   if (entry == NULL)
3710     {
3711       entry = (struct bfd_hash_entry *)
3712 	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3713       if (entry == NULL)
3714 	return entry;
3715     }
3716 
3717   /* Call the allocation method of the superclass.  */
3718   entry = bfd_hash_newfunc (entry, table, string);
3719   if (entry != NULL)
3720     {
3721       struct elf32_arm_stub_hash_entry *eh;
3722 
3723       /* Initialize the local fields.  */
3724       eh = (struct elf32_arm_stub_hash_entry *) entry;
3725       eh->stub_sec = NULL;
3726       eh->stub_offset = (bfd_vma) -1;
3727       eh->source_value = 0;
3728       eh->target_value = 0;
3729       eh->target_section = NULL;
3730       eh->orig_insn = 0;
3731       eh->stub_type = arm_stub_none;
3732       eh->stub_size = 0;
3733       eh->stub_template = NULL;
3734       eh->stub_template_size = -1;
3735       eh->h = NULL;
3736       eh->id_sec = NULL;
3737       eh->output_name = NULL;
3738     }
3739 
3740   return entry;
3741 }
3742 
3743 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3744    shortcuts to them in our hash table.  */
3745 
3746 static bfd_boolean
3747 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3748 {
3749   struct elf32_arm_link_hash_table *htab;
3750 
3751   htab = elf32_arm_hash_table (info);
3752   if (htab == NULL)
3753     return FALSE;
3754 
3755   /* BPABI objects never have a GOT, or associated sections.  */
3756   if (htab->symbian_p)
3757     return TRUE;
3758 
3759   if (! _bfd_elf_create_got_section (dynobj, info))
3760     return FALSE;
3761 
3762   /* Also create .rofixup.  */
3763   if (htab->fdpic_p)
3764     {
3765       htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3766 						    (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3767 						     | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3768       if (htab->srofixup == NULL || ! bfd_set_section_alignment (dynobj, htab->srofixup, 2))
3769 	return FALSE;
3770     }
3771 
3772   return TRUE;
3773 }
3774 
3775 /* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */
3776 
3777 static bfd_boolean
3778 create_ifunc_sections (struct bfd_link_info *info)
3779 {
3780   struct elf32_arm_link_hash_table *htab;
3781   const struct elf_backend_data *bed;
3782   bfd *dynobj;
3783   asection *s;
3784   flagword flags;
3785 
3786   htab = elf32_arm_hash_table (info);
3787   dynobj = htab->root.dynobj;
3788   bed = get_elf_backend_data (dynobj);
3789   flags = bed->dynamic_sec_flags;
3790 
3791   if (htab->root.iplt == NULL)
3792     {
3793       s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3794 					      flags | SEC_READONLY | SEC_CODE);
3795       if (s == NULL
3796 	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3797 	return FALSE;
3798       htab->root.iplt = s;
3799     }
3800 
3801   if (htab->root.irelplt == NULL)
3802     {
3803       s = bfd_make_section_anyway_with_flags (dynobj,
3804 					      RELOC_SECTION (htab, ".iplt"),
3805 					      flags | SEC_READONLY);
3806       if (s == NULL
3807 	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3808 	return FALSE;
3809       htab->root.irelplt = s;
3810     }
3811 
3812   if (htab->root.igotplt == NULL)
3813     {
3814       s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3815       if (s == NULL
3816 	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3817 	return FALSE;
3818       htab->root.igotplt = s;
3819     }
3820   return TRUE;
3821 }
3822 
3823 /* Determine if we're dealing with a Thumb only architecture.  */
3824 
3825 static bfd_boolean
3826 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3827 {
3828   int arch;
3829   int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3830 					  Tag_CPU_arch_profile);
3831 
3832   if (profile)
3833     return profile == 'M';
3834 
3835   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3836 
3837   /* Force return logic to be reviewed for each new architecture.  */
3838   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3839 
3840   if (arch == TAG_CPU_ARCH_V6_M
3841       || arch == TAG_CPU_ARCH_V6S_M
3842       || arch == TAG_CPU_ARCH_V7E_M
3843       || arch == TAG_CPU_ARCH_V8M_BASE
3844       || arch == TAG_CPU_ARCH_V8M_MAIN)
3845     return TRUE;
3846 
3847   return FALSE;
3848 }
3849 
3850 /* Determine if we're dealing with a Thumb-2 object.  */
3851 
3852 static bfd_boolean
3853 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3854 {
3855   int arch;
3856   int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3857 					    Tag_THUMB_ISA_use);
3858 
3859   if (thumb_isa)
3860     return thumb_isa == 2;
3861 
3862   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3863 
3864   /* Force return logic to be reviewed for each new architecture.  */
3865   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3866 
3867   return (arch == TAG_CPU_ARCH_V6T2
3868 	  || arch == TAG_CPU_ARCH_V7
3869 	  || arch == TAG_CPU_ARCH_V7E_M
3870 	  || arch == TAG_CPU_ARCH_V8
3871 	  || arch == TAG_CPU_ARCH_V8R
3872 	  || arch == TAG_CPU_ARCH_V8M_MAIN);
3873 }
3874 
3875 /* Determine whether Thumb-2 BL instruction is available.  */
3876 
3877 static bfd_boolean
3878 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3879 {
3880   int arch =
3881     bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3882 
3883   /* Force return logic to be reviewed for each new architecture.  */
3884   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
3885 
3886   /* Also true for architectures introduced after ARMv6T2 (e.g. ARMv6-M).  */
3887   return (arch == TAG_CPU_ARCH_V6T2
3888 	  || arch >= TAG_CPU_ARCH_V7);
3889 }
3890 
3891 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3892    .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3893    hash table.  */
3894 
3895 static bfd_boolean
3896 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3897 {
3898   struct elf32_arm_link_hash_table *htab;
3899 
3900   htab = elf32_arm_hash_table (info);
3901   if (htab == NULL)
3902     return FALSE;
3903 
3904   if (!htab->root.sgot && !create_got_section (dynobj, info))
3905     return FALSE;
3906 
3907   if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3908     return FALSE;
3909 
3910   if (htab->vxworks_p)
3911     {
3912       if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3913 	return FALSE;
3914 
3915       if (bfd_link_pic (info))
3916 	{
3917 	  htab->plt_header_size = 0;
3918 	  htab->plt_entry_size
3919 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3920 	}
3921       else
3922 	{
3923 	  htab->plt_header_size
3924 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3925 	  htab->plt_entry_size
3926 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3927 	}
3928 
3929       if (elf_elfheader (dynobj))
3930 	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3931     }
3932   else
3933     {
3934       /* PR ld/16017
3935 	 Test for thumb only architectures.  Note - we cannot just call
3936 	 using_thumb_only() as the attributes in the output bfd have not been
3937 	 initialised at this point, so instead we use the input bfd.  */
3938       bfd * saved_obfd = htab->obfd;
3939 
3940       htab->obfd = dynobj;
3941       if (using_thumb_only (htab))
3942 	{
3943 	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3944 	  htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3945 	}
3946       htab->obfd = saved_obfd;
3947     }
3948 
3949   if (htab->fdpic_p) {
3950     htab->plt_header_size = 0;
3951     if (info->flags & DF_BIND_NOW)
3952       htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
3953     else
3954       htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
3955   }
3956 
3957   if (!htab->root.splt
3958       || !htab->root.srelplt
3959       || !htab->root.sdynbss
3960       || (!bfd_link_pic (info) && !htab->root.srelbss))
3961     abort ();
3962 
3963   return TRUE;
3964 }
3965 
3966 /* Copy the extra info we tack onto an elf_link_hash_entry.  */
3967 
3968 static void
3969 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3970 				struct elf_link_hash_entry *dir,
3971 				struct elf_link_hash_entry *ind)
3972 {
3973   struct elf32_arm_link_hash_entry *edir, *eind;
3974 
3975   edir = (struct elf32_arm_link_hash_entry *) dir;
3976   eind = (struct elf32_arm_link_hash_entry *) ind;
3977 
3978   if (eind->dyn_relocs != NULL)
3979     {
3980       if (edir->dyn_relocs != NULL)
3981 	{
3982 	  struct elf_dyn_relocs **pp;
3983 	  struct elf_dyn_relocs *p;
3984 
3985 	  /* Add reloc counts against the indirect sym to the direct sym
3986 	     list.  Merge any entries against the same section.  */
3987 	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3988 	    {
3989 	      struct elf_dyn_relocs *q;
3990 
3991 	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
3992 		if (q->sec == p->sec)
3993 		  {
3994 		    q->pc_count += p->pc_count;
3995 		    q->count += p->count;
3996 		    *pp = p->next;
3997 		    break;
3998 		  }
3999 	      if (q == NULL)
4000 		pp = &p->next;
4001 	    }
4002 	  *pp = edir->dyn_relocs;
4003 	}
4004 
4005       edir->dyn_relocs = eind->dyn_relocs;
4006       eind->dyn_relocs = NULL;
4007     }
4008 
4009   if (ind->root.type == bfd_link_hash_indirect)
4010     {
4011       /* Copy over PLT info.  */
4012       edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4013       eind->plt.thumb_refcount = 0;
4014       edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4015       eind->plt.maybe_thumb_refcount = 0;
4016       edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4017       eind->plt.noncall_refcount = 0;
4018 
4019       /* Copy FDPIC counters.  */
4020       edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4021       edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4022       edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4023 
4024       /* We should only allocate a function to .iplt once the final
4025 	 symbol information is known.  */
4026       BFD_ASSERT (!eind->is_iplt);
4027 
4028       if (dir->got.refcount <= 0)
4029 	{
4030 	  edir->tls_type = eind->tls_type;
4031 	  eind->tls_type = GOT_UNKNOWN;
4032 	}
4033     }
4034 
4035   _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4036 }
4037 
4038 /* Destroy an ARM elf linker hash table.  */
4039 
4040 static void
4041 elf32_arm_link_hash_table_free (bfd *obfd)
4042 {
4043   struct elf32_arm_link_hash_table *ret
4044     = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4045 
4046   bfd_hash_table_free (&ret->stub_hash_table);
4047   _bfd_elf_link_hash_table_free (obfd);
4048 }
4049 
4050 /* Create an ARM elf linker hash table.  */
4051 
4052 static struct bfd_link_hash_table *
4053 elf32_arm_link_hash_table_create (bfd *abfd)
4054 {
4055   struct elf32_arm_link_hash_table *ret;
4056   bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
4057 
4058   ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4059   if (ret == NULL)
4060     return NULL;
4061 
4062   if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4063 				      elf32_arm_link_hash_newfunc,
4064 				      sizeof (struct elf32_arm_link_hash_entry),
4065 				      ARM_ELF_DATA))
4066     {
4067       free (ret);
4068       return NULL;
4069     }
4070 
4071   ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4072   ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4073 #ifdef FOUR_WORD_PLT
4074   ret->plt_header_size = 16;
4075   ret->plt_entry_size = 16;
4076 #else
4077   ret->plt_header_size = 20;
4078   ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4079 #endif
4080   ret->use_rel = TRUE;
4081   ret->obfd = abfd;
4082   ret->fdpic_p = 0;
4083 
4084   if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4085 			    sizeof (struct elf32_arm_stub_hash_entry)))
4086     {
4087       _bfd_elf_link_hash_table_free (abfd);
4088       return NULL;
4089     }
4090   ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4091 
4092   return &ret->root.root;
4093 }
4094 
4095 /* Determine what kind of NOPs are available.  */
4096 
4097 static bfd_boolean
4098 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4099 {
4100   const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4101 					     Tag_CPU_arch);
4102 
4103   /* Force return logic to be reviewed for each new architecture.  */
4104   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8M_MAIN);
4105 
4106   return (arch == TAG_CPU_ARCH_V6T2
4107 	  || arch == TAG_CPU_ARCH_V6K
4108 	  || arch == TAG_CPU_ARCH_V7
4109 	  || arch == TAG_CPU_ARCH_V8
4110 	  || arch == TAG_CPU_ARCH_V8R);
4111 }
4112 
4113 static bfd_boolean
4114 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4115 {
4116   switch (stub_type)
4117     {
4118     case arm_stub_long_branch_thumb_only:
4119     case arm_stub_long_branch_thumb2_only:
4120     case arm_stub_long_branch_thumb2_only_pure:
4121     case arm_stub_long_branch_v4t_thumb_arm:
4122     case arm_stub_short_branch_v4t_thumb_arm:
4123     case arm_stub_long_branch_v4t_thumb_arm_pic:
4124     case arm_stub_long_branch_v4t_thumb_tls_pic:
4125     case arm_stub_long_branch_thumb_only_pic:
4126     case arm_stub_cmse_branch_thumb_only:
4127       return TRUE;
4128     case arm_stub_none:
4129       BFD_FAIL ();
4130       return FALSE;
4131       break;
4132     default:
4133       return FALSE;
4134     }
4135 }
4136 
4137 /* Determine the type of stub needed, if any, for a call.  */
4138 
4139 static enum elf32_arm_stub_type
4140 arm_type_of_stub (struct bfd_link_info *info,
4141 		  asection *input_sec,
4142 		  const Elf_Internal_Rela *rel,
4143 		  unsigned char st_type,
4144 		  enum arm_st_branch_type *actual_branch_type,
4145 		  struct elf32_arm_link_hash_entry *hash,
4146 		  bfd_vma destination,
4147 		  asection *sym_sec,
4148 		  bfd *input_bfd,
4149 		  const char *name)
4150 {
4151   bfd_vma location;
4152   bfd_signed_vma branch_offset;
4153   unsigned int r_type;
4154   struct elf32_arm_link_hash_table * globals;
4155   bfd_boolean thumb2, thumb2_bl, thumb_only;
4156   enum elf32_arm_stub_type stub_type = arm_stub_none;
4157   int use_plt = 0;
4158   enum arm_st_branch_type branch_type = *actual_branch_type;
4159   union gotplt_union *root_plt;
4160   struct arm_plt_info *arm_plt;
4161   int arch;
4162   int thumb2_movw;
4163 
4164   if (branch_type == ST_BRANCH_LONG)
4165     return stub_type;
4166 
4167   globals = elf32_arm_hash_table (info);
4168   if (globals == NULL)
4169     return stub_type;
4170 
4171   thumb_only = using_thumb_only (globals);
4172   thumb2 = using_thumb2 (globals);
4173   thumb2_bl = using_thumb2_bl (globals);
4174 
4175   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4176 
4177   /* True for architectures that implement the thumb2 movw instruction.  */
4178   thumb2_movw = thumb2 || (arch == TAG_CPU_ARCH_V8M_BASE);
4179 
4180   /* Determine where the call point is.  */
4181   location = (input_sec->output_offset
4182 	      + input_sec->output_section->vma
4183 	      + rel->r_offset);
4184 
4185   r_type = ELF32_R_TYPE (rel->r_info);
4186 
4187   /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
4188      are considering a function call relocation.  */
4189   if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4190 		     || r_type == R_ARM_THM_JUMP19)
4191       && branch_type == ST_BRANCH_TO_ARM)
4192     branch_type = ST_BRANCH_TO_THUMB;
4193 
4194   /* For TLS call relocs, it is the caller's responsibility to provide
4195      the address of the appropriate trampoline.  */
4196   if (r_type != R_ARM_TLS_CALL
4197       && r_type != R_ARM_THM_TLS_CALL
4198       && elf32_arm_get_plt_info (input_bfd, globals, hash,
4199 				 ELF32_R_SYM (rel->r_info), &root_plt,
4200 				 &arm_plt)
4201       && root_plt->offset != (bfd_vma) -1)
4202     {
4203       asection *splt;
4204 
4205       if (hash == NULL || hash->is_iplt)
4206 	splt = globals->root.iplt;
4207       else
4208 	splt = globals->root.splt;
4209       if (splt != NULL)
4210 	{
4211 	  use_plt = 1;
4212 
4213 	  /* Note when dealing with PLT entries: the main PLT stub is in
4214 	     ARM mode, so if the branch is in Thumb mode, another
4215 	     Thumb->ARM stub will be inserted later just before the ARM
4216 	     PLT stub. If a long branch stub is needed, we'll add a
4217 	     Thumb->Arm one and branch directly to the ARM PLT entry.
4218 	     Here, we have to check if a pre-PLT Thumb->ARM stub
4219 	     is needed and if it will be close enough.  */
4220 
4221 	  destination = (splt->output_section->vma
4222 			 + splt->output_offset
4223 			 + root_plt->offset);
4224 	  st_type = STT_FUNC;
4225 
4226 	  /* Thumb branch/call to PLT: it can become a branch to ARM
4227 	     or to Thumb. We must perform the same checks and
4228 	     corrections as in elf32_arm_final_link_relocate.  */
4229 	  if ((r_type == R_ARM_THM_CALL)
4230 	      || (r_type == R_ARM_THM_JUMP24))
4231 	    {
4232 	      if (globals->use_blx
4233 		  && r_type == R_ARM_THM_CALL
4234 		  && !thumb_only)
4235 		{
4236 		  /* If the Thumb BLX instruction is available, convert
4237 		     the BL to a BLX instruction to call the ARM-mode
4238 		     PLT entry.  */
4239 		  branch_type = ST_BRANCH_TO_ARM;
4240 		}
4241 	      else
4242 		{
4243 		  if (!thumb_only)
4244 		    /* Target the Thumb stub before the ARM PLT entry.  */
4245 		    destination -= PLT_THUMB_STUB_SIZE;
4246 		  branch_type = ST_BRANCH_TO_THUMB;
4247 		}
4248 	    }
4249 	  else
4250 	    {
4251 	      branch_type = ST_BRANCH_TO_ARM;
4252 	    }
4253 	}
4254     }
4255   /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
4256   BFD_ASSERT (st_type != STT_GNU_IFUNC);
4257 
4258   branch_offset = (bfd_signed_vma)(destination - location);
4259 
4260   if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4261       || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4262     {
4263       /* Handle cases where:
4264 	 - this call goes too far (different Thumb/Thumb2 max
4265 	   distance)
4266 	 - it's a Thumb->Arm call and blx is not available, or it's a
4267 	   Thumb->Arm branch (not bl). A stub is needed in this case,
4268 	   but only if this call is not through a PLT entry. Indeed,
4269 	   PLT stubs handle mode switching already.  */
4270       if ((!thumb2_bl
4271 	    && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4272 		|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4273 	  || (thumb2_bl
4274 	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4275 		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4276 	  || (thumb2
4277 	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4278 		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4279 	      && (r_type == R_ARM_THM_JUMP19))
4280 	  || (branch_type == ST_BRANCH_TO_ARM
4281 	      && (((r_type == R_ARM_THM_CALL
4282 		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4283 		  || (r_type == R_ARM_THM_JUMP24)
4284 		  || (r_type == R_ARM_THM_JUMP19))
4285 	      && !use_plt))
4286 	{
4287 	  /* If we need to insert a Thumb-Thumb long branch stub to a
4288 	     PLT, use one that branches directly to the ARM PLT
4289 	     stub. If we pretended we'd use the pre-PLT Thumb->ARM
4290 	     stub, undo this now.  */
4291 	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4292 	    {
4293 	      branch_type = ST_BRANCH_TO_ARM;
4294 	      branch_offset += PLT_THUMB_STUB_SIZE;
4295 	    }
4296 
4297 	  if (branch_type == ST_BRANCH_TO_THUMB)
4298 	    {
4299 	      /* Thumb to thumb.  */
4300 	      if (!thumb_only)
4301 		{
4302 		  if (input_sec->flags & SEC_ELF_PURECODE)
4303 		    _bfd_error_handler
4304 		      (_("%pB(%pA): warning: long branch veneers used in"
4305 			 " section with SHF_ARM_PURECODE section"
4306 			 " attribute is only supported for M-profile"
4307 			 " targets that implement the movw instruction"),
4308 		       input_bfd, input_sec);
4309 
4310 		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4311 		    /* PIC stubs.  */
4312 		    ? ((globals->use_blx
4313 			&& (r_type == R_ARM_THM_CALL))
4314 		       /* V5T and above. Stub starts with ARM code, so
4315 			  we must be able to switch mode before
4316 			  reaching it, which is only possible for 'bl'
4317 			  (ie R_ARM_THM_CALL relocation).  */
4318 		       ? arm_stub_long_branch_any_thumb_pic
4319 		       /* On V4T, use Thumb code only.  */
4320 		       : arm_stub_long_branch_v4t_thumb_thumb_pic)
4321 
4322 		    /* non-PIC stubs.  */
4323 		    : ((globals->use_blx
4324 			&& (r_type == R_ARM_THM_CALL))
4325 		       /* V5T and above.  */
4326 		       ? arm_stub_long_branch_any_any
4327 		       /* V4T.  */
4328 		       : arm_stub_long_branch_v4t_thumb_thumb);
4329 		}
4330 	      else
4331 		{
4332 		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4333 		      stub_type = arm_stub_long_branch_thumb2_only_pure;
4334 		  else
4335 		    {
4336 		      if (input_sec->flags & SEC_ELF_PURECODE)
4337 			_bfd_error_handler
4338 			  (_("%pB(%pA): warning: long branch veneers used in"
4339 			     " section with SHF_ARM_PURECODE section"
4340 			     " attribute is only supported for M-profile"
4341 			     " targets that implement the movw instruction"),
4342 			   input_bfd, input_sec);
4343 
4344 		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4345 			/* PIC stub.  */
4346 			? arm_stub_long_branch_thumb_only_pic
4347 			/* non-PIC stub.  */
4348 			: (thumb2 ? arm_stub_long_branch_thumb2_only
4349 				  : arm_stub_long_branch_thumb_only);
4350 		    }
4351 		}
4352 	    }
4353 	  else
4354 	    {
4355 	      if (input_sec->flags & SEC_ELF_PURECODE)
4356 		_bfd_error_handler
4357 		  (_("%pB(%pA): warning: long branch veneers used in"
4358 		     " section with SHF_ARM_PURECODE section"
4359 		     " attribute is only supported" " for M-profile"
4360 		     " targets that implement the movw instruction"),
4361 		   input_bfd, input_sec);
4362 
4363 	      /* Thumb to arm.  */
4364 	      if (sym_sec != NULL
4365 		  && sym_sec->owner != NULL
4366 		  && !INTERWORK_FLAG (sym_sec->owner))
4367 		{
4368 		  _bfd_error_handler
4369 		    (_("%pB(%s): warning: interworking not enabled;"
4370 		       " first occurrence: %pB: %s call to %s"),
4371 		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4372 		}
4373 
4374 	      stub_type =
4375 		(bfd_link_pic (info) | globals->pic_veneer)
4376 		/* PIC stubs.  */
4377 		? (r_type == R_ARM_THM_TLS_CALL
4378 		   /* TLS PIC stubs.  */
4379 		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4380 		      : arm_stub_long_branch_v4t_thumb_tls_pic)
4381 		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4382 		      /* V5T PIC and above.  */
4383 		      ? arm_stub_long_branch_any_arm_pic
4384 		      /* V4T PIC stub.  */
4385 		      : arm_stub_long_branch_v4t_thumb_arm_pic))
4386 
4387 		/* non-PIC stubs.  */
4388 		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
4389 		   /* V5T and above.  */
4390 		   ? arm_stub_long_branch_any_any
4391 		   /* V4T.  */
4392 		   : arm_stub_long_branch_v4t_thumb_arm);
4393 
4394 	      /* Handle v4t short branches.  */
4395 	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4396 		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4397 		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4398 		stub_type = arm_stub_short_branch_v4t_thumb_arm;
4399 	    }
4400 	}
4401     }
4402   else if (r_type == R_ARM_CALL
4403 	   || r_type == R_ARM_JUMP24
4404 	   || r_type == R_ARM_PLT32
4405 	   || r_type == R_ARM_TLS_CALL)
4406     {
4407       if (input_sec->flags & SEC_ELF_PURECODE)
4408 	_bfd_error_handler
4409 	  (_("%pB(%pA): warning: long branch veneers used in"
4410 	     " section with SHF_ARM_PURECODE section"
4411 	     " attribute is only supported for M-profile"
4412 	     " targets that implement the movw instruction"),
4413 	   input_bfd, input_sec);
4414       if (branch_type == ST_BRANCH_TO_THUMB)
4415 	{
4416 	  /* Arm to thumb.  */
4417 
4418 	  if (sym_sec != NULL
4419 	      && sym_sec->owner != NULL
4420 	      && !INTERWORK_FLAG (sym_sec->owner))
4421 	    {
4422 	      _bfd_error_handler
4423 		(_("%pB(%s): warning: interworking not enabled;"
4424 		   " first occurrence: %pB: %s call to %s"),
4425 		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4426 	    }
4427 
4428 	  /* We have an extra 2-bytes reach because of
4429 	     the mode change (bit 24 (H) of BLX encoding).  */
4430 	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4431 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4432 	      || (r_type == R_ARM_CALL && !globals->use_blx)
4433 	      || (r_type == R_ARM_JUMP24)
4434 	      || (r_type == R_ARM_PLT32))
4435 	    {
4436 	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4437 		/* PIC stubs.  */
4438 		? ((globals->use_blx)
4439 		   /* V5T and above.  */
4440 		   ? arm_stub_long_branch_any_thumb_pic
4441 		   /* V4T stub.  */
4442 		   : arm_stub_long_branch_v4t_arm_thumb_pic)
4443 
4444 		/* non-PIC stubs.  */
4445 		: ((globals->use_blx)
4446 		   /* V5T and above.  */
4447 		   ? arm_stub_long_branch_any_any
4448 		   /* V4T.  */
4449 		   : arm_stub_long_branch_v4t_arm_thumb);
4450 	    }
4451 	}
4452       else
4453 	{
4454 	  /* Arm to arm.  */
4455 	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4456 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4457 	    {
4458 	      stub_type =
4459 		(bfd_link_pic (info) | globals->pic_veneer)
4460 		/* PIC stubs.  */
4461 		? (r_type == R_ARM_TLS_CALL
4462 		   /* TLS PIC Stub.  */
4463 		   ? arm_stub_long_branch_any_tls_pic
4464 		   : (globals->nacl_p
4465 		      ? arm_stub_long_branch_arm_nacl_pic
4466 		      : arm_stub_long_branch_any_arm_pic))
4467 		/* non-PIC stubs.  */
4468 		: (globals->nacl_p
4469 		   ? arm_stub_long_branch_arm_nacl
4470 		   : arm_stub_long_branch_any_any);
4471 	    }
4472 	}
4473     }
4474 
4475   /* If a stub is needed, record the actual destination type.  */
4476   if (stub_type != arm_stub_none)
4477     *actual_branch_type = branch_type;
4478 
4479   return stub_type;
4480 }
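
/* For reference when reading the range checks above: the direct reach of the
   branches involved is roughly +/-32MB for ARM B/BL, +/-4MB for the original
   Thumb BL, +/-16MB for Thumb-2 BL/B.W and +/-1MB for Thumb-2 conditional
   branches.  Exceeding those limits, or needing an ARM/Thumb mode change
   without BLX, is what forces a long-branch veneer.  */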
4481 
4482 /* Build a name for an entry in the stub hash table.  */
4483 
4484 static char *
4485 elf32_arm_stub_name (const asection *input_section,
4486 		     const asection *sym_sec,
4487 		     const struct elf32_arm_link_hash_entry *hash,
4488 		     const Elf_Internal_Rela *rel,
4489 		     enum elf32_arm_stub_type stub_type)
4490 {
4491   char *stub_name;
4492   bfd_size_type len;
4493 
4494   if (hash)
4495     {
4496       len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4497       stub_name = (char *) bfd_malloc (len);
4498       if (stub_name != NULL)
4499 	sprintf (stub_name, "%08x_%s+%x_%d",
4500 		 input_section->id & 0xffffffff,
4501 		 hash->root.root.root.string,
4502 		 (int) rel->r_addend & 0xffffffff,
4503 		 (int) stub_type);
4504     }
4505   else
4506     {
4507       len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4508       stub_name = (char *) bfd_malloc (len);
4509       if (stub_name != NULL)
4510 	sprintf (stub_name, "%08x_%x:%x+%x_%d",
4511 		 input_section->id & 0xffffffff,
4512 		 sym_sec->id & 0xffffffff,
4513 		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4514 		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4515 		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4516 		 (int) rel->r_addend & 0xffffffff,
4517 		 (int) stub_type);
4518     }
4519 
4520   return stub_name;
4521 }
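
/* As an invented example of the format above: a stub of type 4 reaching the
   global symbol "printf" from the section with id 0x2a and addend 0 would be
   named "0000002a_printf+0_4"; stubs for local symbols instead use the
   "<section id>_<symbol section id>:<symbol index>+<addend>_<type>" form.  */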
4522 
4523 /* Look up an entry in the stub hash.  Stub entries are cached because
4524    creating the stub name takes a bit of time.  */
4525 
4526 static struct elf32_arm_stub_hash_entry *
4527 elf32_arm_get_stub_entry (const asection *input_section,
4528 			  const asection *sym_sec,
4529 			  struct elf_link_hash_entry *hash,
4530 			  const Elf_Internal_Rela *rel,
4531 			  struct elf32_arm_link_hash_table *htab,
4532 			  enum elf32_arm_stub_type stub_type)
4533 {
4534   struct elf32_arm_stub_hash_entry *stub_entry;
4535   struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4536   const asection *id_sec;
4537 
4538   if ((input_section->flags & SEC_CODE) == 0)
4539     return NULL;
4540 
4541   /* If this input section is part of a group of sections sharing one
4542      stub section, then use the id of the first section in the group.
4543      Stub names need to include a section id, as there may well be
4544      more than one stub used to reach, say, printf, and we need to
4545      distinguish between them.  */
4546   BFD_ASSERT (input_section->id <= htab->top_id);
4547   id_sec = htab->stub_group[input_section->id].link_sec;
4548 
4549   if (h != NULL && h->stub_cache != NULL
4550       && h->stub_cache->h == h
4551       && h->stub_cache->id_sec == id_sec
4552       && h->stub_cache->stub_type == stub_type)
4553     {
4554       stub_entry = h->stub_cache;
4555     }
4556   else
4557     {
4558       char *stub_name;
4559 
4560       stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4561       if (stub_name == NULL)
4562 	return NULL;
4563 
4564       stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4565 					stub_name, FALSE, FALSE);
4566       if (h != NULL)
4567 	h->stub_cache = stub_entry;
4568 
4569       free (stub_name);
4570     }
4571 
4572   return stub_entry;
4573 }
4574 
4575 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4576    section.  */
4577 
4578 static bfd_boolean
4579 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4580 {
4581   if (stub_type >= max_stub_type)
4582     abort ();  /* Should be unreachable.  */
4583 
4584   switch (stub_type)
4585     {
4586     case arm_stub_cmse_branch_thumb_only:
4587       return TRUE;
4588 
4589     default:
4590       return FALSE;
4591     }
4592 
4593   abort ();  /* Should be unreachable.  */
4594 }
4595 
4596 /* Required alignment (as a power of 2) for the dedicated section holding
4597    veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4598    with input sections.  */
4599 
4600 static int
4601 arm_dedicated_stub_output_section_required_alignment
4602   (enum elf32_arm_stub_type stub_type)
4603 {
4604   if (stub_type >= max_stub_type)
4605     abort ();  /* Should be unreachable.  */
4606 
4607   switch (stub_type)
4608     {
4609     /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
4610        boundary.  */
4611     case arm_stub_cmse_branch_thumb_only:
4612       return 5;
4613 
4614     default:
4615       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4616       return 0;
4617     }
4618 
4619   abort ();  /* Should be unreachable.  */
4620 }
4621 
4622 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4623    NULL if veneers of this type are interspersed with input sections.  */
4624 
4625 static const char *
4626 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4627 {
4628   if (stub_type >= max_stub_type)
4629     abort ();  /* Should be unreachable.  */
4630 
4631   switch (stub_type)
4632     {
4633     case arm_stub_cmse_branch_thumb_only:
4634       return ".gnu.sgstubs";
4635 
4636     default:
4637       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4638       return NULL;
4639     }
4640 
4641   abort ();  /* Should be unreachable.  */
4642 }
4643 
4644 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4645    returns the address of the hash table field in HTAB holding a pointer to the
4646    corresponding input section.  Otherwise, returns NULL.  */
4647 
4648 static asection **
4649 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4650 				      enum elf32_arm_stub_type stub_type)
4651 {
4652   if (stub_type >= max_stub_type)
4653     abort ();  /* Should be unreachable.  */
4654 
4655   switch (stub_type)
4656     {
4657     case arm_stub_cmse_branch_thumb_only:
4658       return &htab->cmse_stub_sec;
4659 
4660     default:
4661       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4662       return NULL;
4663     }
4664 
4665   abort ();  /* Should be unreachable.  */
4666 }
4667 
4668 /* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
4669    is the section branching into the veneer, or NULL if the stub should go in
4670    a dedicated output section.  Returns a pointer to the stub section, and the
4671    section to which the stub section will be attached (in *LINK_SEC_P).
4672    LINK_SEC_P may be NULL.  */
4673 
4674 static asection *
4675 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4676 				   struct elf32_arm_link_hash_table *htab,
4677 				   enum elf32_arm_stub_type stub_type)
4678 {
4679   asection *link_sec, *out_sec, **stub_sec_p;
4680   const char *stub_sec_prefix;
4681   bfd_boolean dedicated_output_section =
4682     arm_dedicated_stub_output_section_required (stub_type);
4683   int align;
4684 
4685   if (dedicated_output_section)
4686     {
4687       bfd *output_bfd = htab->obfd;
4688       const char *out_sec_name =
4689 	arm_dedicated_stub_output_section_name (stub_type);
4690       link_sec = NULL;
4691       stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4692       stub_sec_prefix = out_sec_name;
4693       align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4694       out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4695       if (out_sec == NULL)
4696 	{
4697 	  _bfd_error_handler (_("no address assigned to the veneers output "
4698 				"section %s"), out_sec_name);
4699 	  return NULL;
4700 	}
4701     }
4702   else
4703     {
4704       BFD_ASSERT (section->id <= htab->top_id);
4705       link_sec = htab->stub_group[section->id].link_sec;
4706       BFD_ASSERT (link_sec != NULL);
4707       stub_sec_p = &htab->stub_group[section->id].stub_sec;
4708       if (*stub_sec_p == NULL)
4709 	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4710       stub_sec_prefix = link_sec->name;
4711       out_sec = link_sec->output_section;
4712       align = htab->nacl_p ? 4 : 3;
4713     }
4714 
4715   if (*stub_sec_p == NULL)
4716     {
4717       size_t namelen;
4718       bfd_size_type len;
4719       char *s_name;
4720 
4721       namelen = strlen (stub_sec_prefix);
4722       len = namelen + sizeof (STUB_SUFFIX);
4723       s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4724       if (s_name == NULL)
4725 	return NULL;
4726 
4727       memcpy (s_name, stub_sec_prefix, namelen);
4728       memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4729       *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4730 					       align);
4731       if (*stub_sec_p == NULL)
4732 	return NULL;
4733 
4734       out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4735 			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4736 			| SEC_KEEP;
4737     }
4738 
4739   if (!dedicated_output_section)
4740     htab->stub_group[section->id].stub_sec = *stub_sec_p;
4741 
4742   if (link_sec_p)
4743     *link_sec_p = link_sec;
4744 
4745   return *stub_sec_p;
4746 }
4747 
4748 /* Add a new stub entry to the stub hash.  Not all fields of the new
4749    stub entry are initialised.  */
4750 
4751 static struct elf32_arm_stub_hash_entry *
4752 elf32_arm_add_stub (const char *stub_name, asection *section,
4753 		    struct elf32_arm_link_hash_table *htab,
4754 		    enum elf32_arm_stub_type stub_type)
4755 {
4756   asection *link_sec;
4757   asection *stub_sec;
4758   struct elf32_arm_stub_hash_entry *stub_entry;
4759 
4760   stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4761 						stub_type);
4762   if (stub_sec == NULL)
4763     return NULL;
4764 
4765   /* Enter this entry into the linker stub hash table.  */
4766   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4767 				     TRUE, FALSE);
4768   if (stub_entry == NULL)
4769     {
4770       if (section == NULL)
4771 	section = stub_sec;
4772       _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4773 			  section->owner, stub_name);
4774       return NULL;
4775     }
4776 
4777   stub_entry->stub_sec = stub_sec;
4778   stub_entry->stub_offset = (bfd_vma) -1;
4779   stub_entry->id_sec = link_sec;
4780 
4781   return stub_entry;
4782 }
4783 
4784 /* Store an Arm insn into an output section not processed by
4785    elf32_arm_write_section.  */
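/* In the helpers below, comparing byteswap_code with the output BFD's
   endianness inverts the byte order used for instructions when
   byteswap_code is set, i.e. when code must be stored with the opposite
   endianness to data (as in BE8-style images).  */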
4786 
4787 static void
4788 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4789 	      bfd * output_bfd, bfd_vma val, void * ptr)
4790 {
4791   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4792     bfd_putl32 (val, ptr);
4793   else
4794     bfd_putb32 (val, ptr);
4795 }
4796 
4797 /* Store a 16-bit Thumb insn into an output section not processed by
4798    elf32_arm_write_section.  */
4799 
4800 static void
4801 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4802 		bfd * output_bfd, bfd_vma val, void * ptr)
4803 {
4804   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4805     bfd_putl16 (val, ptr);
4806   else
4807     bfd_putb16 (val, ptr);
4808 }
4809 
4810 /* Store a Thumb2 insn into an output section not processed by
4811    elf32_arm_write_section.  */
4812 
4813 static void
4814 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4815 		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4816 {
4817   /* Thumb-2 instructions are streamed as two 16-bit halfwords.  */
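  /* For example, the value 0xf000d000 is emitted as the halfword 0xf000
     followed by the halfword 0xd000, each in the byte order chosen below.  */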
4818   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4819     {
4820       bfd_putl16 ((val >> 16) & 0xffff, ptr);
4821       bfd_putl16 ((val & 0xffff), ptr + 2);
4822     }
4823   else
4824     {
4825       bfd_putb16 ((val >> 16) & 0xffff, ptr);
4826       bfd_putb16 ((val & 0xffff), ptr + 2);
4827     }
4828 }
4829 
4830 /* If it's possible to change R_TYPE to a more efficient access
4831    model, return the new reloc type.  */
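/* For example, when producing a non-PIC link, a TLS descriptor access
   (R_ARM_TLS_GOTDESC and friends) against a local symbol is relaxed below to
   the local-exec model (R_ARM_TLS_LE32), while one against a global symbol
   is relaxed to the initial-exec model (R_ARM_TLS_IE32).  */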
4832 
4833 static unsigned
4834 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4835 			  struct elf_link_hash_entry *h)
4836 {
4837   int is_local = (h == NULL);
4838 
4839   if (bfd_link_pic (info)
4840       || (h && h->root.type == bfd_link_hash_undefweak))
4841     return r_type;
4842 
4843   /* We do not support relaxations for Old TLS models.  */
4844   switch (r_type)
4845     {
4846     case R_ARM_TLS_GOTDESC:
4847     case R_ARM_TLS_CALL:
4848     case R_ARM_THM_TLS_CALL:
4849     case R_ARM_TLS_DESCSEQ:
4850     case R_ARM_THM_TLS_DESCSEQ:
4851       return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4852     }
4853 
4854   return r_type;
4855 }
4856 
4857 static bfd_reloc_status_type elf32_arm_final_link_relocate
4858   (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4859    Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4860    const char *, unsigned char, enum arm_st_branch_type,
4861    struct elf_link_hash_entry *, bfd_boolean *, char **);
4862 
4863 static unsigned int
4864 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4865 {
4866   switch (stub_type)
4867     {
4868     case arm_stub_a8_veneer_b_cond:
4869     case arm_stub_a8_veneer_b:
4870     case arm_stub_a8_veneer_bl:
4871       return 2;
4872 
4873     case arm_stub_long_branch_any_any:
4874     case arm_stub_long_branch_v4t_arm_thumb:
4875     case arm_stub_long_branch_thumb_only:
4876     case arm_stub_long_branch_thumb2_only:
4877     case arm_stub_long_branch_thumb2_only_pure:
4878     case arm_stub_long_branch_v4t_thumb_thumb:
4879     case arm_stub_long_branch_v4t_thumb_arm:
4880     case arm_stub_short_branch_v4t_thumb_arm:
4881     case arm_stub_long_branch_any_arm_pic:
4882     case arm_stub_long_branch_any_thumb_pic:
4883     case arm_stub_long_branch_v4t_thumb_thumb_pic:
4884     case arm_stub_long_branch_v4t_arm_thumb_pic:
4885     case arm_stub_long_branch_v4t_thumb_arm_pic:
4886     case arm_stub_long_branch_thumb_only_pic:
4887     case arm_stub_long_branch_any_tls_pic:
4888     case arm_stub_long_branch_v4t_thumb_tls_pic:
4889     case arm_stub_cmse_branch_thumb_only:
4890     case arm_stub_a8_veneer_blx:
4891       return 4;
4892 
4893     case arm_stub_long_branch_arm_nacl:
4894     case arm_stub_long_branch_arm_nacl_pic:
4895       return 16;
4896 
4897     default:
4898       abort ();  /* Should be unreachable.  */
4899     }
4900 }
4901 
4902 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4903    veneering (TRUE) or have their own symbol (FALSE).  */
4904 
4905 static bfd_boolean
4906 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4907 {
4908   if (stub_type >= max_stub_type)
4909     abort ();  /* Should be unreachable.  */
4910 
4911   switch (stub_type)
4912     {
4913     case arm_stub_cmse_branch_thumb_only:
4914       return TRUE;
4915 
4916     default:
4917       return FALSE;
4918     }
4919 
4920   abort ();  /* Should be unreachable.  */
4921 }
4922 
4923 /* Returns the padding needed for the dedicated section used by stubs of type
4924    STUB_TYPE.  */
4925 
4926 static int
4927 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4928 {
4929   if (stub_type >= max_stub_type)
4930     abort ();  /* Should be unreachable.  */
4931 
4932   switch (stub_type)
4933     {
4934     case arm_stub_cmse_branch_thumb_only:
4935       return 32;
4936 
4937     default:
4938       return 0;
4939     }
4940 
4941   abort ();  /* Should be unreachable.  */
4942 }
4943 
4944 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4945    returns the address of the hash table field in HTAB holding the offset at
4946    which new veneers should be laid out in the stub section; NULL otherwise.  */
4947 
4948 static bfd_vma*
4949 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
4950 				enum elf32_arm_stub_type stub_type)
4951 {
4952   switch (stub_type)
4953     {
4954     case arm_stub_cmse_branch_thumb_only:
4955       return &htab->new_cmse_stub_offset;
4956 
4957     default:
4958       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4959       return NULL;
4960     }
4961 }
4962 
4963 static bfd_boolean
4964 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4965 		    void * in_arg)
4966 {
4967 #define MAXRELOCS 3
4968   bfd_boolean removed_sg_veneer;
4969   struct elf32_arm_stub_hash_entry *stub_entry;
4970   struct elf32_arm_link_hash_table *globals;
4971   struct bfd_link_info *info;
4972   asection *stub_sec;
4973   bfd *stub_bfd;
4974   bfd_byte *loc;
4975   bfd_vma sym_value;
4976   int template_size;
4977   int size;
4978   const insn_sequence *template_sequence;
4979   int i;
4980   int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4981   int stub_reloc_offset[MAXRELOCS] = {0, 0};
4982   int nrelocs = 0;
4983   int just_allocated = 0;
4984 
4985   /* Massage our args to the form they really have.  */
4986   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4987   info = (struct bfd_link_info *) in_arg;
4988 
4989   globals = elf32_arm_hash_table (info);
4990   if (globals == NULL)
4991     return FALSE;
4992 
4993   stub_sec = stub_entry->stub_sec;
4994 
4995   if ((globals->fix_cortex_a8 < 0)
4996       != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4997     /* We have to do less-strictly-aligned fixes last.  */
4998     return TRUE;
4999 
5000   /* Assign a slot at the end of section if none assigned yet.  */
5001   if (stub_entry->stub_offset == (bfd_vma) -1)
5002     {
5003       stub_entry->stub_offset = stub_sec->size;
5004       just_allocated = 1;
5005     }
5006   loc = stub_sec->contents + stub_entry->stub_offset;
5007 
5008   stub_bfd = stub_sec->owner;
5009 
5010   /* This is the address of the stub destination.  */
5011   sym_value = (stub_entry->target_value
5012 	       + stub_entry->target_section->output_offset
5013 	       + stub_entry->target_section->output_section->vma);
5014 
5015   template_sequence = stub_entry->stub_template;
5016   template_size = stub_entry->stub_template_size;
5017 
5018   size = 0;
5019   for (i = 0; i < template_size; i++)
5020     {
5021       switch (template_sequence[i].type)
5022 	{
5023 	case THUMB16_TYPE:
5024 	  {
5025 	    bfd_vma data = (bfd_vma) template_sequence[i].data;
5026 	    if (template_sequence[i].reloc_addend != 0)
5027 	      {
5028 		/* We've borrowed the reloc_addend field to mean we should
5029 		   insert a condition code into this (Thumb-1 branch)
5030 		   instruction.  See THUMB16_BCOND_INSN.  */
5031 		BFD_ASSERT ((data & 0xff00) == 0xd000);
5032 		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5033 	      }
5034 	    bfd_put_16 (stub_bfd, data, loc + size);
5035 	    size += 2;
5036 	  }
5037 	  break;
5038 
5039 	case THUMB32_TYPE:
5040 	  bfd_put_16 (stub_bfd,
5041 		      (template_sequence[i].data >> 16) & 0xffff,
5042 		      loc + size);
5043 	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5044 		      loc + size + 2);
5045 	  if (template_sequence[i].r_type != R_ARM_NONE)
5046 	    {
5047 	      stub_reloc_idx[nrelocs] = i;
5048 	      stub_reloc_offset[nrelocs++] = size;
5049 	    }
5050 	  size += 4;
5051 	  break;
5052 
5053 	case ARM_TYPE:
5054 	  bfd_put_32 (stub_bfd, template_sequence[i].data,
5055 		      loc + size);
5056 	  /* Handle cases where the target is encoded within the
5057 	     instruction.  */
5058 	  if (template_sequence[i].r_type == R_ARM_JUMP24)
5059 	    {
5060 	      stub_reloc_idx[nrelocs] = i;
5061 	      stub_reloc_offset[nrelocs++] = size;
5062 	    }
5063 	  size += 4;
5064 	  break;
5065 
5066 	case DATA_TYPE:
5067 	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5068 	  stub_reloc_idx[nrelocs] = i;
5069 	  stub_reloc_offset[nrelocs++] = size;
5070 	  size += 4;
5071 	  break;
5072 
5073 	default:
5074 	  BFD_FAIL ();
5075 	  return FALSE;
5076 	}
5077     }
5078 
5079   if (just_allocated)
5080     stub_sec->size += size;
5081 
5082   /* Stub size has already been computed in arm_size_one_stub. Check
5083      consistency.  */
5084   BFD_ASSERT (size == stub_entry->stub_size);
5085 
5086   /* Destination is Thumb. Force bit 0 to 1 to reflect this.  */
5087   if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5088     sym_value |= 1;
5089 
5090   /* Assume non-empty slots have at least one and at most MAXRELOCS entries
5091      to relocate in each stub.  */
5092   removed_sg_veneer =
5093     (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5094   BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5095 
5096   for (i = 0; i < nrelocs; i++)
5097     {
5098       Elf_Internal_Rela rel;
5099       bfd_boolean unresolved_reloc;
5100       char *error_message;
5101       bfd_vma points_to =
5102 	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5103 
5104       rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5105       rel.r_info = ELF32_R_INFO (0,
5106 				 template_sequence[stub_reloc_idx[i]].r_type);
5107       rel.r_addend = 0;
5108 
5109       if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5110 	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5111 	   template should refer back to the instruction after the original
5112 	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
5113 	   are only generated when both source and target are in the same
5114 	   section.  */
5115 	points_to = stub_entry->target_section->output_section->vma
5116 		    + stub_entry->target_section->output_offset
5117 		    + stub_entry->source_value;
5118 
5119       elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5120 	  (template_sequence[stub_reloc_idx[i]].r_type),
5121 	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5122 	   points_to, info, stub_entry->target_section, "", STT_FUNC,
5123 	   stub_entry->branch_type,
5124 	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5125 	   &error_message);
5126     }
5127 
5128   return TRUE;
5129 #undef MAXRELOCS
5130 }
5131 
5132 /* Calculate the template, template size and instruction size for a stub.
5133    Return value is the instruction size.  */
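/* For instance, a template made of one THUMB16_TYPE entry and one DATA_TYPE
   entry yields an instruction size of 2 + 4 = 6 bytes.  */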
5134 
5135 static unsigned int
5136 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5137 			     const insn_sequence **stub_template,
5138 			     int *stub_template_size)
5139 {
5140   const insn_sequence *template_sequence = NULL;
5141   int template_size = 0, i;
5142   unsigned int size;
5143 
5144   template_sequence = stub_definitions[stub_type].template_sequence;
5145   if (stub_template)
5146     *stub_template = template_sequence;
5147 
5148   template_size = stub_definitions[stub_type].template_size;
5149   if (stub_template_size)
5150     *stub_template_size = template_size;
5151 
5152   size = 0;
5153   for (i = 0; i < template_size; i++)
5154     {
5155       switch (template_sequence[i].type)
5156 	{
5157 	case THUMB16_TYPE:
5158 	  size += 2;
5159 	  break;
5160 
5161 	case ARM_TYPE:
5162 	case THUMB32_TYPE:
5163 	case DATA_TYPE:
5164 	  size += 4;
5165 	  break;
5166 
5167 	default:
5168 	  BFD_FAIL ();
5169 	  return 0;
5170 	}
5171     }
5172 
5173   return size;
5174 }
5175 
5176 /* As above, but don't actually build the stub.  Just bump offset so
5177    we know stub section sizes.  */
5178 
5179 static bfd_boolean
5180 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5181 		   void *in_arg ATTRIBUTE_UNUSED)
5182 {
5183   struct elf32_arm_stub_hash_entry *stub_entry;
5184   const insn_sequence *template_sequence;
5185   int template_size, size;
5186 
5187   /* Massage our args to the form they really have.  */
5188   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5189 
5190   BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5191 	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5192 
5193   size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5194 				      &template_size);
5195 
5196   /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
5197   if (stub_entry->stub_template_size)
5198     {
5199       stub_entry->stub_size = size;
5200       stub_entry->stub_template = template_sequence;
5201       stub_entry->stub_template_size = template_size;
5202     }
5203 
5204   /* Already accounted for.  */
5205   if (stub_entry->stub_offset != (bfd_vma) -1)
5206     return TRUE;
5207 
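  /* The rounding below pads each stub to a multiple of 8 bytes; a 12-byte
     stub, for instance, reserves 16 bytes in its stub section.  */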
5208   size = (size + 7) & ~7;
5209   stub_entry->stub_sec->size += size;
5210 
5211   return TRUE;
5212 }
5213 
5214 /* External entry points for sizing and building linker stubs.  */
5215 
5216 /* Set up various things so that we can make a list of input sections
5217    for each output section included in the link.  Returns -1 on error,
5218    0 when no stubs will be needed, and 1 on success.  */
5219 
5220 int
5221 elf32_arm_setup_section_lists (bfd *output_bfd,
5222 			       struct bfd_link_info *info)
5223 {
5224   bfd *input_bfd;
5225   unsigned int bfd_count;
5226   unsigned int top_id, top_index;
5227   asection *section;
5228   asection **input_list, **list;
5229   bfd_size_type amt;
5230   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5231 
5232   if (htab == NULL)
5233     return 0;
5234   if (! is_elf_hash_table (htab))
5235     return 0;
5236 
5237   /* Count the number of input BFDs and find the top input section id.  */
5238   for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5239        input_bfd != NULL;
5240        input_bfd = input_bfd->link.next)
5241     {
5242       bfd_count += 1;
5243       for (section = input_bfd->sections;
5244 	   section != NULL;
5245 	   section = section->next)
5246 	{
5247 	  if (top_id < section->id)
5248 	    top_id = section->id;
5249 	}
5250     }
5251   htab->bfd_count = bfd_count;
5252 
5253   amt = sizeof (struct map_stub) * (top_id + 1);
5254   htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5255   if (htab->stub_group == NULL)
5256     return -1;
5257   htab->top_id = top_id;
5258 
5259   /* We can't use output_bfd->section_count here to find the top output
5260      section index as some sections may have been removed, and
5261      _bfd_strip_section_from_output doesn't renumber the indices.  */
5262   for (section = output_bfd->sections, top_index = 0;
5263        section != NULL;
5264        section = section->next)
5265     {
5266       if (top_index < section->index)
5267 	top_index = section->index;
5268     }
5269 
5270   htab->top_index = top_index;
5271   amt = sizeof (asection *) * (top_index + 1);
5272   input_list = (asection **) bfd_malloc (amt);
5273   htab->input_list = input_list;
5274   if (input_list == NULL)
5275     return -1;
5276 
5277   /* For sections we aren't interested in, mark their entries with
5278      bfd_abs_section_ptr so they can be recognized and skipped later.  */
5279   list = input_list + top_index;
5280   do
5281     *list = bfd_abs_section_ptr;
5282   while (list-- != input_list);
5283 
5284   for (section = output_bfd->sections;
5285        section != NULL;
5286        section = section->next)
5287     {
5288       if ((section->flags & SEC_CODE) != 0)
5289 	input_list[section->index] = NULL;
5290     }
5291 
5292   return 1;
5293 }
5294 
5295 /* The linker repeatedly calls this function for each input section,
5296    in the order that input sections are linked into output sections.
5297    Build lists of input sections to determine groupings between which
5298    we may insert linker stubs.  */
5299 
5300 void
5301 elf32_arm_next_input_section (struct bfd_link_info *info,
5302 			      asection *isec)
5303 {
5304   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5305 
5306   if (htab == NULL)
5307     return;
5308 
5309   if (isec->output_section->index <= htab->top_index)
5310     {
5311       asection **list = htab->input_list + isec->output_section->index;
5312 
5313       if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5314 	{
5315 	  /* Steal the link_sec pointer for our list.  */
5316 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5317 	  /* This happens to make the list in reverse order,
5318 	     which we reverse later.  */
5319 	  PREV_SEC (isec) = *list;
5320 	  *list = isec;
5321 	}
5322     }
5323 }
5324 
5325 /* See whether we can group stub sections together.  Grouping stub
5326    sections may result in fewer stubs.  More importantly, we need to
5327    put all .init* and .fini* stubs at the end of the .init or
5328    .fini output sections respectively, because glibc splits the
5329    _init and _fini functions into multiple parts.  Putting a stub in
5330    the middle of a function is not a good idea.  */
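/* Roughly speaking, each run of input code sections whose combined span fits
   within the group size shares one stub section, which ends up attached to
   the last section of the run (its link_sec); sections that follow within
   range of that stub section may be attached to it as well.  */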
5331 
5332 static void
5333 group_sections (struct elf32_arm_link_hash_table *htab,
5334 		bfd_size_type stub_group_size,
5335 		bfd_boolean stubs_always_after_branch)
5336 {
5337   asection **list = htab->input_list;
5338 
5339   do
5340     {
5341       asection *tail = *list;
5342       asection *head;
5343 
5344       if (tail == bfd_abs_section_ptr)
5345 	continue;
5346 
5347       /* Reverse the list: we must avoid placing stubs at the
5348 	 beginning of the section because the beginning of the text
5349 	 section may be required for an interrupt vector in bare metal
5350 	 code.  */
5351 #define NEXT_SEC PREV_SEC
5352       head = NULL;
5353       while (tail != NULL)
5354 	{
5355 	  /* Pop from tail.  */
5356 	  asection *item = tail;
5357 	  tail = PREV_SEC (item);
5358 
5359 	  /* Push on head.  */
5360 	  NEXT_SEC (item) = head;
5361 	  head = item;
5362 	}
5363 
5364       while (head != NULL)
5365 	{
5366 	  asection *curr;
5367 	  asection *next;
5368 	  bfd_vma stub_group_start = head->output_offset;
5369 	  bfd_vma end_of_next;
5370 
5371 	  curr = head;
5372 	  while (NEXT_SEC (curr) != NULL)
5373 	    {
5374 	      next = NEXT_SEC (curr);
5375 	      end_of_next = next->output_offset + next->size;
5376 	      if (end_of_next - stub_group_start >= stub_group_size)
5377 		/* End of NEXT is too far from start, so stop.  */
5378 		break;
5379 	      /* Add NEXT to the group.  */
5380 	      curr = next;
5381 	    }
5382 
5383 	  /* OK, the size from the start to the start of CURR is less
5384 	     than stub_group_size and thus can be handled by one stub
5385 	     section.  (Or the head section is itself larger than
5386 	     stub_group_size, in which case we may be toast.)
5387 	     We should really be keeping track of the total size of
5388 	     stubs added here, as stubs contribute to the final output
5389 	     section size.  */
5390 	  do
5391 	    {
5392 	      next = NEXT_SEC (head);
5393 	      /* Set up this stub group.  */
5394 	      htab->stub_group[head->id].link_sec = curr;
5395 	    }
5396 	  while (head != curr && (head = next) != NULL);
5397 
5398 	  /* But wait, there's more!  Input sections up to stub_group_size
5399 	     bytes after the stub section can be handled by it too.  */
5400 	  if (!stubs_always_after_branch)
5401 	    {
5402 	      stub_group_start = curr->output_offset + curr->size;
5403 
5404 	      while (next != NULL)
5405 		{
5406 		  end_of_next = next->output_offset + next->size;
5407 		  if (end_of_next - stub_group_start >= stub_group_size)
5408 		    /* End of NEXT is too far from stubs, so stop.  */
5409 		    break;
5410 		  /* Add NEXT to the stub group.  */
5411 		  head = next;
5412 		  next = NEXT_SEC (head);
5413 		  htab->stub_group[head->id].link_sec = curr;
5414 		}
5415 	    }
5416 	  head = next;
5417 	}
5418     }
5419   while (list++ != htab->input_list + htab->top_index);
5420 
5421   free (htab->input_list);
5422 #undef PREV_SEC
5423 #undef NEXT_SEC
5424 }
5425 
5426 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5427    erratum fix.  */
5428 
5429 static int
5430 a8_reloc_compare (const void *a, const void *b)
5431 {
5432   const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5433   const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5434 
5435   if (ra->from < rb->from)
5436     return -1;
5437   else if (ra->from > rb->from)
5438     return 1;
5439   else
5440     return 0;
5441 }
5442 
5443 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5444 						    const char *, char **);
5445 
5446 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5447    branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
5448    NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
5449    otherwise.  */
5450 
5451 static bfd_boolean
5452 cortex_a8_erratum_scan (bfd *input_bfd,
5453 			struct bfd_link_info *info,
5454 			struct a8_erratum_fix **a8_fixes_p,
5455 			unsigned int *num_a8_fixes_p,
5456 			unsigned int *a8_fix_table_size_p,
5457 			struct a8_erratum_reloc *a8_relocs,
5458 			unsigned int num_a8_relocs,
5459 			unsigned prev_num_a8_fixes,
5460 			bfd_boolean *stub_changed_p)
5461 {
5462   asection *section;
5463   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5464   struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5465   unsigned int num_a8_fixes = *num_a8_fixes_p;
5466   unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5467 
5468   if (htab == NULL)
5469     return FALSE;
5470 
5471   for (section = input_bfd->sections;
5472        section != NULL;
5473        section = section->next)
5474     {
5475       bfd_byte *contents = NULL;
5476       struct _arm_elf_section_data *sec_data;
5477       unsigned int span;
5478       bfd_vma base_vma;
5479 
5480       if (elf_section_type (section) != SHT_PROGBITS
5481 	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5482 	  || (section->flags & SEC_EXCLUDE) != 0
5483 	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5484 	  || (section->output_section == bfd_abs_section_ptr))
5485 	continue;
5486 
5487       base_vma = section->output_section->vma + section->output_offset;
5488 
5489       if (elf_section_data (section)->this_hdr.contents != NULL)
5490 	contents = elf_section_data (section)->this_hdr.contents;
5491       else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5492 	return TRUE;
5493 
5494       sec_data = elf32_arm_section_data (section);
5495 
5496       for (span = 0; span < sec_data->mapcount; span++)
5497 	{
5498 	  unsigned int span_start = sec_data->map[span].vma;
5499 	  unsigned int span_end = (span == sec_data->mapcount - 1)
5500 	    ? section->size : sec_data->map[span + 1].vma;
5501 	  unsigned int i;
5502 	  char span_type = sec_data->map[span].type;
5503 	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5504 
5505 	  if (span_type != 't')
5506 	    continue;
5507 
5508 	  /* Span is entirely within a single 4KB region: skip scanning.  */
5509 	  if (((base_vma + span_start) & ~0xfff)
5510 	      == ((base_vma + span_end) & ~0xfff))
5511 	    continue;
5512 
5513 	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5514 
5515 	       * The opcode is BLX.W, BL.W, B.W, Bcc.W
5516 	       * The branch target is in the same 4KB region as the
5517 		 first half of the branch.
5518 	       * The instruction before the branch is a 32-bit
5519 		 length non-branch instruction.  */
5520 	  for (i = span_start; i < span_end;)
5521 	    {
5522 	      unsigned int insn = bfd_getl16 (&contents[i]);
5523 	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5524 	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5525 
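	      /* The first halfword of a 32-bit Thumb-2 instruction has bits
		 [15:13] all set and bits [12:11] non-zero (i.e. its top five
		 bits are 0b11101, 0b11110 or 0b11111), which is what the
		 test below checks for.  */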
5526 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5527 		insn_32bit = TRUE;
5528 
5529 	      if (insn_32bit)
5530 		{
5531 		  /* Load the rest of the insn (in manual-friendly order).  */
5532 		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5533 
5534 		  /* Encoding T4: B<c>.W.  */
5535 		  is_b = (insn & 0xf800d000) == 0xf0009000;
5536 		  /* Encoding T1: BL<c>.W.  */
5537 		  is_bl = (insn & 0xf800d000) == 0xf000d000;
5538 		  /* Encoding T2: BLX<c>.W.  */
5539 		  is_blx = (insn & 0xf800d000) == 0xf000c000;
5540 		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
5541 		  is_bcc = (insn & 0xf800d000) == 0xf0008000
5542 			   && (insn & 0x07f00000) != 0x03800000;
5543 		}
5544 
5545 	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5546 
5547 	      if (((base_vma + i) & 0xfff) == 0xffe
5548 		  && insn_32bit
5549 		  && is_32bit_branch
5550 		  && last_was_32bit
5551 		  && ! last_was_branch)
5552 		{
5553 		  bfd_signed_vma offset = 0;
5554 		  bfd_boolean force_target_arm = FALSE;
5555 		  bfd_boolean force_target_thumb = FALSE;
5556 		  bfd_vma target;
5557 		  enum elf32_arm_stub_type stub_type = arm_stub_none;
5558 		  struct a8_erratum_reloc key, *found;
5559 		  bfd_boolean use_plt = FALSE;
5560 
5561 		  key.from = base_vma + i;
5562 		  found = (struct a8_erratum_reloc *)
5563 		      bsearch (&key, a8_relocs, num_a8_relocs,
5564 			       sizeof (struct a8_erratum_reloc),
5565 			       &a8_reloc_compare);
5566 
5567 		  if (found)
5568 		    {
5569 		      char *error_message = NULL;
5570 		      struct elf_link_hash_entry *entry;
5571 
5572 		      /* We don't care about the error returned from this
5573 			 function, only if there is glue or not.  */
5574 		      entry = find_thumb_glue (info, found->sym_name,
5575 					       &error_message);
5576 
5577 		      if (entry)
5578 			found->non_a8_stub = TRUE;
5579 
5580 		      /* Keep a simpler condition, for the sake of clarity.  */
5581 		      if (htab->root.splt != NULL && found->hash != NULL
5582 			  && found->hash->root.plt.offset != (bfd_vma) -1)
5583 			use_plt = TRUE;
5584 
5585 		      if (found->r_type == R_ARM_THM_CALL)
5586 			{
5587 			  if (found->branch_type == ST_BRANCH_TO_ARM
5588 			      || use_plt)
5589 			    force_target_arm = TRUE;
5590 			  else
5591 			    force_target_thumb = TRUE;
5592 			}
5593 		    }
5594 
5595 		  /* Check if we have an offending branch instruction.  */
5596 
5597 		  if (found && found->non_a8_stub)
5598 		    /* We've already made a stub for this instruction, e.g.
5599 		       it's a long branch or a Thumb->ARM stub.  Assume that
5600 		       stub will suffice to work around the A8 erratum (see
5601 		       setting of always_after_branch above).  */
5602 		    ;
5603 		  else if (is_bcc)
5604 		    {
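		      /* Reassemble the T3 B<cond>.W byte offset from its
			 scattered fields (imm11, imm6, J1, J2 and S), giving
			 a sign-extended 21-bit value.  */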
5605 		      offset = (insn & 0x7ff) << 1;
5606 		      offset |= (insn & 0x3f0000) >> 4;
5607 		      offset |= (insn & 0x2000) ? 0x40000 : 0;
5608 		      offset |= (insn & 0x800) ? 0x80000 : 0;
5609 		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
5610 		      if (offset & 0x100000)
5611 			offset |= ~ ((bfd_signed_vma) 0xfffff);
5612 		      stub_type = arm_stub_a8_veneer_b_cond;
5613 		    }
5614 		  else if (is_b || is_bl || is_blx)
5615 		    {
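		      /* Reassemble the T1/T2/T4 branch offset: per the
			 architecture, I1 = NOT(J1 EOR S), I2 = NOT(J2 EOR S)
			 and the byte offset is the sign-extension of
			 S:I1:I2:imm10:imm11:'0'.  */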
5616 		      int s = (insn & 0x4000000) != 0;
5617 		      int j1 = (insn & 0x2000) != 0;
5618 		      int j2 = (insn & 0x800) != 0;
5619 		      int i1 = !(j1 ^ s);
5620 		      int i2 = !(j2 ^ s);
5621 
5622 		      offset = (insn & 0x7ff) << 1;
5623 		      offset |= (insn & 0x3ff0000) >> 4;
5624 		      offset |= i2 << 22;
5625 		      offset |= i1 << 23;
5626 		      offset |= s << 24;
5627 		      if (offset & 0x1000000)
5628 			offset |= ~ ((bfd_signed_vma) 0xffffff);
5629 
5630 		      if (is_blx)
5631 			offset &= ~ ((bfd_signed_vma) 3);
5632 
5633 		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
5634 			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5635 		    }
5636 
5637 		  if (stub_type != arm_stub_none)
5638 		    {
5639 		      bfd_vma pc_for_insn = base_vma + i + 4;
5640 
5641 		      /* The original instruction is a BL, but the target is
5642 			 an ARM instruction.  If we were not making a stub,
5643 			 the BL would have been converted to a BLX.  Use the
5644 			 BLX stub instead in that case.  */
5645 		      if (htab->use_blx && force_target_arm
5646 			  && stub_type == arm_stub_a8_veneer_bl)
5647 			{
5648 			  stub_type = arm_stub_a8_veneer_blx;
5649 			  is_blx = TRUE;
5650 			  is_bl = FALSE;
5651 			}
5652 		      /* Conversely, if the original instruction was
5653 			 BLX but the target is Thumb mode, use the BL
5654 			 stub.  */
5655 		      else if (force_target_thumb
5656 			       && stub_type == arm_stub_a8_veneer_blx)
5657 			{
5658 			  stub_type = arm_stub_a8_veneer_bl;
5659 			  is_blx = FALSE;
5660 			  is_bl = TRUE;
5661 			}
5662 
5663 		      if (is_blx)
5664 			pc_for_insn &= ~ ((bfd_vma) 3);
5665 
5666 		      /* If we found a relocation, use the proper destination,
5667 			 not the offset in the (unrelocated) instruction.
5668 			 Note this is always done if we switched the stub type
5669 			 above.  */
5670 		      if (found)
5671 			offset =
5672 			  (bfd_signed_vma) (found->destination - pc_for_insn);
5673 
5674 		      /* If the stub will use a Thumb-mode branch to a
5675 			 PLT target, redirect it to the preceding Thumb
5676 			 entry point.  */
5677 		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5678 			offset -= PLT_THUMB_STUB_SIZE;
5679 
5680 		      target = pc_for_insn + offset;
5681 
5682 		      /* The BLX stub is ARM-mode code.  Adjust the offset to
5683 			 take the different PC value (+8 instead of +4) into
5684 			 account.  */
5685 		      if (stub_type == arm_stub_a8_veneer_blx)
5686 			offset += 4;
5687 
5688 		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5689 			{
5690 			  char *stub_name = NULL;
5691 
5692 			  if (num_a8_fixes == a8_fix_table_size)
5693 			    {
5694 			      a8_fix_table_size *= 2;
5695 			      a8_fixes = (struct a8_erratum_fix *)
5696 				  bfd_realloc (a8_fixes,
5697 					       sizeof (struct a8_erratum_fix)
5698 					       * a8_fix_table_size);
5699 			    }
5700 
5701 			  if (num_a8_fixes < prev_num_a8_fixes)
5702 			    {
5703 			      /* If we're doing a subsequent scan,
5704 				 check if we've found the same fix as
5705 				 before, and try and reuse the stub
5706 				 name.  */
5707 			      stub_name = a8_fixes[num_a8_fixes].stub_name;
5708 			      if ((a8_fixes[num_a8_fixes].section != section)
5709 				  || (a8_fixes[num_a8_fixes].offset != i))
5710 				{
5711 				  free (stub_name);
5712 				  stub_name = NULL;
5713 				  *stub_changed_p = TRUE;
5714 				}
5715 			    }
5716 
5717 			  if (!stub_name)
5718 			    {
5719 			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5720 			      if (stub_name != NULL)
5721 				sprintf (stub_name, "%x:%x", section->id, i);
5722 			    }
5723 
5724 			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5725 			  a8_fixes[num_a8_fixes].section = section;
5726 			  a8_fixes[num_a8_fixes].offset = i;
5727 			  a8_fixes[num_a8_fixes].target_offset =
5728 			    target - base_vma;
5729 			  a8_fixes[num_a8_fixes].orig_insn = insn;
5730 			  a8_fixes[num_a8_fixes].stub_name = stub_name;
5731 			  a8_fixes[num_a8_fixes].stub_type = stub_type;
5732 			  a8_fixes[num_a8_fixes].branch_type =
5733 			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5734 
5735 			  num_a8_fixes++;
5736 			}
5737 		    }
5738 		}
5739 
5740 	      i += insn_32bit ? 4 : 2;
5741 	      last_was_32bit = insn_32bit;
5742 	      last_was_branch = is_32bit_branch;
5743 	    }
5744 	}
5745 
5746       if (elf_section_data (section)->this_hdr.contents == NULL)
5747 	free (contents);
5748     }
5749 
5750   *a8_fixes_p = a8_fixes;
5751   *num_a8_fixes_p = num_a8_fixes;
5752   *a8_fix_table_size_p = a8_fix_table_size;
5753 
5754   return FALSE;
5755 }
5756 
5757 /* Create or update a stub entry depending on whether the stub can already be
5758    found in HTAB.  The stub is identified by:
5759    - its type STUB_TYPE
5760    - its source branch (note that several can share the same stub) whose
5761      section and relocation (if any) are given by SECTION and IRELA
5762      respectively
5763    - its target symbol whose input section, hash, name, value and branch type
5764      are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5765      respectively
5766 
5767    If found, the value of the stub's target symbol is updated from SYM_VALUE
5768    and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
5769    TRUE and the stub entry is initialized.
5770 
5771    Returns the stub that was created or updated, or NULL if an error
5772    occurred.  */
5773 
5774 static struct elf32_arm_stub_hash_entry *
5775 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5776 		       enum elf32_arm_stub_type stub_type, asection *section,
5777 		       Elf_Internal_Rela *irela, asection *sym_sec,
5778 		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
5779 		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
5780 		       bfd_boolean *new_stub)
5781 {
5782   const asection *id_sec;
5783   char *stub_name;
5784   struct elf32_arm_stub_hash_entry *stub_entry;
5785   unsigned int r_type;
5786   bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5787 
5788   BFD_ASSERT (stub_type != arm_stub_none);
5789   *new_stub = FALSE;
5790 
5791   if (sym_claimed)
5792     stub_name = sym_name;
5793   else
5794     {
5795       BFD_ASSERT (irela);
5796       BFD_ASSERT (section);
5797       BFD_ASSERT (section->id <= htab->top_id);
5798 
5799       /* Support for grouping stub sections.  */
5800       id_sec = htab->stub_group[section->id].link_sec;
5801 
5802       /* Get the name of this stub.  */
5803       stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5804 				       stub_type);
5805       if (!stub_name)
5806 	return NULL;
5807     }
5808 
5809   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5810 				     FALSE);
5811   /* The proper stub has already been created, just update its value.  */
5812   if (stub_entry != NULL)
5813     {
5814       if (!sym_claimed)
5815 	free (stub_name);
5816       stub_entry->target_value = sym_value;
5817       return stub_entry;
5818     }
5819 
5820   stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5821   if (stub_entry == NULL)
5822     {
5823       if (!sym_claimed)
5824 	free (stub_name);
5825       return NULL;
5826     }
5827 
5828   stub_entry->target_value = sym_value;
5829   stub_entry->target_section = sym_sec;
5830   stub_entry->stub_type = stub_type;
5831   stub_entry->h = hash;
5832   stub_entry->branch_type = branch_type;
5833 
5834   if (sym_claimed)
5835     stub_entry->output_name = sym_name;
5836   else
5837     {
5838       if (sym_name == NULL)
5839 	sym_name = "unnamed";
5840       stub_entry->output_name = (char *)
5841 	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5842 				   + strlen (sym_name));
5843       if (stub_entry->output_name == NULL)
5844 	{
5845 	  free (stub_name);
5846 	  return NULL;
5847 	}
5848 
5849       /* For historical reasons, use the existing names for ARM-to-Thumb and
5850 	 Thumb-to-ARM stubs.  */
5851       r_type = ELF32_R_TYPE (irela->r_info);
5852       if ((r_type == (unsigned int) R_ARM_THM_CALL
5853 	   || r_type == (unsigned int) R_ARM_THM_JUMP24
5854 	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
5855 	  && branch_type == ST_BRANCH_TO_ARM)
5856 	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5857       else if ((r_type == (unsigned int) R_ARM_CALL
5858 		|| r_type == (unsigned int) R_ARM_JUMP24)
5859 	       && branch_type == ST_BRANCH_TO_THUMB)
5860 	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5861       else
5862 	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5863     }
5864 
5865   *new_stub = TRUE;
5866   return stub_entry;
5867 }
5868 
5869 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5870    gateway veneer to transition from non-secure to secure state and create them
5871    accordingly.
5872 
5873    "ARMv8-M Security Extensions: Requirements on Development Tools" document
5874    defines the conditions that govern Secure Gateway veneer creation for a
5875    given symbol <SYM> as follows:
5876    - it has function type
5877    - it has non local binding
5878    - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5879      same type, binding and value as <SYM> (called normal symbol).
5880    An entry function can handle secure state transition itself in which case
5881    its special symbol would have a different value from the normal symbol.
5882 
5883    OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5884    entry mapping while HTAB gives the name to hash entry mapping.
5885    *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5886    *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
5887 
5888    The return value gives whether a stub failed to be allocated.  */
5889 
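/* As an illustration: for a secure entry function `foo', the toolchain emits
   both `foo' and the special symbol `__acle_se_foo'.  When the two have the
   same value, an SG veneer is created for `foo' (and, since such stubs claim
   their symbol, `foo' ends up resolving to the veneer); when the values
   differ, `foo' is assumed to perform the state transition itself and no
   veneer is created here.  */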
5890 static bfd_boolean
5891 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5892 	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5893 	   int *cmse_stub_created)
5894 {
5895   const struct elf_backend_data *bed;
5896   Elf_Internal_Shdr *symtab_hdr;
5897   unsigned i, j, sym_count, ext_start;
5898   Elf_Internal_Sym *cmse_sym, *local_syms;
5899   struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5900   enum arm_st_branch_type branch_type;
5901   char *sym_name, *lsym_name;
5902   bfd_vma sym_value;
5903   asection *section;
5904   struct elf32_arm_stub_hash_entry *stub_entry;
5905   bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
5906 
5907   bed = get_elf_backend_data (input_bfd);
5908   symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5909   sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5910   ext_start = symtab_hdr->sh_info;
5911   is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5912 	    && out_attr[Tag_CPU_arch_profile].i == 'M');
5913 
5914   local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5915   if (local_syms == NULL)
5916     local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5917 				       symtab_hdr->sh_info, 0, NULL, NULL,
5918 				       NULL);
5919   if (symtab_hdr->sh_info && local_syms == NULL)
5920     return FALSE;
5921 
5922   /* Scan symbols.  */
5923   for (i = 0; i < sym_count; i++)
5924     {
5925       cmse_invalid = FALSE;
5926 
5927       if (i < ext_start)
5928 	{
5929 	  cmse_sym = &local_syms[i];
5930 	  /* Not a special symbol.  */
5931 	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_sym->st_target_internal))
5932 	    continue;
5933 	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
5934 						      symtab_hdr->sh_link,
5935 						      cmse_sym->st_name);
5936 	  /* Special symbol with local binding.  */
5937 	  cmse_invalid = TRUE;
5938 	}
5939       else
5940 	{
5941 	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
5942 	  sym_name = (char *) cmse_hash->root.root.root.string;
5943 
5944 	  /* Not a special symbol.  */
5945 	  if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
5946 	    continue;
5947 
5948 	  /* Special symbol has incorrect binding or type.  */
5949 	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
5950 	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
5951 	      || cmse_hash->root.type != STT_FUNC)
5952 	    cmse_invalid = TRUE;
5953 	}
5954 
5955       if (!is_v8m)
5956 	{
5957 	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
5958 				"ARMv8-M architecture or later"),
5959 			      input_bfd, sym_name);
5960 	  is_v8m = TRUE; /* Avoid multiple warnings.  */
5961 	  ret = FALSE;
5962 	}
5963 
5964       if (cmse_invalid)
5965 	{
5966 	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
5967 				" a global or weak function symbol"),
5968 			      input_bfd, sym_name);
5969 	  ret = FALSE;
5970 	  if (i < ext_start)
5971 	    continue;
5972 	}
5973 
5974       sym_name += strlen (CMSE_PREFIX);
5975       hash = (struct elf32_arm_link_hash_entry *)
5976 	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
5977 
5978       /* No associated normal symbol or it is neither global nor weak.  */
5979       if (!hash
5980 	  || (hash->root.root.type != bfd_link_hash_defined
5981 	      && hash->root.root.type != bfd_link_hash_defweak)
5982 	  || hash->root.type != STT_FUNC)
5983 	{
5984 	  /* Initialize here to avoid warning about use of possibly
5985 	     uninitialized variable.  */
5986 	  j = 0;
5987 
5988 	  if (!hash)
5989 	    {
5990 	      /* Searching for a normal symbol with local binding.  */
5991 	      for (; j < ext_start; j++)
5992 		{
5993 		  lsym_name =
5994 		    bfd_elf_string_from_elf_section (input_bfd,
5995 						     symtab_hdr->sh_link,
5996 						     local_syms[j].st_name);
5997 		  if (!strcmp (sym_name, lsym_name))
5998 		    break;
5999 		}
6000 	    }
6001 
6002 	  if (hash || j < ext_start)
6003 	    {
6004 	      _bfd_error_handler
6005 		(_("%pB: invalid standard symbol `%s'; it must be "
6006 		   "a global or weak function symbol"),
6007 		 input_bfd, sym_name);
6008 	    }
6009 	  else
6010 	    _bfd_error_handler
6011 	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6012 	  ret = FALSE;
6013 	  if (!hash)
6014 	    continue;
6015 	}
6016 
6017       sym_value = hash->root.root.u.def.value;
6018       section = hash->root.root.u.def.section;
6019 
6020       if (cmse_hash->root.root.u.def.section != section)
6021 	{
6022 	  _bfd_error_handler
6023 	    (_("%pB: `%s' and its special symbol are in different sections"),
6024 	     input_bfd, sym_name);
6025 	  ret = FALSE;
6026 	}
6027       if (cmse_hash->root.root.u.def.value != sym_value)
6028 	continue; /* Ignore: could be an entry function starting with SG.  */
6029 
6030       /* If this section is a link-once section that will be discarded, then
6031 	 don't create any stubs.  */
6032       if (section->output_section == NULL)
6033 	{
6034 	  _bfd_error_handler
6035 	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6036 	  continue;
6037 	}
6038 
6039       if (hash->root.size == 0)
6040 	{
6041 	  _bfd_error_handler
6042 	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6043 	  ret = FALSE;
6044 	}
6045 
6046       if (!ret)
6047 	continue;
6048       branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6049       stub_entry
6050 	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6051 				 NULL, NULL, section, hash, sym_name,
6052 				 sym_value, branch_type, &new_stub);
6053 
6054       if (stub_entry == NULL)
6055 	 ret = FALSE;
6056       else
6057 	{
6058 	  BFD_ASSERT (new_stub);
6059 	  (*cmse_stub_created)++;
6060 	}
6061     }
6062 
6063   if (!symtab_hdr->contents)
6064     free (local_syms);
6065   return ret;
6066 }
6067 
6068 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6069    code entry function, i.e. can be called from non-secure code without using
6070    a veneer.  */
6071 
6072 static bfd_boolean
6073 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6074 {
6075   bfd_byte contents[4];
6076   uint32_t first_insn;
6077   asection *section;
6078   file_ptr offset;
6079   bfd *abfd;
6080 
6081   /* Defined symbol of function type.  */
6082   if (hash->root.root.type != bfd_link_hash_defined
6083       && hash->root.root.type != bfd_link_hash_defweak)
6084     return FALSE;
6085   if (hash->root.type != STT_FUNC)
6086     return FALSE;
6087 
6088   /* Read first instruction.  */
6089   section = hash->root.root.u.def.section;
6090   abfd = section->owner;
6091   offset = hash->root.root.u.def.value - section->vma;
6092   if (!bfd_get_section_contents (abfd, section, contents, offset,
6093 				 sizeof (contents)))
6094     return FALSE;
6095 
6096   first_insn = bfd_get_32 (abfd, contents);
6097 
6098   /* Starts with an SG instruction.  */
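  /* SG is encoded as the two halfwords 0xe97f 0xe97f, hence the 32-bit
     value tested for below.  */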
6099   return first_insn == 0xe97fe97f;
6100 }
6101 
6102 /* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a new
6103    secure gateway veneer (i.e. the veneer was not in the input import library)
6104    and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */
6105 
6106 static bfd_boolean
6107 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6108 {
6109   struct elf32_arm_stub_hash_entry *stub_entry;
6110   struct bfd_link_info *info;
6111 
6112   /* Massage our args to the form they really have.  */
6113   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6114   info = (struct bfd_link_info *) gen_info;
6115 
6116   if (info->out_implib_bfd)
6117     return TRUE;
6118 
6119   if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6120     return TRUE;
6121 
6122   if (stub_entry->stub_offset == (bfd_vma) -1)
6123     _bfd_error_handler ("  %s", stub_entry->output_name);
6124 
6125   return TRUE;
6126 }
6127 
6128 /* Set the offset of each secure gateway veneer so that its address remains
6129    identical to the one in the input import library referred to by
6130    HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
6131    (present in the input import library but absent from the executable being
6132    linked) or if new veneers appeared and there is no output import library
6133    (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6134    number of secure gateway veneers found in the input import library).
6135 
6136    The function returns whether an error occurred.  If no error occurred,
6137    *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6138    and this function, and HTAB->new_cmse_stub_offset is set to the offset just
6139    past the highest-addressed veneer observed, so new veneers are laid out after it.  */
6140 
6141 static bfd_boolean
6142 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6143 				  struct elf32_arm_link_hash_table *htab,
6144 				  int *cmse_stub_created)
6145 {
6146   long symsize;
6147   char *sym_name;
6148   flagword flags;
6149   long i, symcount;
6150   bfd *in_implib_bfd;
6151   asection *stub_out_sec;
6152   bfd_boolean ret = TRUE;
6153   Elf_Internal_Sym *intsym;
6154   const char *out_sec_name;
6155   bfd_size_type cmse_stub_size;
6156   asymbol **sympp = NULL, *sym;
6157   struct elf32_arm_link_hash_entry *hash;
6158   const insn_sequence *cmse_stub_template;
6159   struct elf32_arm_stub_hash_entry *stub_entry;
6160   int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6161   bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6162   bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6163 
6164   /* No input secure gateway import library.  */
6165   if (!htab->in_implib_bfd)
6166     return TRUE;
6167 
6168   in_implib_bfd = htab->in_implib_bfd;
6169   if (!htab->cmse_implib)
6170     {
6171       _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6172 			    "Gateway import libraries"), in_implib_bfd);
6173       return FALSE;
6174     }
6175 
6176   /* Get symbol table size.  */
6177   symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6178   if (symsize < 0)
6179     return FALSE;
6180 
6181   /* Read in the input secure gateway import library's symbol table.  */
6182   sympp = (asymbol **) xmalloc (symsize);
6183   symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6184   if (symcount < 0)
6185     {
6186       ret = FALSE;
6187       goto free_sym_buf;
6188     }
6189 
6190   htab->new_cmse_stub_offset = 0;
6191   cmse_stub_size =
6192     find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6193 				 &cmse_stub_template,
6194 				 &cmse_stub_template_size);
6195   out_sec_name =
6196     arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6197   stub_out_sec =
6198     bfd_get_section_by_name (htab->obfd, out_sec_name);
6199   if (stub_out_sec != NULL)
6200     cmse_stub_sec_vma = stub_out_sec->vma;
6201 
6202   /* Set addresses of veneers mentioned in the input secure gateway import
6203      library's symbol table.  */
6204   for (i = 0; i < symcount; i++)
6205     {
6206       sym = sympp[i];
6207       flags = sym->flags;
6208       sym_name = (char *) bfd_asymbol_name (sym);
6209       intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6210 
6211       if (sym->section != bfd_abs_section_ptr
6212 	  || !(flags & (BSF_GLOBAL | BSF_WEAK))
6213 	  || (flags & BSF_FUNCTION) != BSF_FUNCTION
6214 	  || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6215 	      != ST_BRANCH_TO_THUMB))
6216 	{
6217 	  _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6218 				"symbol should be absolute, global and "
6219 				"refer to Thumb functions"),
6220 			      in_implib_bfd, sym_name);
6221 	  ret = FALSE;
6222 	  continue;
6223 	}
6224 
6225       veneer_value = bfd_asymbol_value (sym);
6226       stub_offset = veneer_value - cmse_stub_sec_vma;
6227       stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6228 					 FALSE, FALSE);
6229       hash = (struct elf32_arm_link_hash_entry *)
6230 	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6231 
6232       /* The stub entry should have been created by cmse_scan, or else the symbol
6233 	 should be that of a secure function callable from non-secure code.  */
6234       if (!stub_entry && !hash)
6235 	{
6236 	  bfd_boolean new_stub;
6237 
6238 	  _bfd_error_handler
6239 	    (_("entry function `%s' disappeared from secure code"), sym_name);
6240 	  hash = (struct elf32_arm_link_hash_entry *)
6241 	    elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6242 	  stub_entry
6243 	    = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6244 				     NULL, NULL, bfd_abs_section_ptr, hash,
6245 				     sym_name, veneer_value,
6246 				     ST_BRANCH_TO_THUMB, &new_stub);
6247 	  if (stub_entry == NULL)
6248 	    {
6249 	      ret = FALSE;
6250 	      continue;
6251 	    }
6252 	  BFD_ASSERT (new_stub);
6253 	  new_cmse_stubs_created++;
6254 	  (*cmse_stub_created)++;
6255 	  stub_entry->stub_template_size = stub_entry->stub_size = 0;
6256 	  stub_entry->stub_offset = stub_offset;
6257 	}
6258       /* Symbol found is not callable from non secure code.  */
6259       /* Symbol found is not callable from non-secure code.  */
6260 	{
6261 	  if (!cmse_entry_fct_p (hash))
6262 	    {
6263 	      _bfd_error_handler (_("`%s' refers to a non entry function"),
6264 				  sym_name);
6265 	      ret = FALSE;
6266 	    }
6267 	  continue;
6268 	}
6269       else
6270 	{
6271 	  /* Only stubs for SG veneers should have been created.  */
6272 	  BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6273 
6274 	  /* Check visibility hasn't changed.  */
6275 	  if (!!(flags & BSF_GLOBAL)
6276 	      != (hash->root.root.type == bfd_link_hash_defined))
6277 	    _bfd_error_handler
6278 	      (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6279 	       sym_name);
6280 
6281 	  stub_entry->stub_offset = stub_offset;
6282 	}
6283 
6284       /* Size should match that of a SG veneer.  */
6285       if (intsym->st_size != cmse_stub_size)
6286 	{
6287 	  _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6288 			      in_implib_bfd, sym_name);
6289 	  ret = FALSE;
6290 	}
6291 
6292       /* Previous veneer address is before current SG veneer section.  */
6293       if (veneer_value < cmse_stub_sec_vma)
6294 	{
6295 	  /* Avoid offset underflow.  */
6296 	  if (stub_entry)
6297 	    stub_entry->stub_offset = 0;
6298 	  stub_offset = 0;
6299 	  ret = FALSE;
6300 	}
6301 
6302       /* Complain if stub offset not a multiple of stub size.  */
6303       if (stub_offset % cmse_stub_size)
6304 	{
6305 	  _bfd_error_handler
6306 	    (_("offset of veneer for entry function `%s' not a multiple of "
6307 	       "its size"), sym_name);
6308 	  ret = FALSE;
6309 	}
6310 
6311       if (!ret)
6312 	continue;
6313 
6314       new_cmse_stubs_created--;
6315       if (veneer_value < cmse_stub_array_start)
6316 	cmse_stub_array_start = veneer_value;
6317       next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6318       if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6319 	htab->new_cmse_stub_offset = next_cmse_stub_offset;
6320     }
6321 
6322   if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6323     {
6324       BFD_ASSERT (new_cmse_stubs_created > 0);
6325       _bfd_error_handler
6326 	(_("new entry function(s) introduced but no output import library "
6327 	   "specified:"));
6328       bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6329     }
6330 
6331   if (cmse_stub_array_start != cmse_stub_sec_vma)
6332     {
6333       _bfd_error_handler
6334 	(_("start address of `%s' is different from previous link"),
6335 	 out_sec_name);
6336       ret = FALSE;
6337     }
6338 
6339 free_sym_buf:
6340   free (sympp);
6341   return ret;
6342 }
6343 
6344 /* Determine and set the size of the stub section for a final link.
6345 
6346    The basic idea here is to examine all the relocations looking for
6347    PC-relative calls to a target that is unreachable with a "bl"
6348    instruction.  */
6349 
6350 bfd_boolean
6351 elf32_arm_size_stubs (bfd *output_bfd,
6352 		      bfd *stub_bfd,
6353 		      struct bfd_link_info *info,
6354 		      bfd_signed_vma group_size,
6355 		      asection * (*add_stub_section) (const char *, asection *,
6356 						      asection *,
6357 						      unsigned int),
6358 		      void (*layout_sections_again) (void))
6359 {
6360   bfd_boolean ret = TRUE;
6361   obj_attribute *out_attr;
6362   int cmse_stub_created = 0;
6363   bfd_size_type stub_group_size;
6364   bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6365   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6366   struct a8_erratum_fix *a8_fixes = NULL;
6367   unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6368   struct a8_erratum_reloc *a8_relocs = NULL;
6369   unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6370 
6371   if (htab == NULL)
6372     return FALSE;
6373 
6374   if (htab->fix_cortex_a8)
6375     {
6376       a8_fixes = (struct a8_erratum_fix *)
6377 	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6378       a8_relocs = (struct a8_erratum_reloc *)
6379 	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6380     }
6381 
6382   /* Propagate mach to stub bfd, because it may not have been
6383      finalized when we created stub_bfd.  */
6384   bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6385 		     bfd_get_mach (output_bfd));
6386 
6387   /* Stash our params away.  */
6388   htab->stub_bfd = stub_bfd;
6389   htab->add_stub_section = add_stub_section;
6390   htab->layout_sections_again = layout_sections_again;
6391   stubs_always_after_branch = group_size < 0;
6392 
6393   out_attr = elf_known_obj_attributes_proc (output_bfd);
6394   m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6395 
6396   /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6397      as the first half of a 32-bit branch straddling two 4K pages.  This is a
6398      crude way of enforcing that.  */
6399   if (htab->fix_cortex_a8)
6400     stubs_always_after_branch = 1;
6401 
6402   if (group_size < 0)
6403     stub_group_size = -group_size;
6404   else
6405     stub_group_size = group_size;
6406 
6407   if (stub_group_size == 1)
6408     {
6409       /* Default values.  */
6410       /* The Thumb branch range of +-4MB has to be used as the default
6411 	 maximum size (a given section can contain both ARM and Thumb
6412 	 code, so the worst case has to be taken into account).
6413 
6414 	 This value is 24K less than that, which allows for 2025
6415 	 12-byte stubs.  If we exceed that, then we will fail to link.
6416 	 The user will have to relink with an explicit group size
6417 	 option.  */
6418       stub_group_size = 4170000;
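      /* For reference: 4 MB is 4194304 bytes, 4194304 - 4170000 = 24304
	 (roughly 24K), and 24304 / 12 = 2025 twelve-byte stubs.  */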
6419     }
6420 
6421   group_sections (htab, stub_group_size, stubs_always_after_branch);
6422 
6423   /* If we're applying the Cortex-A8 fix, we need to determine the
6424      program header size now, because we cannot change it later --
6425      that could alter section placements.  Notice the A8 erratum fix
6426      ends up requiring the section addresses to remain unchanged
6427      modulo the page size.  That's something we cannot represent
6428      inside BFD, and we don't want to force the section alignment to
6429      be the page size.  */
6430   if (htab->fix_cortex_a8)
6431     (*htab->layout_sections_again) ();
6432 
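  /* Iterate: scan the relocations of every input bfd, create whatever stubs
     are needed, ask the linker to lay the sections out again, and repeat
     until no new stubs (and no new Cortex-A8 fixes) are added.  */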
6433   while (1)
6434     {
6435       bfd *input_bfd;
6436       unsigned int bfd_indx;
6437       asection *stub_sec;
6438       enum elf32_arm_stub_type stub_type;
6439       bfd_boolean stub_changed = FALSE;
6440       unsigned prev_num_a8_fixes = num_a8_fixes;
6441 
6442       num_a8_fixes = 0;
6443       for (input_bfd = info->input_bfds, bfd_indx = 0;
6444 	   input_bfd != NULL;
6445 	   input_bfd = input_bfd->link.next, bfd_indx++)
6446 	{
6447 	  Elf_Internal_Shdr *symtab_hdr;
6448 	  asection *section;
6449 	  Elf_Internal_Sym *local_syms = NULL;
6450 
6451 	  if (!is_arm_elf (input_bfd)
6452 	      || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0)
6453 	    continue;
6454 
6455 	  num_a8_relocs = 0;
6456 
6457 	  /* We'll need the symbol table in a second.  */
6458 	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6459 	  if (symtab_hdr->sh_info == 0)
6460 	    continue;
6461 
6462 	  /* Limit the scan of symbols to object files whose profile is
6463 	     Microcontroller, so as not to hinder performance in the general case.  */
6464 	  if (m_profile && first_veneer_scan)
6465 	    {
6466 	      struct elf_link_hash_entry **sym_hashes;
6467 
6468 	      sym_hashes = elf_sym_hashes (input_bfd);
6469 	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6470 			      &cmse_stub_created))
6471 		goto error_ret_free_local;
6472 
6473 	      if (cmse_stub_created != 0)
6474 		stub_changed = TRUE;
6475 	    }
6476 
6477 	  /* Walk over each section attached to the input bfd.  */
6478 	  for (section = input_bfd->sections;
6479 	       section != NULL;
6480 	       section = section->next)
6481 	    {
6482 	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6483 
6484 	      /* If there aren't any relocs, then there's nothing more
6485 		 to do.  */
6486 	      if ((section->flags & SEC_RELOC) == 0
6487 		  || section->reloc_count == 0
6488 		  || (section->flags & SEC_CODE) == 0)
6489 		continue;
6490 
6491 	      /* If this section is a link-once section that will be
6492 		 discarded, then don't create any stubs.  */
6493 	      if (section->output_section == NULL
6494 		  || section->output_section->owner != output_bfd)
6495 		continue;
6496 
6497 	      /* Get the relocs.  */
6498 	      internal_relocs
6499 		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6500 					     NULL, info->keep_memory);
6501 	      if (internal_relocs == NULL)
6502 		goto error_ret_free_local;
6503 
6504 	      /* Now examine each relocation.  */
6505 	      irela = internal_relocs;
6506 	      irelaend = irela + section->reloc_count;
6507 	      for (; irela < irelaend; irela++)
6508 		{
6509 		  unsigned int r_type, r_indx;
6510 		  asection *sym_sec;
6511 		  bfd_vma sym_value;
6512 		  bfd_vma destination;
6513 		  struct elf32_arm_link_hash_entry *hash;
6514 		  const char *sym_name;
6515 		  unsigned char st_type;
6516 		  enum arm_st_branch_type branch_type;
6517 		  bfd_boolean created_stub = FALSE;
6518 
6519 		  r_type = ELF32_R_TYPE (irela->r_info);
6520 		  r_indx = ELF32_R_SYM (irela->r_info);
6521 
6522 		  if (r_type >= (unsigned int) R_ARM_max)
6523 		    {
6524 		      bfd_set_error (bfd_error_bad_value);
6525 		    error_ret_free_internal:
6526 		      if (elf_section_data (section)->relocs == NULL)
6527 			free (internal_relocs);
6528 		    /* Fall through.  */
6529 		    error_ret_free_local:
6530 		      if (local_syms != NULL
6531 			  && (symtab_hdr->contents
6532 			      != (unsigned char *) local_syms))
6533 			free (local_syms);
6534 		      return FALSE;
6535 		    }
6536 
6537 		  hash = NULL;
6538 		  if (r_indx >= symtab_hdr->sh_info)
6539 		    hash = elf32_arm_hash_entry
6540 		      (elf_sym_hashes (input_bfd)
6541 		       [r_indx - symtab_hdr->sh_info]);
6542 
6543 		  /* Only look for stubs on branch instructions, or
6544 		     non-relaxed TLSCALL.  */
6545 		  if ((r_type != (unsigned int) R_ARM_CALL)
6546 		      && (r_type != (unsigned int) R_ARM_THM_CALL)
6547 		      && (r_type != (unsigned int) R_ARM_JUMP24)
6548 		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6549 		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
6550 		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6551 		      && (r_type != (unsigned int) R_ARM_PLT32)
6552 		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
6553 			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6554 			   && r_type == elf32_arm_tls_transition
6555 			       (info, r_type, &hash->root)
6556 			   && ((hash ? hash->tls_type
6557 				: (elf32_arm_local_got_tls_type
6558 				   (input_bfd)[r_indx]))
6559 			       & GOT_TLS_GDESC) != 0))
6560 		    continue;
6561 
6562 		  /* Now determine the call target, its name, value,
6563 		     section.  */
6564 		  sym_sec = NULL;
6565 		  sym_value = 0;
6566 		  destination = 0;
6567 		  sym_name = NULL;
6568 
6569 		  if (r_type == (unsigned int) R_ARM_TLS_CALL
6570 		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6571 		    {
6572 		      /* A non-relaxed TLS call.  The target is the
6573 			 PLT-resident trampoline and has nothing to do
6574 			 with the symbol.  */
6575 		      BFD_ASSERT (htab->tls_trampoline > 0);
6576 		      sym_sec = htab->root.splt;
6577 		      sym_value = htab->tls_trampoline;
6578 		      hash = 0;
6579 		      st_type = STT_FUNC;
6580 		      branch_type = ST_BRANCH_TO_ARM;
6581 		    }
6582 		  else if (!hash)
6583 		    {
6584 		      /* It's a local symbol.  */
6585 		      Elf_Internal_Sym *sym;
6586 
6587 		      if (local_syms == NULL)
6588 			{
6589 			  local_syms
6590 			    = (Elf_Internal_Sym *) symtab_hdr->contents;
6591 			  if (local_syms == NULL)
6592 			    local_syms
6593 			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6594 						      symtab_hdr->sh_info, 0,
6595 						      NULL, NULL, NULL);
6596 			  if (local_syms == NULL)
6597 			    goto error_ret_free_internal;
6598 			}
6599 
6600 		      sym = local_syms + r_indx;
6601 		      if (sym->st_shndx == SHN_UNDEF)
6602 			sym_sec = bfd_und_section_ptr;
6603 		      else if (sym->st_shndx == SHN_ABS)
6604 			sym_sec = bfd_abs_section_ptr;
6605 		      else if (sym->st_shndx == SHN_COMMON)
6606 			sym_sec = bfd_com_section_ptr;
6607 		      else
6608 			sym_sec =
6609 			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6610 
6611 		      if (!sym_sec)
6612 			/* This is an undefined symbol.  It can never
6613 			   be resolved.  */
6614 			continue;
6615 
6616 		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6617 			sym_value = sym->st_value;
6618 		      destination = (sym_value + irela->r_addend
6619 				     + sym_sec->output_offset
6620 				     + sym_sec->output_section->vma);
6621 		      st_type = ELF_ST_TYPE (sym->st_info);
6622 		      branch_type =
6623 			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6624 		      sym_name
6625 			= bfd_elf_string_from_elf_section (input_bfd,
6626 							   symtab_hdr->sh_link,
6627 							   sym->st_name);
6628 		    }
6629 		  else
6630 		    {
6631 		      /* It's an external symbol.  */
6632 		      while (hash->root.root.type == bfd_link_hash_indirect
6633 			     || hash->root.root.type == bfd_link_hash_warning)
6634 			hash = ((struct elf32_arm_link_hash_entry *)
6635 				hash->root.root.u.i.link);
6636 
6637 		      if (hash->root.root.type == bfd_link_hash_defined
6638 			  || hash->root.root.type == bfd_link_hash_defweak)
6639 			{
6640 			  sym_sec = hash->root.root.u.def.section;
6641 			  sym_value = hash->root.root.u.def.value;
6642 
6643 			  struct elf32_arm_link_hash_table *globals =
6644 						  elf32_arm_hash_table (info);
6645 
6646 			  /* For a destination in a shared library,
6647 			     use the PLT stub as the target address to
6648 			     decide whether a branch stub is
6649 			     needed.  */
6650 			  if (globals != NULL
6651 			      && globals->root.splt != NULL
6652 			      && hash != NULL
6653 			      && hash->root.plt.offset != (bfd_vma) -1)
6654 			    {
6655 			      sym_sec = globals->root.splt;
6656 			      sym_value = hash->root.plt.offset;
6657 			      if (sym_sec->output_section != NULL)
6658 				destination = (sym_value
6659 					       + sym_sec->output_offset
6660 					       + sym_sec->output_section->vma);
6661 			    }
6662 			  else if (sym_sec->output_section != NULL)
6663 			    destination = (sym_value + irela->r_addend
6664 					   + sym_sec->output_offset
6665 					   + sym_sec->output_section->vma);
6666 			}
6667 		      else if ((hash->root.root.type == bfd_link_hash_undefined)
6668 			       || (hash->root.root.type == bfd_link_hash_undefweak))
6669 			{
6670 			  /* For a shared library, use the PLT stub as
6671 			     the target address to decide whether a long
6672 			     branch stub is needed.  For absolute code,
6673 			     such symbols cannot be handled.  */
6674 			  struct elf32_arm_link_hash_table *globals =
6675 			    elf32_arm_hash_table (info);
6676 
6677 			  if (globals != NULL
6678 			      && globals->root.splt != NULL
6679 			      && hash != NULL
6680 			      && hash->root.plt.offset != (bfd_vma) -1)
6681 			    {
6682 			      sym_sec = globals->root.splt;
6683 			      sym_value = hash->root.plt.offset;
6684 			      if (sym_sec->output_section != NULL)
6685 				destination = (sym_value
6686 					       + sym_sec->output_offset
6687 					       + sym_sec->output_section->vma);
6688 			    }
6689 			  else
6690 			    continue;
6691 			}
6692 		      else
6693 			{
6694 			  bfd_set_error (bfd_error_bad_value);
6695 			  goto error_ret_free_internal;
6696 			}
6697 		      st_type = hash->root.type;
6698 		      branch_type =
6699 			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6700 		      sym_name = hash->root.root.root.string;
6701 		    }
6702 
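		  /* The do { ... } while (0) wrapper below exists only so
		     that "break" can be used to stop early once it is clear
		     no new stub is needed.  */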
6703 		  do
6704 		    {
6705 		      bfd_boolean new_stub;
6706 		      struct elf32_arm_stub_hash_entry *stub_entry;
6707 
6708 		      /* Determine what (if any) linker stub is needed.  */
6709 		      stub_type = arm_type_of_stub (info, section, irela,
6710 						    st_type, &branch_type,
6711 						    hash, destination, sym_sec,
6712 						    input_bfd, sym_name);
6713 		      if (stub_type == arm_stub_none)
6714 			break;
6715 
6716 		      /* We've either created a stub for this reloc already,
6717 			 or we are about to.  */
6718 		      stub_entry =
6719 			elf32_arm_create_stub (htab, stub_type, section, irela,
6720 					       sym_sec, hash,
6721 					       (char *) sym_name, sym_value,
6722 					       branch_type, &new_stub);
6723 
6724 		      created_stub = stub_entry != NULL;
6725 		      if (!created_stub)
6726 			goto error_ret_free_internal;
6727 		      else if (!new_stub)
6728 			break;
6729 		      else
6730 			stub_changed = TRUE;
6731 		    }
6732 		  while (0);
6733 
6734 		  /* Look for relocations which might trigger the Cortex-A8
6735 		     erratum.  */
6736 		  if (htab->fix_cortex_a8
6737 		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
6738 			  || r_type == (unsigned int) R_ARM_THM_JUMP19
6739 			  || r_type == (unsigned int) R_ARM_THM_CALL
6740 			  || r_type == (unsigned int) R_ARM_THM_XPC22))
6741 		    {
6742 		      bfd_vma from = section->output_section->vma
6743 				     + section->output_offset
6744 				     + irela->r_offset;
6745 
6746 		      if ((from & 0xfff) == 0xffe)
6747 			{
6748 			  /* Found a candidate.  Note we haven't checked the
6749 			     destination is within 4K here: if we do so (and
6750 			     don't create an entry in a8_relocs) we can't tell
6751 			     that a branch should have been relocated when
6752 			     scanning later.  */
6753 			  if (num_a8_relocs == a8_reloc_table_size)
6754 			    {
6755 			      a8_reloc_table_size *= 2;
6756 			      a8_relocs = (struct a8_erratum_reloc *)
6757 				  bfd_realloc (a8_relocs,
6758 					       sizeof (struct a8_erratum_reloc)
6759 					       * a8_reloc_table_size);
6760 			    }
6761 
6762 			  a8_relocs[num_a8_relocs].from = from;
6763 			  a8_relocs[num_a8_relocs].destination = destination;
6764 			  a8_relocs[num_a8_relocs].r_type = r_type;
6765 			  a8_relocs[num_a8_relocs].branch_type = branch_type;
6766 			  a8_relocs[num_a8_relocs].sym_name = sym_name;
6767 			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6768 			  a8_relocs[num_a8_relocs].hash = hash;
6769 
6770 			  num_a8_relocs++;
6771 			}
6772 		    }
6773 		}
6774 
6775 	      /* We're done with the internal relocs, free them.  */
6776 	      if (elf_section_data (section)->relocs == NULL)
6777 		free (internal_relocs);
6778 	    }
6779 
6780 	  if (htab->fix_cortex_a8)
6781 	    {
6782 	      /* Sort relocs which might apply to the Cortex-A8 erratum.  */
6783 	      qsort (a8_relocs, num_a8_relocs,
6784 		     sizeof (struct a8_erratum_reloc),
6785 		     &a8_reloc_compare);
6786 
6787 	      /* Scan for branches which might trigger the Cortex-A8 erratum.  */
6788 	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6789 					  &num_a8_fixes, &a8_fix_table_size,
6790 					  a8_relocs, num_a8_relocs,
6791 					  prev_num_a8_fixes, &stub_changed)
6792 		  != 0)
6793 		goto error_ret_free_local;
6794 	    }
6795 
6796 	  if (local_syms != NULL
6797 	      && symtab_hdr->contents != (unsigned char *) local_syms)
6798 	    {
6799 	      if (!info->keep_memory)
6800 		free (local_syms);
6801 	      else
6802 		symtab_hdr->contents = (unsigned char *) local_syms;
6803 	    }
6804 	}
6805 
6806       if (first_veneer_scan
6807 	  && !set_cmse_veneer_addr_from_implib (info, htab,
6808 						&cmse_stub_created))
6809 	ret = FALSE;
6810 
6811       if (prev_num_a8_fixes != num_a8_fixes)
6812 	stub_changed = TRUE;
6813 
6814       if (!stub_changed)
6815 	break;
6816 
6817       /* OK, we've added some stubs.  Find out the new size of the
6818 	 stub sections.  */
6819       for (stub_sec = htab->stub_bfd->sections;
6820 	   stub_sec != NULL;
6821 	   stub_sec = stub_sec->next)
6822 	{
6823 	  /* Ignore non-stub sections.  */
6824 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
6825 	    continue;
6826 
6827 	  stub_sec->size = 0;
6828 	}
6829 
6830       /* Add new SG veneers after those already in the input import
6831 	 library.  */
6832       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6833 	   stub_type++)
6834 	{
6835 	  bfd_vma *start_offset_p;
6836 	  asection **stub_sec_p;
6837 
6838 	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6839 	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6840 	  if (start_offset_p == NULL)
6841 	    continue;
6842 
6843 	  BFD_ASSERT (stub_sec_p != NULL);
6844 	  if (*stub_sec_p != NULL)
6845 	    (*stub_sec_p)->size = *start_offset_p;
6846 	}
6847 
6848       /* Compute stub section size, considering padding.  */
6849       bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6850       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6851 	   stub_type++)
6852 	{
6853 	  int size, padding;
6854 	  asection **stub_sec_p;
6855 
6856 	  padding = arm_dedicated_stub_section_padding (stub_type);
6857 	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6858 	  /* Skip if no stub input section or no stub section padding
6859 	     required.  */
6860 	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6861 	    continue;
6862 	  /* Stub section padding required but no dedicated section.  */
6863 	  BFD_ASSERT (stub_sec_p);
6864 
6865 	  size = (*stub_sec_p)->size;
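	  /* Round SIZE up to the next multiple of PADDING; this relies on
	     PADDING being a power of two.  For example, with padding == 8 a
	     size of 13 becomes 16.  */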
6866 	  size = (size + padding - 1) & ~(padding - 1);
6867 	  (*stub_sec_p)->size = size;
6868 	}
6869 
6870       /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
6871       if (htab->fix_cortex_a8)
6872 	for (i = 0; i < num_a8_fixes; i++)
6873 	  {
6874 	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6875 			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6876 
6877 	    if (stub_sec == NULL)
6878 	      return FALSE;
6879 
6880 	    stub_sec->size
6881 	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6882 					      NULL);
6883 	  }
6884 
6885 
6886       /* Ask the linker to do its stuff.  */
6887       (*htab->layout_sections_again) ();
6888       first_veneer_scan = FALSE;
6889     }
6890 
6891   /* Add stubs for Cortex-A8 erratum fixes now.  */
6892   if (htab->fix_cortex_a8)
6893     {
6894       for (i = 0; i < num_a8_fixes; i++)
6895 	{
6896 	  struct elf32_arm_stub_hash_entry *stub_entry;
6897 	  char *stub_name = a8_fixes[i].stub_name;
6898 	  asection *section = a8_fixes[i].section;
6899 	  unsigned int section_id = a8_fixes[i].section->id;
6900 	  asection *link_sec = htab->stub_group[section_id].link_sec;
6901 	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
6902 	  const insn_sequence *template_sequence;
6903 	  int template_size, size = 0;
6904 
6905 	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6906 					     TRUE, FALSE);
6907 	  if (stub_entry == NULL)
6908 	    {
6909 	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6910 				  section->owner, stub_name);
6911 	      return FALSE;
6912 	    }
6913 
6914 	  stub_entry->stub_sec = stub_sec;
6915 	  stub_entry->stub_offset = (bfd_vma) -1;
6916 	  stub_entry->id_sec = link_sec;
6917 	  stub_entry->stub_type = a8_fixes[i].stub_type;
6918 	  stub_entry->source_value = a8_fixes[i].offset;
6919 	  stub_entry->target_section = a8_fixes[i].section;
6920 	  stub_entry->target_value = a8_fixes[i].target_offset;
6921 	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
6922 	  stub_entry->branch_type = a8_fixes[i].branch_type;
6923 
6924 	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
6925 					      &template_sequence,
6926 					      &template_size);
6927 
6928 	  stub_entry->stub_size = size;
6929 	  stub_entry->stub_template = template_sequence;
6930 	  stub_entry->stub_template_size = template_size;
6931 	}
6932 
6933       /* Stash the Cortex-A8 erratum fix array for use later in
6934 	 elf32_arm_write_section().  */
6935       htab->a8_erratum_fixes = a8_fixes;
6936       htab->num_a8_erratum_fixes = num_a8_fixes;
6937     }
6938   else
6939     {
6940       htab->a8_erratum_fixes = NULL;
6941       htab->num_a8_erratum_fixes = 0;
6942     }
6943   return ret;
6944 }
6945 
6946 /* Build all the stubs associated with the current output file.  The
6947    stubs are kept in a hash table attached to the main linker hash
6948    table.  We also set up the .plt entries for statically linked PIC
6949    functions here.  This function is called via arm_elf_finish in the
6950    linker.  */
6951 
6952 bfd_boolean
6953 elf32_arm_build_stubs (struct bfd_link_info *info)
6954 {
6955   asection *stub_sec;
6956   struct bfd_hash_table *table;
6957   enum elf32_arm_stub_type stub_type;
6958   struct elf32_arm_link_hash_table *htab;
6959 
6960   htab = elf32_arm_hash_table (info);
6961   if (htab == NULL)
6962     return FALSE;
6963 
6964   for (stub_sec = htab->stub_bfd->sections;
6965        stub_sec != NULL;
6966        stub_sec = stub_sec->next)
6967     {
6968       bfd_size_type size;
6969 
6970       /* Ignore non-stub sections.  */
6971       if (!strstr (stub_sec->name, STUB_SUFFIX))
6972 	continue;
6973 
6974       /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
6975 	 must at least be done for stub sections requiring padding and for SG
6976 	 veneers, to ensure that non-secure code branching to a removed SG
6977 	 veneer causes an error.  */
6978       size = stub_sec->size;
6979       stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
6980       if (stub_sec->contents == NULL && size != 0)
6981 	return FALSE;
6982 
6983       stub_sec->size = 0;
6984     }
6985 
6986   /* Add new SG veneers after those already in the input import library.  */
6987   for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6988     {
6989       bfd_vma *start_offset_p;
6990       asection **stub_sec_p;
6991 
6992       start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6993       stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6994       if (start_offset_p == NULL)
6995 	continue;
6996 
6997       BFD_ASSERT (stub_sec_p != NULL);
6998       if (*stub_sec_p != NULL)
6999 	(*stub_sec_p)->size = *start_offset_p;
7000     }
7001 
7002   /* Build the stubs as directed by the stub hash table.  */
7003   table = &htab->stub_hash_table;
7004   bfd_hash_traverse (table, arm_build_one_stub, info);
7005   if (htab->fix_cortex_a8)
7006     {
7007       /* Place the Cortex-A8 stubs last.  */
7008       htab->fix_cortex_a8 = -1;
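      /* Presumably arm_build_one_stub keys off the sign of fix_cortex_a8 to
	 skip the Cortex-A8 veneers on the first traversal above and to emit
	 only them on this second traversal.  */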
7009       bfd_hash_traverse (table, arm_build_one_stub, info);
7010     }
7011 
7012   return TRUE;
7013 }
7014 
7015 /* Locate the Thumb encoded calling stub for NAME.  */
7016 
7017 static struct elf_link_hash_entry *
7018 find_thumb_glue (struct bfd_link_info *link_info,
7019 		 const char *name,
7020 		 char **error_message)
7021 {
7022   char *tmp_name;
7023   struct elf_link_hash_entry *hash;
7024   struct elf32_arm_link_hash_table *hash_table;
7025 
7026   /* We need a pointer to the armelf specific hash table.  */
7027   hash_table = elf32_arm_hash_table (link_info);
7028   if (hash_table == NULL)
7029     return NULL;
7030 
7031   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7032 				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7033 
7034   BFD_ASSERT (tmp_name);
7035 
7036   sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7037 
7038   hash = elf_link_hash_lookup
7039     (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7040 
7041   if (hash == NULL
7042       && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7043 		   "Thumb", tmp_name, name) == -1)
7044     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7045 
7046   free (tmp_name);
7047 
7048   return hash;
7049 }
7050 
7051 /* Locate the ARM encoded calling stub for NAME.  */
7052 
7053 static struct elf_link_hash_entry *
7054 find_arm_glue (struct bfd_link_info *link_info,
7055 	       const char *name,
7056 	       char **error_message)
7057 {
7058   char *tmp_name;
7059   struct elf_link_hash_entry *myh;
7060   struct elf32_arm_link_hash_table *hash_table;
7061 
7062   /* We need a pointer to the elfarm specific hash table.  */
7063   hash_table = elf32_arm_hash_table (link_info);
7064   if (hash_table == NULL)
7065     return NULL;
7066 
7067   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7068 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7069 
7070   BFD_ASSERT (tmp_name);
7071 
7072   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7073 
7074   myh = elf_link_hash_lookup
7075     (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7076 
7077   if (myh == NULL
7078       && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7079 		   "ARM", tmp_name, name) == -1)
7080     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7081 
7082   free (tmp_name);
7083 
7084   return myh;
7085 }
7086 
7087 /* ARM->Thumb glue (static images):
7088 
7089    .arm
7090    __func_from_arm:
7091    ldr r12, __func_addr
7092    bx  r12
7093    __func_addr:
7094    .word func    @ behave as if you saw an ARM_32 reloc.
7095 
7096    (v5t static images)
7097    .arm
7098    __func_from_arm:
7099    ldr pc, __func_addr
7100    __func_addr:
7101    .word func    @ behave as if you saw an ARM_32 reloc.
7102 
7103    (relocatable images)
7104    .arm
7105    __func_from_arm:
7106    ldr r12, __func_offset
7107    add r12, r12, pc
7108    bx  r12
7109    __func_offset:
7110    .word func - .   */
7111 
7112 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7113 static const insn32 a2t1_ldr_insn = 0xe59fc000;
7114 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7115 static const insn32 a2t3_func_addr_insn = 0x00000001;
7116 
7117 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7118 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7119 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7120 
7121 #define ARM2THUMB_PIC_GLUE_SIZE 16
7122 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7123 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7124 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
7125 
7126 /* Thumb->ARM:				Thumb->(non-interworking aware) ARM
7127 
7128      .thumb				.thumb
7129      .align 2				.align 2
7130  __func_from_thumb:		    __func_from_thumb:
7131      bx pc				push {r6, lr}
7132      nop				ldr  r6, __func_addr
7133      .arm				mov  lr, pc
7134      b func				bx   r6
7135 					.arm
7136 				    ;; back_to_thumb
7137 					ldmia r13!, {r6, lr}
7138 					bx    lr
7139 				    __func_addr:
7140 					.word	     func  */
7141 
7142 #define THUMB2ARM_GLUE_SIZE 8
7143 static const insn16 t2a1_bx_pc_insn = 0x4778;
7144 static const insn16 t2a2_noop_insn = 0x46c0;
7145 static const insn32 t2a3_b_insn = 0xea000000;
7146 
7147 #define VFP11_ERRATUM_VENEER_SIZE 8
7148 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7149 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7150 
7151 #define ARM_BX_VENEER_SIZE 12
7152 static const insn32 armbx1_tst_insn = 0xe3100001;
7153 static const insn32 armbx2_moveq_insn = 0x01a0f000;
7154 static const insn32 armbx3_bx_insn = 0xe12fff10;
7155 
7156 #ifndef ELFARM_NABI_C_INCLUDED
7157 static void
7158 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7159 {
7160   asection * s;
7161   bfd_byte * contents;
7162 
7163   if (size == 0)
7164     {
7165       /* Do not include empty glue sections in the output.  */
7166       if (abfd != NULL)
7167 	{
7168 	  s = bfd_get_linker_section (abfd, name);
7169 	  if (s != NULL)
7170 	    s->flags |= SEC_EXCLUDE;
7171 	}
7172       return;
7173     }
7174 
7175   BFD_ASSERT (abfd != NULL);
7176 
7177   s = bfd_get_linker_section (abfd, name);
7178   BFD_ASSERT (s != NULL);
7179 
7180   contents = (bfd_byte *) bfd_alloc (abfd, size);
7181 
7182   BFD_ASSERT (s->size == size);
7183   s->contents = contents;
7184 }
7185 
7186 bfd_boolean
7187 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7188 {
7189   struct elf32_arm_link_hash_table * globals;
7190 
7191   globals = elf32_arm_hash_table (info);
7192   BFD_ASSERT (globals != NULL);
7193 
7194   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7195 				   globals->arm_glue_size,
7196 				   ARM2THUMB_GLUE_SECTION_NAME);
7197 
7198   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7199 				   globals->thumb_glue_size,
7200 				   THUMB2ARM_GLUE_SECTION_NAME);
7201 
7202   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7203 				   globals->vfp11_erratum_glue_size,
7204 				   VFP11_ERRATUM_VENEER_SECTION_NAME);
7205 
7206   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7207 				   globals->stm32l4xx_erratum_glue_size,
7208 				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7209 
7210   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7211 				   globals->bx_glue_size,
7212 				   ARM_BX_GLUE_SECTION_NAME);
7213 
7214   return TRUE;
7215 }
7216 
7217 /* Allocate space and symbols for calling a Thumb function from ARM mode.
7218    Returns the symbol identifying the stub.  */
7219 
7220 static struct elf_link_hash_entry *
7221 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7222 			  struct elf_link_hash_entry * h)
7223 {
7224   const char * name = h->root.root.string;
7225   asection * s;
7226   char * tmp_name;
7227   struct elf_link_hash_entry * myh;
7228   struct bfd_link_hash_entry * bh;
7229   struct elf32_arm_link_hash_table * globals;
7230   bfd_vma val;
7231   bfd_size_type size;
7232 
7233   globals = elf32_arm_hash_table (link_info);
7234   BFD_ASSERT (globals != NULL);
7235   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7236 
7237   s = bfd_get_linker_section
7238     (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7239 
7240   BFD_ASSERT (s != NULL);
7241 
7242   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7243 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7244 
7245   BFD_ASSERT (tmp_name);
7246 
7247   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7248 
7249   myh = elf_link_hash_lookup
7250     (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7251 
7252   if (myh != NULL)
7253     {
7254       /* We've already seen this guy.  */
7255       free (tmp_name);
7256       return myh;
7257     }
7258 
7259   /* The only trick here is using globals->arm_glue_size as the value.
7260      Even though the section isn't allocated yet, this is where we will be
7261      putting it.  The +1 on the value marks that the stub has not been
7262      output yet - not that it is a Thumb function.  */
7263   bh = NULL;
7264   val = globals->arm_glue_size + 1;
7265   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7266 				    tmp_name, BSF_GLOBAL, s, val,
7267 				    NULL, TRUE, FALSE, &bh);
7268 
7269   myh = (struct elf_link_hash_entry *) bh;
7270   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7271   myh->forced_local = 1;
7272 
7273   free (tmp_name);
7274 
7275   if (bfd_link_pic (link_info)
7276       || globals->root.is_relocatable_executable
7277       || globals->pic_veneer)
7278     size = ARM2THUMB_PIC_GLUE_SIZE;
7279   else if (globals->use_blx)
7280     size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7281   else
7282     size = ARM2THUMB_STATIC_GLUE_SIZE;
7283 
7284   s->size += size;
7285   globals->arm_glue_size += size;
7286 
7287   return myh;
7288 }
7289 
7290 /* Allocate space for ARMv4 BX veneers.  */
7291 
7292 static void
7293 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7294 {
7295   asection * s;
7296   struct elf32_arm_link_hash_table *globals;
7297   char *tmp_name;
7298   struct elf_link_hash_entry *myh;
7299   struct bfd_link_hash_entry *bh;
7300   bfd_vma val;
7301 
7302   /* BX PC does not need a veneer.  */
7303   if (reg == 15)
7304     return;
7305 
7306   globals = elf32_arm_hash_table (link_info);
7307   BFD_ASSERT (globals != NULL);
7308   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7309 
7310   /* Check if this veneer has already been allocated.  */
7311   if (globals->bx_glue_offset[reg])
7312     return;
7313 
7314   s = bfd_get_linker_section
7315     (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7316 
7317   BFD_ASSERT (s != NULL);
7318 
7319   /* Add symbol for veneer.  */
7320   tmp_name = (char *)
7321       bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7322 
7323   BFD_ASSERT (tmp_name);
7324 
7325   sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7326 
7327   myh = elf_link_hash_lookup
7328     (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7329 
7330   BFD_ASSERT (myh == NULL);
7331 
7332   bh = NULL;
7333   val = globals->bx_glue_size;
7334   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7335 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7336 				    NULL, TRUE, FALSE, &bh);
7337 
7338   myh = (struct elf_link_hash_entry *) bh;
7339   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7340   myh->forced_local = 1;
7341 
7342   s->size += ARM_BX_VENEER_SIZE;
7343   globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7344   globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7345 }
7346 
7347 
7348 /* Add an entry to the code/data map for section SEC.  */
7349 
7350 static void
7351 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7352 {
7353   struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7354   unsigned int newidx;
7355 
7356   if (sec_data->map == NULL)
7357     {
7358       sec_data->map = (elf32_arm_section_map *)
7359 	  bfd_malloc (sizeof (elf32_arm_section_map));
7360       sec_data->mapcount = 0;
7361       sec_data->mapsize = 1;
7362     }
7363 
7364   newidx = sec_data->mapcount++;
7365 
7366   if (sec_data->mapcount > sec_data->mapsize)
7367     {
7368       sec_data->mapsize *= 2;
7369       sec_data->map = (elf32_arm_section_map *)
7370 	  bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7371 			       * sizeof (elf32_arm_section_map));
7372     }
7373 
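  /* sec_data->map may be NULL here: either the initial bfd_malloc failed, or
     bfd_realloc_or_free failed (in which case it also freed the old buffer).
     Hence the check below before recording the new entry.  */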
7374   if (sec_data->map)
7375     {
7376       sec_data->map[newidx].vma = vma;
7377       sec_data->map[newidx].type = type;
7378     }
7379 }
7380 
7381 
7382 /* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
7383    veneers are handled for now.  */
7384 
7385 static bfd_vma
7386 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7387 			     elf32_vfp11_erratum_list *branch,
7388 			     bfd *branch_bfd,
7389 			     asection *branch_sec,
7390 			     unsigned int offset)
7391 {
7392   asection *s;
7393   struct elf32_arm_link_hash_table *hash_table;
7394   char *tmp_name;
7395   struct elf_link_hash_entry *myh;
7396   struct bfd_link_hash_entry *bh;
7397   bfd_vma val;
7398   struct _arm_elf_section_data *sec_data;
7399   elf32_vfp11_erratum_list *newerr;
7400 
7401   hash_table = elf32_arm_hash_table (link_info);
7402   BFD_ASSERT (hash_table != NULL);
7403   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7404 
7405   s = bfd_get_linker_section
7406     (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7407 
7408   BFD_ASSERT (s != NULL);
7409 
7410   sec_data = elf32_arm_section_data (s);
7411 
7412   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7413 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7414 
7415   BFD_ASSERT (tmp_name);
7416 
7417   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7418 	   hash_table->num_vfp11_fixes);
7419 
7420   myh = elf_link_hash_lookup
7421     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7422 
7423   BFD_ASSERT (myh == NULL);
7424 
7425   bh = NULL;
7426   val = hash_table->vfp11_erratum_glue_size;
7427   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7428 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7429 				    NULL, TRUE, FALSE, &bh);
7430 
7431   myh = (struct elf_link_hash_entry *) bh;
7432   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7433   myh->forced_local = 1;
7434 
7435   /* Link veneer back to calling location.  */
7436   sec_data->erratumcount += 1;
7437   newerr = (elf32_vfp11_erratum_list *)
7438       bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7439 
7440   newerr->type = VFP11_ERRATUM_ARM_VENEER;
7441   newerr->vma = -1;
7442   newerr->u.v.branch = branch;
7443   newerr->u.v.id = hash_table->num_vfp11_fixes;
7444   branch->u.b.veneer = newerr;
7445 
7446   newerr->next = sec_data->erratumlist;
7447   sec_data->erratumlist = newerr;
7448 
7449   /* A symbol for the return from the veneer.  */
7450   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7451 	   hash_table->num_vfp11_fixes);
7452 
7453   myh = elf_link_hash_lookup
7454     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7455 
7456   if (myh != NULL)
7457     abort ();
7458 
7459   bh = NULL;
7460   val = offset + 4;
7461   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7462 				    branch_sec, val, NULL, TRUE, FALSE, &bh);
7463 
7464   myh = (struct elf_link_hash_entry *) bh;
7465   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7466   myh->forced_local = 1;
7467 
7468   free (tmp_name);
7469 
7470   /* Generate a mapping symbol for the veneer section, and explicitly add an
7471      entry for that symbol to the code/data map for the section.  */
7472   if (hash_table->vfp11_erratum_glue_size == 0)
7473     {
7474       bh = NULL;
7475       /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
7476 	 ever requires this erratum fix.  */
7477       _bfd_generic_link_add_one_symbol (link_info,
7478 					hash_table->bfd_of_glue_owner, "$a",
7479 					BSF_LOCAL, s, 0, NULL,
7480 					TRUE, FALSE, &bh);
7481 
7482       myh = (struct elf_link_hash_entry *) bh;
7483       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7484       myh->forced_local = 1;
7485 
7486       /* The elf32_arm_init_maps function only cares about symbols from input
7487 	 BFDs.  We must make a note of this generated mapping symbol
7488 	 ourselves so that code byteswapping works properly in
7489 	 elf32_arm_write_section.  */
7490       elf32_arm_section_map_add (s, 'a', 0);
7491     }
7492 
7493   s->size += VFP11_ERRATUM_VENEER_SIZE;
7494   hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7495   hash_table->num_vfp11_fixes++;
7496 
7497   /* The offset of the veneer.  */
7498   return val;
7499 }
7500 
7501 /* Record information about an STM32L4XX STM erratum veneer.  Only Thumb-mode
7502    veneers need to be handled because the erratum only affects Thumb-only Cortex-M parts.  */
7503 
7504 static bfd_vma
7505 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7506 				 elf32_stm32l4xx_erratum_list *branch,
7507 				 bfd *branch_bfd,
7508 				 asection *branch_sec,
7509 				 unsigned int offset,
7510 				 bfd_size_type veneer_size)
7511 {
7512   asection *s;
7513   struct elf32_arm_link_hash_table *hash_table;
7514   char *tmp_name;
7515   struct elf_link_hash_entry *myh;
7516   struct bfd_link_hash_entry *bh;
7517   bfd_vma val;
7518   struct _arm_elf_section_data *sec_data;
7519   elf32_stm32l4xx_erratum_list *newerr;
7520 
7521   hash_table = elf32_arm_hash_table (link_info);
7522   BFD_ASSERT (hash_table != NULL);
7523   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7524 
7525   s = bfd_get_linker_section
7526     (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7527 
7528   BFD_ASSERT (s != NULL);
7529 
7530   sec_data = elf32_arm_section_data (s);
7531 
7532   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7533 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7534 
7535   BFD_ASSERT (tmp_name);
7536 
7537   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7538 	   hash_table->num_stm32l4xx_fixes);
7539 
7540   myh = elf_link_hash_lookup
7541     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7542 
7543   BFD_ASSERT (myh == NULL);
7544 
7545   bh = NULL;
7546   val = hash_table->stm32l4xx_erratum_glue_size;
7547   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7548 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7549 				    NULL, TRUE, FALSE, &bh);
7550 
7551   myh = (struct elf_link_hash_entry *) bh;
7552   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7553   myh->forced_local = 1;
7554 
7555   /* Link veneer back to calling location.  */
7556   sec_data->stm32l4xx_erratumcount += 1;
7557   newerr = (elf32_stm32l4xx_erratum_list *)
7558       bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7559 
7560   newerr->type = STM32L4XX_ERRATUM_VENEER;
7561   newerr->vma = -1;
7562   newerr->u.v.branch = branch;
7563   newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7564   branch->u.b.veneer = newerr;
7565 
7566   newerr->next = sec_data->stm32l4xx_erratumlist;
7567   sec_data->stm32l4xx_erratumlist = newerr;
7568 
7569   /* A symbol for the return from the veneer.  */
7570   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7571 	   hash_table->num_stm32l4xx_fixes);
7572 
7573   myh = elf_link_hash_lookup
7574     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7575 
7576   if (myh != NULL)
7577     abort ();
7578 
7579   bh = NULL;
7580   val = offset + 4;
7581   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7582 				    branch_sec, val, NULL, TRUE, FALSE, &bh);
7583 
7584   myh = (struct elf_link_hash_entry *) bh;
7585   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7586   myh->forced_local = 1;
7587 
7588   free (tmp_name);
7589 
7590   /* Generate a mapping symbol for the veneer section, and explicitly add an
7591      entry for that symbol to the code/data map for the section.  */
7592   if (hash_table->stm32l4xx_erratum_glue_size == 0)
7593     {
7594       bh = NULL;
7595       /* Creates a THUMB symbol since there is no other choice.  */
7596       _bfd_generic_link_add_one_symbol (link_info,
7597 					hash_table->bfd_of_glue_owner, "$t",
7598 					BSF_LOCAL, s, 0, NULL,
7599 					TRUE, FALSE, &bh);
7600 
7601       myh = (struct elf_link_hash_entry *) bh;
7602       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7603       myh->forced_local = 1;
7604 
7605       /* The elf32_arm_init_maps function only cares about symbols from input
7606 	 BFDs.  We must make a note of this generated mapping symbol
7607 	 ourselves so that code byteswapping works properly in
7608 	 elf32_arm_write_section.  */
7609       elf32_arm_section_map_add (s, 't', 0);
7610     }
7611 
7612   s->size += veneer_size;
7613   hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7614   hash_table->num_stm32l4xx_fixes++;
7615 
7616   /* The offset of the veneer.  */
7617   return val;
7618 }
7619 
7620 #define ARM_GLUE_SECTION_FLAGS \
7621   (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7622    | SEC_READONLY | SEC_LINKER_CREATED)
7623 
7624 /* Create a fake section for use by the ARM backend of the linker.  */
7625 
7626 static bfd_boolean
7627 arm_make_glue_section (bfd * abfd, const char * name)
7628 {
7629   asection * sec;
7630 
7631   sec = bfd_get_linker_section (abfd, name);
7632   if (sec != NULL)
7633     /* Already made.  */
7634     return TRUE;
7635 
7636   sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7637 
7638   if (sec == NULL
7639       || !bfd_set_section_alignment (abfd, sec, 2))
7640     return FALSE;
7641 
7642   /* Set the gc mark to prevent the section from being removed by garbage
7643      collection, despite the fact that no relocs refer to this section.  */
7644   sec->gc_mark = 1;
7645 
7646   return TRUE;
7647 }
7648 
7649 /* Set size of .plt entries.  This function is called from the
7650    linker scripts in ld/emultempl/{armelf}.em.  */
7651 
7652 void
7653 bfd_elf32_arm_use_long_plt (void)
7654 {
7655   elf32_arm_use_long_plt_entry = TRUE;
7656 }
7657 
7658 /* Add the glue sections to ABFD.  This function is called from the
7659    linker scripts in ld/emultempl/{armelf}.em.  */
7660 
7661 bfd_boolean
7662 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7663 					struct bfd_link_info *info)
7664 {
7665   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7666   bfd_boolean dostm32l4xx = globals
7667     && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7668   bfd_boolean addglue;
7669 
7670   /* If we are only performing a partial
7671      link do not bother adding the glue.  */
7672   if (bfd_link_relocatable (info))
7673     return TRUE;
7674 
7675   addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7676     && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7677     && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7678     && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7679 
7680   if (!dostm32l4xx)
7681     return addglue;
7682 
7683   return addglue
7684     && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7685 }
7686 
7687 /* Mark the output sections of veneers needing a dedicated output section
7688    with SEC_KEEP.  This ensures they are not marked for deletion by
7689    strip_excluded_output_sections () when veneers are going to be created
7690    later.  Not doing so would trigger an assert on empty section size in
7691    lang_size_sections_1 ().  */
7692 
7693 void
7694 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7695 {
7696   enum elf32_arm_stub_type stub_type;
7697 
7698   /* If we are only performing a partial
7699      link do not bother keeping the stub output sections.  */
7700   if (bfd_link_relocatable (info))
7701     return;
7702 
7703   for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7704     {
7705       asection *out_sec;
7706       const char *out_sec_name;
7707 
7708       if (!arm_dedicated_stub_output_section_required (stub_type))
7709 	continue;
7710 
7711       out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7712       out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7713       if (out_sec != NULL)
7714 	out_sec->flags |= SEC_KEEP;
7715     }
7716 }
7717 
7718 /* Select a BFD to be used to hold the sections used by the glue code.
7719    This function is called from the linker scripts in ld/emultempl/
7720    {armelf/pe}.em.  */
7721 
7722 bfd_boolean
7723 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7724 {
7725   struct elf32_arm_link_hash_table *globals;
7726 
7727   /* If we are only performing a partial link
7728      do not bother getting a bfd to hold the glue.  */
7729   if (bfd_link_relocatable (info))
7730     return TRUE;
7731 
7732   /* Make sure we don't attach the glue sections to a dynamic object.  */
7733   BFD_ASSERT (!(abfd->flags & DYNAMIC));
7734 
7735   globals = elf32_arm_hash_table (info);
7736   BFD_ASSERT (globals != NULL);
7737 
7738   if (globals->bfd_of_glue_owner != NULL)
7739     return TRUE;
7740 
7741   /* Save the bfd for later use.  */
7742   globals->bfd_of_glue_owner = abfd;
7743 
7744   return TRUE;
7745 }
7746 
7747 static void
7748 check_use_blx (struct elf32_arm_link_hash_table *globals)
7749 {
7750   int cpu_arch;
7751 
7752   cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7753 				       Tag_CPU_arch);
7754 
7755   if (globals->fix_arm1176)
7756     {
7757       if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7758 	globals->use_blx = 1;
7759     }
7760   else
7761     {
7762       if (cpu_arch > TAG_CPU_ARCH_V4T)
7763 	globals->use_blx = 1;
7764     }
7765 }
7766 
7767 bfd_boolean
7768 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7769 					 struct bfd_link_info *link_info)
7770 {
7771   Elf_Internal_Shdr *symtab_hdr;
7772   Elf_Internal_Rela *internal_relocs = NULL;
7773   Elf_Internal_Rela *irel, *irelend;
7774   bfd_byte *contents = NULL;
7775 
7776   asection *sec;
7777   struct elf32_arm_link_hash_table *globals;
7778 
7779   /* If we are only performing a partial link do not bother
7780      to construct any glue.  */
7781   if (bfd_link_relocatable (link_info))
7782     return TRUE;
7783 
7784   /* Here we have a bfd that is to be included in the link.  We have a
7785      hook to do reloc rummaging, before section sizes are nailed down.  */
7786   globals = elf32_arm_hash_table (link_info);
7787   BFD_ASSERT (globals != NULL);
7788 
7789   check_use_blx (globals);
7790 
7791   if (globals->byteswap_code && !bfd_big_endian (abfd))
7792     {
7793       _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7794 			  abfd);
7795       return FALSE;
7796     }
7797 
7798   /* PR 5398: If we have not decided to include any loadable sections in
7799      the output then we will not have a glue owner bfd.  This is OK, it
7800      just means that there is nothing else for us to do here.  */
7801   if (globals->bfd_of_glue_owner == NULL)
7802     return TRUE;
7803 
7804   /* Rummage around all the relocs and map the glue vectors.  */
7805   sec = abfd->sections;
7806 
7807   if (sec == NULL)
7808     return TRUE;
7809 
7810   for (; sec != NULL; sec = sec->next)
7811     {
7812       if (sec->reloc_count == 0)
7813 	continue;
7814 
7815       if ((sec->flags & SEC_EXCLUDE) != 0)
7816 	continue;
7817 
7818       symtab_hdr = & elf_symtab_hdr (abfd);
7819 
7820       /* Load the relocs.  */
7821       internal_relocs
7822 	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
7823 
7824       if (internal_relocs == NULL)
7825 	goto error_return;
7826 
7827       irelend = internal_relocs + sec->reloc_count;
7828       for (irel = internal_relocs; irel < irelend; irel++)
7829 	{
7830 	  long r_type;
7831 	  unsigned long r_index;
7832 
7833 	  struct elf_link_hash_entry *h;
7834 
7835 	  r_type = ELF32_R_TYPE (irel->r_info);
7836 	  r_index = ELF32_R_SYM (irel->r_info);
7837 
7838 	  /* These are the only relocation types we care about.  */
7839 	  if (   r_type != R_ARM_PC24
7840 	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7841 	    continue;
7842 
7843 	  /* Get the section contents if we haven't done so already.  */
7844 	  if (contents == NULL)
7845 	    {
7846 	      /* Get cached copy if it exists.  */
7847 	      if (elf_section_data (sec)->this_hdr.contents != NULL)
7848 		contents = elf_section_data (sec)->this_hdr.contents;
7849 	      else
7850 		{
7851 		  /* Go get them off disk.  */
7852 		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7853 		    goto error_return;
7854 		}
7855 	    }
7856 
7857 	  if (r_type == R_ARM_V4BX)
7858 	    {
7859 	      int reg;
7860 
7861 	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7862 	      record_arm_bx_glue (link_info, reg);
7863 	      continue;
7864 	    }
7865 
7866 	  /* If the relocation is not against a symbol it cannot concern us.  */
7867 	  h = NULL;
7868 
7869 	  /* We don't care about local symbols.  */
7870 	  if (r_index < symtab_hdr->sh_info)
7871 	    continue;
7872 
7873 	  /* This is an external symbol.  */
7874 	  r_index -= symtab_hdr->sh_info;
7875 	  h = (struct elf_link_hash_entry *)
7876 	    elf_sym_hashes (abfd)[r_index];
7877 
7878 	  /* If the relocation is against a static symbol it must be within
7879 	     the current section and so cannot be a cross ARM/Thumb relocation.  */
7880 	  if (h == NULL)
7881 	    continue;
7882 
7883 	  /* If the call will go through a PLT entry then we do not need
7884 	     glue.  */
7885 	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7886 	    continue;
7887 
7888 	  switch (r_type)
7889 	    {
7890 	    case R_ARM_PC24:
7891 	      /* This one is a call from arm code.  We need to look up
7892 		 the target of the call.  If it is a thumb target, we
7893 		 insert glue.  */
7894 	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7895 		  == ST_BRANCH_TO_THUMB)
7896 		record_arm_to_thumb_glue (link_info, h);
7897 	      break;
7898 
7899 	    default:
7900 	      abort ();
7901 	    }
7902 	}
7903 
7904       if (contents != NULL
7905 	  && elf_section_data (sec)->this_hdr.contents != contents)
7906 	free (contents);
7907       contents = NULL;
7908 
7909       if (internal_relocs != NULL
7910 	  && elf_section_data (sec)->relocs != internal_relocs)
7911 	free (internal_relocs);
7912       internal_relocs = NULL;
7913     }
7914 
7915   return TRUE;
7916 
7917 error_return:
7918   if (contents != NULL
7919       && elf_section_data (sec)->this_hdr.contents != contents)
7920     free (contents);
7921   if (internal_relocs != NULL
7922       && elf_section_data (sec)->relocs != internal_relocs)
7923     free (internal_relocs);
7924 
7925   return FALSE;
7926 }
7927 #endif
7928 
7929 
7930 /* Initialise maps of ARM/Thumb/data for input BFDs.  */
7931 
7932 void
7933 bfd_elf32_arm_init_maps (bfd *abfd)
7934 {
7935   Elf_Internal_Sym *isymbuf;
7936   Elf_Internal_Shdr *hdr;
7937   unsigned int i, localsyms;
7938 
7939   /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
7940   if (! is_arm_elf (abfd))
7941     return;
7942 
7943   if ((abfd->flags & DYNAMIC) != 0)
7944     return;
7945 
7946   hdr = & elf_symtab_hdr (abfd);
7947   localsyms = hdr->sh_info;
7948 
7949   /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
7950      should contain the number of local symbols, which should come before any
7951      global symbols.  Mapping symbols are always local.  */
7952   isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
7953 				  NULL);
7954 
7955   /* No internal symbols read?  Skip this BFD.  */
7956   if (isymbuf == NULL)
7957     return;
7958 
7959   for (i = 0; i < localsyms; i++)
7960     {
7961       Elf_Internal_Sym *isym = &isymbuf[i];
7962       asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
7963       const char *name;
7964 
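      /* ARM mapping symbols are named "$a", "$t" or "$d", optionally followed
	 by a period and further characters, so name[1] below is the character
	 identifying the region type.  */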
7965       if (sec != NULL
7966 	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
7967 	{
7968 	  name = bfd_elf_string_from_elf_section (abfd,
7969 	    hdr->sh_link, isym->st_name);
7970 
7971 	  if (bfd_is_arm_special_symbol_name (name,
7972 					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
7973 	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
7974 	}
7975     }
7976 }
7977 
7978 
7979 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
7980    say what they wanted.  */
7981 
7982 void
7983 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
7984 {
7985   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7986   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
7987 
7988   if (globals == NULL)
7989     return;
7990 
7991   if (globals->fix_cortex_a8 == -1)
7992     {
7993       /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
7994       if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
7995 	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
7996 	      || out_attr[Tag_CPU_arch_profile].i == 0))
7997 	globals->fix_cortex_a8 = 1;
7998       else
7999 	globals->fix_cortex_a8 = 0;
8000     }
8001 }
8002 
8003 
8004 void
8005 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8006 {
8007   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8008   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8009 
8010   if (globals == NULL)
8011     return;
8012   /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
8013   if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8014     {
8015       switch (globals->vfp11_fix)
8016 	{
8017 	case BFD_ARM_VFP11_FIX_DEFAULT:
8018 	case BFD_ARM_VFP11_FIX_NONE:
8019 	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8020 	  break;
8021 
8022 	default:
8023 	  /* Give a warning, but do as the user requests anyway.  */
8024 	  _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8025 	    "workaround is not necessary for target architecture"), obfd);
8026 	}
8027     }
8028   else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8029     /* For earlier architectures, we might need the workaround, but do not
8030        enable it by default.  If users are running with broken hardware, they
8031        must enable the erratum fix explicitly.  */
8032     globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8033 }
8034 
8035 void
8036 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8037 {
8038   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8039   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8040 
8041   if (globals == NULL)
8042     return;
8043 
8044   /* We assume only Cortex-M4 may require the fix.  */
8045   if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8046       || out_attr[Tag_CPU_arch_profile].i != 'M')
8047     {
8048       if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8049 	/* Give a warning, but do as the user requests anyway.  */
8050 	_bfd_error_handler
8051 	  (_("%pB: warning: selected STM32L4XX erratum "
8052 	     "workaround is not necessary for target architecture"), obfd);
8053     }
8054 }
8055 
8056 enum bfd_arm_vfp11_pipe
8057 {
8058   VFP11_FMAC,
8059   VFP11_LS,
8060   VFP11_DS,
8061   VFP11_BAD
8062 };
8063 
8064 /* Return a VFP register number.  This is encoded as RX:X for single-precision
8065    registers, or X:RX for double-precision registers, where RX is the group of
8066    four bits in the instruction encoding and X is the single extension bit.
8067    RX and X fields are specified using their lowest (starting) bit.  The return
8068    value is:
8069 
8070      0...31: single-precision registers s0...s31
8071      32...63: double-precision registers d0...d31.
8072 
8073    Although X should be zero for VFP11 (encoding d0...d15 only), we might
8074    encounter VFP3 instructions, so we allow the full range for DP registers.  */
8075 
8076 static unsigned int
8077 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8078 		     unsigned int x)
8079 {
8080   if (is_double)
8081     return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8082   else
8083     return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8084 }
8085 
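/* Editorial illustration; this helper is hypothetical, unused by the
   linker, and exists only to document the encoding handled above.  It
   places Fd = 0b0001 at bits [15:12] and the extension bit D = 1 at
   bit 22, the field positions used for the destination operand of the
   VFP data-processing instructions decoded below.  */

static void ATTRIBUTE_UNUSED
bfd_arm_vfp11_regno_example (void)
{
  unsigned int insn = (0x1u << 12) | (0x1u << 22);

  /* Single precision: Fd:D selects s3.  */
  BFD_ASSERT (bfd_arm_vfp11_regno (insn, FALSE, 12, 22) == 3);
  /* Double precision: D:Fd selects d17, returned as 32 + 17.  */
  BFD_ASSERT (bfd_arm_vfp11_regno (insn, TRUE, 12, 22) == 32 + 17);
}
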
8086 /* Set bits in *WMASK according to a register number REG as encoded by
8087    bfd_arm_vfp11_regno().  Ignore d16-d31.  */
8088 
8089 static void
8090 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8091 {
8092   if (reg < 32)
8093     *wmask |= 1 << reg;
8094   else if (reg < 48)
8095     *wmask |= 3 << ((reg - 32) * 2);
8096 }
8097 
8098 /* Return TRUE if WMASK overwrites anything in REGS.  */
8099 
8100 static bfd_boolean
8101 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8102 {
8103   int i;
8104 
8105   for (i = 0; i < numregs; i++)
8106     {
8107       unsigned int reg = regs[i];
8108 
8109       if (reg < 32 && (wmask & (1 << reg)) != 0)
8110 	return TRUE;
8111 
8112       reg -= 32;
8113 
8114       if (reg >= 16)
8115 	continue;
8116 
8117       if ((wmask & (3 << (reg * 2))) != 0)
8118 	return TRUE;
8119     }
8120 
8121   return FALSE;
8122 }
8123 
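/* Editorial illustration; this unused, hypothetical helper only shows
   how the write mask and the antidependency test above fit together:
   writing d5 marks both s10 and s11, so a later instruction whose
   inputs include s10 has an antidependency on it.  */

static void ATTRIBUTE_UNUSED
bfd_arm_vfp11_write_mask_example (void)
{
  unsigned int wmask = 0;
  int regs[2] = { 10, 20 };   /* Inputs live in s10 and s20.  */

  bfd_arm_vfp11_write_mask (&wmask, 3);       /* s3: bit 3.  */
  bfd_arm_vfp11_write_mask (&wmask, 32 + 5);  /* d5: bits 10 and 11.  */
  bfd_arm_vfp11_write_mask (&wmask, 32 + 20); /* d20: ignored (not VFP11).  */
  BFD_ASSERT (wmask == ((1u << 3) | (3u << 10)));

  BFD_ASSERT (bfd_arm_vfp11_antidependency (wmask, regs, 2));
}
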
8124 /* In this function, we're interested in two things: finding input registers
8125    for VFP data-processing instructions, and finding the set of registers which
8126    arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
8127    hold the written set, so FLDM etc. are easy to deal with (we're only
8128    interested in 32 SP registers or 16 DP registers, due to the VFP version
8129    implemented by the chip in question).  DP registers are marked by setting
8130    both SP registers in the write mask.  */
8131 
8132 static enum bfd_arm_vfp11_pipe
8133 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8134 			   int *numregs)
8135 {
8136   enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8137   bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
8138 
8139   if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
8140     {
8141       unsigned int pqrs;
8142       unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8143       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8144 
8145       pqrs = ((insn & 0x00800000) >> 20)
8146 	   | ((insn & 0x00300000) >> 19)
8147 	   | ((insn & 0x00000040) >> 6);
8148 
8149       switch (pqrs)
8150 	{
8151 	case 0: /* fmac[sd].  */
8152 	case 1: /* fnmac[sd].  */
8153 	case 2: /* fmsc[sd].  */
8154 	case 3: /* fnmsc[sd].  */
8155 	  vpipe = VFP11_FMAC;
8156 	  bfd_arm_vfp11_write_mask (destmask, fd);
8157 	  regs[0] = fd;
8158 	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
8159 	  regs[2] = fm;
8160 	  *numregs = 3;
8161 	  break;
8162 
8163 	case 4: /* fmul[sd].  */
8164 	case 5: /* fnmul[sd].  */
8165 	case 6: /* fadd[sd].  */
8166 	case 7: /* fsub[sd].  */
8167 	  vpipe = VFP11_FMAC;
8168 	  goto vfp_binop;
8169 
8170 	case 8: /* fdiv[sd].  */
8171 	  vpipe = VFP11_DS;
8172 	  vfp_binop:
8173 	  bfd_arm_vfp11_write_mask (destmask, fd);
8174 	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
8175 	  regs[1] = fm;
8176 	  *numregs = 2;
8177 	  break;
8178 
8179 	case 15: /* extended opcode.  */
8180 	  {
8181 	    unsigned int extn = ((insn >> 15) & 0x1e)
8182 			      | ((insn >> 7) & 1);
8183 
8184 	    switch (extn)
8185 	      {
8186 	      case 0: /* fcpy[sd].  */
8187 	      case 1: /* fabs[sd].  */
8188 	      case 2: /* fneg[sd].  */
8189 	      case 8: /* fcmp[sd].  */
8190 	      case 9: /* fcmpe[sd].  */
8191 	      case 10: /* fcmpz[sd].  */
8192 	      case 11: /* fcmpez[sd].  */
8193 	      case 16: /* fuito[sd].  */
8194 	      case 17: /* fsito[sd].  */
8195 	      case 24: /* ftoui[sd].  */
8196 	      case 25: /* ftouiz[sd].  */
8197 	      case 26: /* ftosi[sd].  */
8198 	      case 27: /* ftosiz[sd].  */
8199 		/* These instructions will not bounce due to underflow.  */
8200 		*numregs = 0;
8201 		vpipe = VFP11_FMAC;
8202 		break;
8203 
8204 	      case 3: /* fsqrt[sd].  */
8205 		/* fsqrt cannot underflow, but it can (perhaps) overwrite
8206 		   registers to cause the erratum in previous instructions.  */
8207 		bfd_arm_vfp11_write_mask (destmask, fd);
8208 		vpipe = VFP11_DS;
8209 		break;
8210 
8211 	      case 15: /* fcvt{ds,sd}.  */
8212 		{
8213 		  int rnum = 0;
8214 
8215 		  bfd_arm_vfp11_write_mask (destmask, fd);
8216 
8217 		  /* Only FCVTSD can underflow.  */
8218 		  if ((insn & 0x100) != 0)
8219 		    regs[rnum++] = fm;
8220 
8221 		  *numregs = rnum;
8222 
8223 		  vpipe = VFP11_FMAC;
8224 		}
8225 		break;
8226 
8227 	      default:
8228 		return VFP11_BAD;
8229 	      }
8230 	  }
8231 	  break;
8232 
8233 	default:
8234 	  return VFP11_BAD;
8235 	}
8236     }
8237   /* Two-register transfer.  */
8238   else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8239     {
8240       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8241 
8242       if ((insn & 0x100000) == 0)
8243 	{
8244 	  if (is_double)
8245 	    bfd_arm_vfp11_write_mask (destmask, fm);
8246 	  else
8247 	    {
8248 	      bfd_arm_vfp11_write_mask (destmask, fm);
8249 	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
8250 	    }
8251 	}
8252 
8253       vpipe = VFP11_LS;
8254     }
8255   else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
8256     {
8257       int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8258       unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8259 
8260       switch (puw)
8261 	{
8262 	case 0: /* Two-reg transfer.  We should catch these above.  */
8263 	  abort ();
8264 
8265 	case 2: /* fldm[sdx].  */
8266 	case 3:
8267 	case 5:
8268 	  {
8269 	    unsigned int i, offset = insn & 0xff;
8270 
8271 	    if (is_double)
8272 	      offset >>= 1;
8273 
8274 	    for (i = fd; i < fd + offset; i++)
8275 	      bfd_arm_vfp11_write_mask (destmask, i);
8276 	  }
8277 	  break;
8278 
8279 	case 4: /* fld[sd].  */
8280 	case 6:
8281 	  bfd_arm_vfp11_write_mask (destmask, fd);
8282 	  break;
8283 
8284 	default:
8285 	  return VFP11_BAD;
8286 	}
8287 
8288       vpipe = VFP11_LS;
8289     }
8290   /* Single-register transfer. Note L==0.  */
8291   else if ((insn & 0x0f100e10) == 0x0e000a10)
8292     {
8293       unsigned int opcode = (insn >> 21) & 7;
8294       unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8295 
8296       switch (opcode)
8297 	{
8298 	case 0: /* fmsr/fmdlr.  */
8299 	case 1: /* fmdhr.  */
8300 	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
8301 	     destination register.  I don't know if this is exactly right,
8302 	     but it is the conservative choice.  */
8303 	  bfd_arm_vfp11_write_mask (destmask, fn);
8304 	  break;
8305 
8306 	case 7: /* fmxr.  */
8307 	  break;
8308 	}
8309 
8310       vpipe = VFP11_LS;
8311     }
8312 
8313   return vpipe;
8314 }
8315 
8316 
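/* Editorial illustration; this unused, hypothetical helper decodes the
   ARM instruction FADDS s2, s4, s6 (0xee321a03) with the routine above
   and checks that it is classified as an FMAC-pipeline operation that
   writes s2 and reads s4 and s6.  */

static void ATTRIBUTE_UNUSED
bfd_arm_vfp11_insn_decode_example (void)
{
  unsigned int destmask = 0;
  int regs[3], numregs = 0;
  enum bfd_arm_vfp11_pipe vpipe;

  vpipe = bfd_arm_vfp11_insn_decode (0xee321a03, &destmask, regs, &numregs);

  BFD_ASSERT (vpipe == VFP11_FMAC);
  BFD_ASSERT (destmask == (1u << 2));	/* Writes s2.  */
  /* Reads s4 (Fn) and s6 (Fm).  */
  BFD_ASSERT (numregs == 2 && regs[0] == 4 && regs[1] == 6);
}
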
8317 static int elf32_arm_compare_mapping (const void * a, const void * b);
8318 
8319 
8320 /* Look for potentially-troublesome code sequences which might trigger the
8321    VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
8322    (available from ARM) for details of the erratum.  A short version is
8323    described in ld.texinfo.  */
8324 
8325 bfd_boolean
8326 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8327 {
8328   asection *sec;
8329   bfd_byte *contents = NULL;
8330   int state = 0;
8331   int regs[3], numregs = 0;
8332   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8333   int use_vector;
8334 
8335   if (globals == NULL)
8336     return FALSE;
8337   use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8338   /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8339      The states transition as follows:
8340 
8341        0 -> 1 (vector) or 0 -> 2 (scalar)
8342 	   A VFP FMAC-pipeline instruction has been seen. Fill
8343 	   regs[0]..regs[numregs-1] with its input operands. Remember this
8344 	   instruction in 'first_fmac'.
8345 
8346        1 -> 2
8347 	   Any instruction, except for a VFP instruction which overwrites
8348 	   regs[*].
8349 
8350        1 -> 3 [ -> 0 ]  or
8351        2 -> 3 [ -> 0 ]
8352 	   A VFP instruction has been seen which overwrites any of regs[*].
8353 	   We must make a veneer!  Reset state to 0 before examining next
8354 	   instruction.
8355 
8356        2 -> 0
8357 	   If we fail to match anything in state 2, reset to state 0 and reset
8358 	   the instruction pointer to the instruction after 'first_fmac'.
8359 
8360      If the VFP11 vector mode is in use, there must be at least two unrelated
8361      instructions between anti-dependent VFP11 instructions to properly avoid
8362      triggering the erratum, hence the use of the extra state 1.  */
8363 
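  /* Worked example (editorial note): with the vector-mode fix enabled,
     the sequence

	 fmacs s1, s2, s3	(state 0 -> 1, regs[] = { s1, s2, s3 })
	 add   r0, r0, #1	(state 1 -> 2: not a VFP write to regs[])
	 fmuls s2, s4, s5	(writes s2, which is in regs[] -> state 3)

     records a veneer for the first FMACS and resets the state to 0.  */
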
8364   /* If we are only performing a partial link do not bother
8365      to construct any glue.  */
8366   if (bfd_link_relocatable (link_info))
8367     return TRUE;
8368 
8369   /* Skip if this bfd does not correspond to an ELF image.  */
8370   if (! is_arm_elf (abfd))
8371     return TRUE;
8372 
8373   /* We should have chosen a fix type by the time we get here.  */
8374   BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8375 
8376   if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8377     return TRUE;
8378 
8379   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
8380   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8381     return TRUE;
8382 
8383   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8384     {
8385       unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8386       struct _arm_elf_section_data *sec_data;
8387 
8388       /* If we don't have executable progbits, we're not interested in this
8389 	 section.  Also skip if section is to be excluded.  */
8390       if (elf_section_type (sec) != SHT_PROGBITS
8391 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8392 	  || (sec->flags & SEC_EXCLUDE) != 0
8393 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8394 	  || sec->output_section == bfd_abs_section_ptr
8395 	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8396 	continue;
8397 
8398       sec_data = elf32_arm_section_data (sec);
8399 
8400       if (sec_data->mapcount == 0)
8401 	continue;
8402 
8403       if (elf_section_data (sec)->this_hdr.contents != NULL)
8404 	contents = elf_section_data (sec)->this_hdr.contents;
8405       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8406 	goto error_return;
8407 
8408       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8409 	     elf32_arm_compare_mapping);
8410 
8411       for (span = 0; span < sec_data->mapcount; span++)
8412 	{
8413 	  unsigned int span_start = sec_data->map[span].vma;
8414 	  unsigned int span_end = (span == sec_data->mapcount - 1)
8415 				  ? sec->size : sec_data->map[span + 1].vma;
8416 	  char span_type = sec_data->map[span].type;
8417 
8418 	  /* FIXME: Only ARM mode is supported at present.  We may need to
8419 	     support Thumb-2 mode also at some point.  */
8420 	  if (span_type != 'a')
8421 	    continue;
8422 
8423 	  for (i = span_start; i < span_end;)
8424 	    {
8425 	      unsigned int next_i = i + 4;
8426 	      unsigned int insn = bfd_big_endian (abfd)
8427 		? (contents[i] << 24)
8428 		  | (contents[i + 1] << 16)
8429 		  | (contents[i + 2] << 8)
8430 		  | contents[i + 3]
8431 		: (contents[i + 3] << 24)
8432 		  | (contents[i + 2] << 16)
8433 		  | (contents[i + 1] << 8)
8434 		  | contents[i];
8435 	      unsigned int writemask = 0;
8436 	      enum bfd_arm_vfp11_pipe vpipe;
8437 
8438 	      switch (state)
8439 		{
8440 		case 0:
8441 		  vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8442 						    &numregs);
8443 		  /* I'm assuming the VFP11 erratum can trigger with denorm
8444 		     operands on either the FMAC or the DS pipeline. This might
8445 		     lead to slightly overenthusiastic veneer insertion.  */
8446 		  if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8447 		    {
8448 		      state = use_vector ? 1 : 2;
8449 		      first_fmac = i;
8450 		      veneer_of_insn = insn;
8451 		    }
8452 		  break;
8453 
8454 		case 1:
8455 		  {
8456 		    int other_regs[3], other_numregs;
8457 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8458 						      other_regs,
8459 						      &other_numregs);
8460 		    if (vpipe != VFP11_BAD
8461 			&& bfd_arm_vfp11_antidependency (writemask, regs,
8462 							 numregs))
8463 		      state = 3;
8464 		    else
8465 		      state = 2;
8466 		  }
8467 		  break;
8468 
8469 		case 2:
8470 		  {
8471 		    int other_regs[3], other_numregs;
8472 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8473 						      other_regs,
8474 						      &other_numregs);
8475 		    if (vpipe != VFP11_BAD
8476 			&& bfd_arm_vfp11_antidependency (writemask, regs,
8477 							 numregs))
8478 		      state = 3;
8479 		    else
8480 		      {
8481 			state = 0;
8482 			next_i = first_fmac + 4;
8483 		      }
8484 		  }
8485 		  break;
8486 
8487 		case 3:
8488 		  abort ();  /* Should be unreachable.  */
8489 		}
8490 
8491 	      if (state == 3)
8492 		{
8493 		  elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8494 		      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8495 
8496 		  elf32_arm_section_data (sec)->erratumcount += 1;
8497 
8498 		  newerr->u.b.vfp_insn = veneer_of_insn;
8499 
8500 		  switch (span_type)
8501 		    {
8502 		    case 'a':
8503 		      newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8504 		      break;
8505 
8506 		    default:
8507 		      abort ();
8508 		    }
8509 
8510 		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8511 					       first_fmac);
8512 
8513 		  newerr->vma = -1;
8514 
8515 		  newerr->next = sec_data->erratumlist;
8516 		  sec_data->erratumlist = newerr;
8517 
8518 		  state = 0;
8519 		}
8520 
8521 	      i = next_i;
8522 	    }
8523 	}
8524 
8525       if (contents != NULL
8526 	  && elf_section_data (sec)->this_hdr.contents != contents)
8527 	free (contents);
8528       contents = NULL;
8529     }
8530 
8531   return TRUE;
8532 
8533 error_return:
8534   if (contents != NULL
8535       && elf_section_data (sec)->this_hdr.contents != contents)
8536     free (contents);
8537 
8538   return FALSE;
8539 }
8540 
8541 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
8542    after sections have been laid out, using specially-named symbols.  */
8543 
8544 void
8545 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8546 					  struct bfd_link_info *link_info)
8547 {
8548   asection *sec;
8549   struct elf32_arm_link_hash_table *globals;
8550   char *tmp_name;
8551 
8552   if (bfd_link_relocatable (link_info))
8553     return;
8554 
8555   /* Skip if this bfd does not correspond to an ELF image.  */
8556   if (! is_arm_elf (abfd))
8557     return;
8558 
8559   globals = elf32_arm_hash_table (link_info);
8560   if (globals == NULL)
8561     return;
8562 
8563   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8564 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8565 
8566   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8567     {
8568       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8569       elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8570 
8571       for (; errnode != NULL; errnode = errnode->next)
8572 	{
8573 	  struct elf_link_hash_entry *myh;
8574 	  bfd_vma vma;
8575 
8576 	  switch (errnode->type)
8577 	    {
8578 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8579 	    case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8580 	      /* Find veneer symbol.  */
8581 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8582 		       errnode->u.b.veneer->u.v.id);
8583 
8584 	      myh = elf_link_hash_lookup
8585 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8586 
8587 	      if (myh == NULL)
8588 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8589 				    abfd, "VFP11", tmp_name);
8590 
8591 	      vma = myh->root.u.def.section->output_section->vma
8592 		    + myh->root.u.def.section->output_offset
8593 		    + myh->root.u.def.value;
8594 
8595 	      errnode->u.b.veneer->vma = vma;
8596 	      break;
8597 
8598 	    case VFP11_ERRATUM_ARM_VENEER:
8599 	    case VFP11_ERRATUM_THUMB_VENEER:
8600 	      /* Find return location.  */
8601 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8602 		       errnode->u.v.id);
8603 
8604 	      myh = elf_link_hash_lookup
8605 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8606 
8607 	      if (myh == NULL)
8608 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8609 				    abfd, "VFP11", tmp_name);
8610 
8611 	      vma = myh->root.u.def.section->output_section->vma
8612 		    + myh->root.u.def.section->output_offset
8613 		    + myh->root.u.def.value;
8614 
8615 	      errnode->u.v.branch->vma = vma;
8616 	      break;
8617 
8618 	    default:
8619 	      abort ();
8620 	    }
8621 	}
8622     }
8623 
8624   free (tmp_name);
8625 }
8626 
8627 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
8628    return locations after sections have been laid out, using
8629    specially-named symbols.  */
8630 
8631 void
8632 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8633 					      struct bfd_link_info *link_info)
8634 {
8635   asection *sec;
8636   struct elf32_arm_link_hash_table *globals;
8637   char *tmp_name;
8638 
8639   if (bfd_link_relocatable (link_info))
8640     return;
8641 
8642   /* Skip if this bfd does not correspond to an ELF image.  */
8643   if (! is_arm_elf (abfd))
8644     return;
8645 
8646   globals = elf32_arm_hash_table (link_info);
8647   if (globals == NULL)
8648     return;
8649 
8650   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8651 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8652 
8653   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8654     {
8655       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8656       elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8657 
8658       for (; errnode != NULL; errnode = errnode->next)
8659 	{
8660 	  struct elf_link_hash_entry *myh;
8661 	  bfd_vma vma;
8662 
8663 	  switch (errnode->type)
8664 	    {
8665 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8666 	      /* Find veneer symbol.  */
8667 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8668 		       errnode->u.b.veneer->u.v.id);
8669 
8670 	      myh = elf_link_hash_lookup
8671 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8672 
8673 	      if (myh == NULL)
8674 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8675 				    abfd, "STM32L4XX", tmp_name);
8676 
8677 	      vma = myh->root.u.def.section->output_section->vma
8678 		+ myh->root.u.def.section->output_offset
8679 		+ myh->root.u.def.value;
8680 
8681 	      errnode->u.b.veneer->vma = vma;
8682 	      break;
8683 
8684 	    case STM32L4XX_ERRATUM_VENEER:
8685 	      /* Find return location.  */
8686 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8687 		       errnode->u.v.id);
8688 
8689 	      myh = elf_link_hash_lookup
8690 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8691 
8692 	      if (myh == NULL)
8693 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8694 				    abfd, "STM32L4XX", tmp_name);
8695 
8696 	      vma = myh->root.u.def.section->output_section->vma
8697 		+ myh->root.u.def.section->output_offset
8698 		+ myh->root.u.def.value;
8699 
8700 	      errnode->u.v.branch->vma = vma;
8701 	      break;
8702 
8703 	    default:
8704 	      abort ();
8705 	    }
8706 	}
8707     }
8708 
8709   free (tmp_name);
8710 }
8711 
8712 static inline bfd_boolean
8713 is_thumb2_ldmia (const insn32 insn)
8714 {
8715   /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8716      1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
8717   return (insn & 0xffd02000) == 0xe8900000;
8718 }
8719 
8720 static inline bfd_boolean
8721 is_thumb2_ldmdb (const insn32 insn)
8722 {
8723   /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8724      1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
8725   return (insn & 0xffd02000) == 0xe9100000;
8726 }
8727 
8728 static inline bfd_boolean
8729 is_thumb2_vldm (const insn32 insn)
8730 {
8731   /* A6.5 Extension register load or store instruction
8732      A7.7.229
8733      We look for SP 32-bit and DP 64-bit registers.
8734      Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8735      <list> is consecutive 64-bit registers
8736      1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8737      Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8738      <list> is consecutive 32-bit registers
8739      1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8740      if P==0 && U==1 && W==1 && Rn=1101 VPOP
8741      if PUW=010 || PUW=011 || PUW=101 VLDM.  */
8742   return
8743     (((insn & 0xfe100f00) == 0xec100b00)
8744      || ((insn & 0xfe100f00) == 0xec100a00))
8745     && /* (IA without !).  */
8746     (((((insn << 7) >> 28) & 0xd) == 0x4)
8747      /* (IA with !), includes VPOP (when reg number is SP).  */
8748      || ((((insn << 7) >> 28) & 0xd) == 0x5)
8749      /* (DB with !).  */
8750      || ((((insn << 7) >> 28) & 0xd) == 0x9));
8751 }
8752 
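/* Editorial illustration; this unused, hypothetical helper classifies a
   few concrete Thumb-2 encodings with the predicates above.  */

static void ATTRIBUTE_UNUSED
is_thumb2_multiple_load_example (void)
{
  BFD_ASSERT (is_thumb2_ldmia (0xe8b01ffe));  /* LDMIA.W r0!, {r1-r12}.  */
  BFD_ASSERT (is_thumb2_ldmdb (0xe9100ff0));  /* LDMDB   r0,  {r4-r11}.  */
  BFD_ASSERT (is_thumb2_vldm  (0xecb00b0a));  /* VLDMIA  r0!, {d0-d4}.   */
  /* A store (VSTMIA r0, {d0-d4}) is rejected.  */
  BFD_ASSERT (! is_thumb2_vldm (0xec800b0a));
}
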
8753 /* STM STM32L4XX erratum: This function assumes that it receives an LDM or
8754    VLDM opcode and:
8755  - computes the number and the mode of memory accesses
8756  - decides if the replacement should be done:
8757    . replaces only if > 8-word accesses
8758    . or (testing purposes only) replaces all accesses.  */
8759 
8760 static bfd_boolean
8761 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8762 				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8763 {
8764   int nb_words = 0;
8765 
8766   /* The field encoding the register list is the same for both LDMIA
8767      and LDMDB encodings.  */
8768   if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8769     nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8770   else if (is_thumb2_vldm (insn))
8771     nb_words = (insn & 0xff);
8772 
8773   /* DEFAULT mode accounts for the real bug condition situation,
8774      ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
8775   return
8776     (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8777     (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8778 }
8779 
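/* Editorial illustration; this unused, hypothetical helper shows the
   replacement policy above on concrete encodings: LDMIA.W r0!, {r1-r12}
   (0xe8b01ffe) transfers twelve words and needs a veneer by default,
   while the eight-word LDMIA.W r0!, {r1-r8} (0xe8b001fe) is only
   replaced in the testing-only ALL mode.  */

static void ATTRIBUTE_UNUSED
stm32l4xx_need_create_replacing_stub_example (void)
{
  BFD_ASSERT (stm32l4xx_need_create_replacing_stub
	      (0xe8b01ffe, BFD_ARM_STM32L4XX_FIX_DEFAULT));
  BFD_ASSERT (! stm32l4xx_need_create_replacing_stub
	      (0xe8b001fe, BFD_ARM_STM32L4XX_FIX_DEFAULT));
  BFD_ASSERT (stm32l4xx_need_create_replacing_stub
	      (0xe8b001fe, BFD_ARM_STM32L4XX_FIX_ALL));
}
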
8780 /* Look for potentially-troublesome code sequences which might trigger
8781    the STM STM32L4XX erratum.  */
8782 
8783 bfd_boolean
8784 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8785 				      struct bfd_link_info *link_info)
8786 {
8787   asection *sec;
8788   bfd_byte *contents = NULL;
8789   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8790 
8791   if (globals == NULL)
8792     return FALSE;
8793 
8794   /* If we are only performing a partial link do not bother
8795      to construct any glue.  */
8796   if (bfd_link_relocatable (link_info))
8797     return TRUE;
8798 
8799   /* Skip if this bfd does not correspond to an ELF image.  */
8800   if (! is_arm_elf (abfd))
8801     return TRUE;
8802 
8803   if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8804     return TRUE;
8805 
8806   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
8807   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8808     return TRUE;
8809 
8810   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8811     {
8812       unsigned int i, span;
8813       struct _arm_elf_section_data *sec_data;
8814 
8815       /* If we don't have executable progbits, we're not interested in this
8816 	 section.  Also skip if section is to be excluded.  */
8817       if (elf_section_type (sec) != SHT_PROGBITS
8818 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8819 	  || (sec->flags & SEC_EXCLUDE) != 0
8820 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8821 	  || sec->output_section == bfd_abs_section_ptr
8822 	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8823 	continue;
8824 
8825       sec_data = elf32_arm_section_data (sec);
8826 
8827       if (sec_data->mapcount == 0)
8828 	continue;
8829 
8830       if (elf_section_data (sec)->this_hdr.contents != NULL)
8831 	contents = elf_section_data (sec)->this_hdr.contents;
8832       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8833 	goto error_return;
8834 
8835       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8836 	     elf32_arm_compare_mapping);
8837 
8838       for (span = 0; span < sec_data->mapcount; span++)
8839 	{
8840 	  unsigned int span_start = sec_data->map[span].vma;
8841 	  unsigned int span_end = (span == sec_data->mapcount - 1)
8842 	    ? sec->size : sec_data->map[span + 1].vma;
8843 	  char span_type = sec_data->map[span].type;
8844 	  int itblock_current_pos = 0;
8845 
8846 	  /* Only Thumb-2 mode needs to be supported by this CM4-specific
8847 	     code; we should not encounter ARM-mode code here, so skip
8848 	     everything other than 't' (Thumb) spans.  */
8849 	  if (span_type != 't')
8850 	    continue;
8851 
8852 	  for (i = span_start; i < span_end;)
8853 	    {
8854 	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8855 	      bfd_boolean insn_32bit = FALSE;
8856 	      bfd_boolean is_ldm = FALSE;
8857 	      bfd_boolean is_vldm = FALSE;
8858 	      bfd_boolean is_not_last_in_it_block = FALSE;
8859 
8860 	      /* The first 16 bits of a 32-bit Thumb-2 instruction have
8861 		 opcode[15..13] == 0b111 and op1 (opcode[12..11]) set to
8862 		 anything other than 0b00.
8863 		 See the 32-bit Thumb instruction encoding.  */
8864 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8865 		insn_32bit = TRUE;
8866 
8867 	      /* Work out how this instruction relates to any IT block:
8868 		 - an LDM that is not the last instruction of its IT
8869 		   block cannot be replaced, so it is reported as an
8870 		   error below;
8871 		 - otherwise a branch can be placed at the end of the
8872 		   IT block and will be controlled naturally by IT
8873 		   with the proper pseudo-predicate;
8874 		 - so the only interesting predicate is the one that
8875 		   tells whether we are *not* on the last instruction
8876 		   of an IT block.  */
8877 	      if (itblock_current_pos != 0)
8878 		  is_not_last_in_it_block = !!--itblock_current_pos;
8879 
8880 	      if (insn_32bit)
8881 		{
8882 		  /* Load the rest of the insn (in manual-friendly order).  */
8883 		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8884 		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8885 		  is_vldm = is_thumb2_vldm (insn);
8886 
8887 		  /* Veneers are created for (v)ldm depending on
8888 		     option flags and memory access conditions; but
8889 		     if the instruction is not the last instruction of
8890 		     an IT block, we cannot create a jump there, so we
8891 		     bail out.  */
8892 		    if ((is_ldm || is_vldm)
8893 			&& stm32l4xx_need_create_replacing_stub
8894 			(insn, globals->stm32l4xx_fix))
8895 		      {
8896 			if (is_not_last_in_it_block)
8897 			  {
8898 			    _bfd_error_handler
8899 			      /* xgettext:c-format */
8900 			      (_("%pB(%pA+%#x): error: multiple load detected"
8901 				 " in non-last IT block instruction:"
8902 				 " STM32L4XX veneer cannot be generated; "
8903 				 "use gcc option -mrestrict-it to generate"
8904 				 " only one instruction per IT block"),
8905 			       abfd, sec, i);
8906 			  }
8907 			else
8908 			  {
8909 			    elf32_stm32l4xx_erratum_list *newerr =
8910 			      (elf32_stm32l4xx_erratum_list *)
8911 			      bfd_zmalloc
8912 			      (sizeof (elf32_stm32l4xx_erratum_list));
8913 
8914 			    elf32_arm_section_data (sec)
8915 			      ->stm32l4xx_erratumcount += 1;
8916 			    newerr->u.b.insn = insn;
8917 			    /* We create only thumb branches.  */
8918 			    newerr->type =
8919 			      STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
8920 			    record_stm32l4xx_erratum_veneer
8921 			      (link_info, newerr, abfd, sec,
8922 			       i,
8923 			       is_ldm ?
8924 			       STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
8925 			       STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
8926 			    newerr->vma = -1;
8927 			    newerr->next = sec_data->stm32l4xx_erratumlist;
8928 			    sec_data->stm32l4xx_erratumlist = newerr;
8929 			  }
8930 		      }
8931 		}
8932 	      else
8933 		{
8934 		  /* A7.7.37 IT p208
8935 		     IT blocks are only encoded in T1
8936 		     Encoding T1: IT{x{y{z}}} <firstcond>
8937 		     1 0 1 1 - 1 1 1 1 - firstcond - mask
8938 		     if mask = '0000' then see 'related encodings'
8939 		     We don't deal with UNPREDICTABLE, just ignore these.
8940 		     There can be no nested IT blocks so an IT block
8941 		     is naturally a new one for which it is worth
8942 		     computing its size.  */
8943 		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
8944 		    && ((insn & 0x000f) != 0x0000);
8945 		  /* If we have a new IT block we compute its size.  */
8946 		  if (is_newitblock)
8947 		    {
8948 		      /* Compute the number of instructions controlled
8949 			 by the IT block, it will be used to decide
8950 			 whether we are inside an IT block or not.  */
8951 		      unsigned int mask = insn & 0x000f;
8952 		      itblock_current_pos = 4 - ctz (mask);
8953 		    }
8954 		}
8955 
8956 	      i += insn_32bit ? 4 : 2;
8957 	    }
8958 	}
8959 
8960       if (contents != NULL
8961 	  && elf_section_data (sec)->this_hdr.contents != contents)
8962 	free (contents);
8963       contents = NULL;
8964     }
8965 
8966   return TRUE;
8967 
8968 error_return:
8969   if (contents != NULL
8970       && elf_section_data (sec)->this_hdr.contents != contents)
8971     free (contents);
8972 
8973   return FALSE;
8974 }
8975 
8976 /* Set target relocation values needed during linking.  */
8977 
8978 void
8979 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
8980 				 struct bfd_link_info *link_info,
8981 				 struct elf32_arm_params *params)
8982 {
8983   struct elf32_arm_link_hash_table *globals;
8984 
8985   globals = elf32_arm_hash_table (link_info);
8986   if (globals == NULL)
8987     return;
8988 
8989   globals->target1_is_rel = params->target1_is_rel;
8990   if (globals->fdpic_p)
8991     globals->target2_reloc = R_ARM_GOT32;
8992   else if (strcmp (params->target2_type, "rel") == 0)
8993     globals->target2_reloc = R_ARM_REL32;
8994   else if (strcmp (params->target2_type, "abs") == 0)
8995     globals->target2_reloc = R_ARM_ABS32;
8996   else if (strcmp (params->target2_type, "got-rel") == 0)
8997     globals->target2_reloc = R_ARM_GOT_PREL;
8998   else
8999     {
9000       _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9001 			  params->target2_type);
9002     }
9003   globals->fix_v4bx = params->fix_v4bx;
9004   globals->use_blx |= params->use_blx;
9005   globals->vfp11_fix = params->vfp11_denorm_fix;
9006   globals->stm32l4xx_fix = params->stm32l4xx_fix;
9007   if (globals->fdpic_p)
9008     globals->pic_veneer = 1;
9009   else
9010     globals->pic_veneer = params->pic_veneer;
9011   globals->fix_cortex_a8 = params->fix_cortex_a8;
9012   globals->fix_arm1176 = params->fix_arm1176;
9013   globals->cmse_implib = params->cmse_implib;
9014   globals->in_implib_bfd = params->in_implib_bfd;
9015 
9016   BFD_ASSERT (is_arm_elf (output_bfd));
9017   elf_arm_tdata (output_bfd)->no_enum_size_warning
9018     = params->no_enum_size_warning;
9019   elf_arm_tdata (output_bfd)->no_wchar_size_warning
9020     = params->no_wchar_size_warning;
9021 }
9022 
9023 /* Replace the target offset of a Thumb bl or b.w instruction.  */
9024 
9025 static void
9026 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9027 {
9028   bfd_vma upper;
9029   bfd_vma lower;
9030   int reloc_sign;
9031 
9032   BFD_ASSERT ((offset & 1) == 0);
9033 
9034   upper = bfd_get_16 (abfd, insn);
9035   lower = bfd_get_16 (abfd, insn + 2);
9036   reloc_sign = (offset < 0) ? 1 : 0;
9037   upper = (upper & ~(bfd_vma) 0x7ff)
9038 	  | ((offset >> 12) & 0x3ff)
9039 	  | (reloc_sign << 10);
9040   lower = (lower & ~(bfd_vma) 0x2fff)
9041 	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9042 	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9043 	  | ((offset >> 1) & 0x7ff);
9044   bfd_put_16 (abfd, upper, insn);
9045   bfd_put_16 (abfd, lower, insn + 2);
9046 }
9047 
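/* Editorial illustration; this unused, hypothetical helper shows the
   offset insertion above on a Thumb-2 BL whose encoded offset is zero
   (halfwords 0xf000, 0xf800): writing an offset of 0x1000 sets imm10
   to 1 and leaves J1 = J2 = 1 for a positive displacement.  */

static void ATTRIBUTE_UNUSED
insert_thumb_branch_example (bfd *abfd)
{
  bfd_byte insn[4];

  bfd_put_16 (abfd, 0xf000, insn);
  bfd_put_16 (abfd, 0xf800, insn + 2);

  insert_thumb_branch (abfd, 0x1000, insn);

  BFD_ASSERT (bfd_get_16 (abfd, insn) == 0xf001);
  BFD_ASSERT (bfd_get_16 (abfd, insn + 2) == 0xf800);
}
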
9048 /* Thumb code calling an ARM function.  */
9049 
9050 static int
9051 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9052 			 const char *		name,
9053 			 bfd *			input_bfd,
9054 			 bfd *			output_bfd,
9055 			 asection *		input_section,
9056 			 bfd_byte *		hit_data,
9057 			 asection *		sym_sec,
9058 			 bfd_vma		offset,
9059 			 bfd_signed_vma		addend,
9060 			 bfd_vma		val,
9061 			 char **error_message)
9062 {
9063   asection * s = 0;
9064   bfd_vma my_offset;
9065   long int ret_offset;
9066   struct elf_link_hash_entry * myh;
9067   struct elf32_arm_link_hash_table * globals;
9068 
9069   myh = find_thumb_glue (info, name, error_message);
9070   if (myh == NULL)
9071     return FALSE;
9072 
9073   globals = elf32_arm_hash_table (info);
9074   BFD_ASSERT (globals != NULL);
9075   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9076 
9077   my_offset = myh->root.u.def.value;
9078 
9079   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9080 			      THUMB2ARM_GLUE_SECTION_NAME);
9081 
9082   BFD_ASSERT (s != NULL);
9083   BFD_ASSERT (s->contents != NULL);
9084   BFD_ASSERT (s->output_section != NULL);
9085 
9086   if ((my_offset & 0x01) == 0x01)
9087     {
9088       if (sym_sec != NULL
9089 	  && sym_sec->owner != NULL
9090 	  && !INTERWORK_FLAG (sym_sec->owner))
9091 	{
9092 	  _bfd_error_handler
9093 	    (_("%pB(%s): warning: interworking not enabled;"
9094 	       " first occurrence: %pB: %s call to %s"),
9095 	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9096 
9097 	  return FALSE;
9098 	}
9099 
9100       --my_offset;
9101       myh->root.u.def.value = my_offset;
9102 
9103       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9104 		      s->contents + my_offset);
9105 
9106       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9107 		      s->contents + my_offset + 2);
9108 
9109       ret_offset =
9110 	/* Address of destination of the stub.  */
9111 	((bfd_signed_vma) val)
9112 	- ((bfd_signed_vma)
9113 	   /* Offset from the start of the current section
9114 	      to the start of the stubs.  */
9115 	   (s->output_offset
9116 	    /* Offset of the start of this stub from the start of the stubs.  */
9117 	    + my_offset
9118 	    /* Address of the start of the current section.  */
9119 	    + s->output_section->vma)
9120 	   /* The branch instruction is 4 bytes into the stub.  */
9121 	   + 4
9122 	   /* ARM branches work from the pc of the instruction + 8.  */
9123 	   + 8);
9124 
9125       put_arm_insn (globals, output_bfd,
9126 		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9127 		    s->contents + my_offset + 4);
9128     }
9129 
9130   BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9131 
9132   /* Now go back and fix up the original BL insn to point to here.  */
9133   ret_offset =
9134     /* Address of where the stub is located.  */
9135     (s->output_section->vma + s->output_offset + my_offset)
9136      /* Address of where the BL is located.  */
9137     - (input_section->output_section->vma + input_section->output_offset
9138        + offset)
9139     /* Addend in the relocation.  */
9140     - addend
9141     /* Biassing for PC-relative addressing.  */
9142     - 8;
9143 
9144   insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9145 
9146   return TRUE;
9147 }
9148 
9149 /* Populate an Arm to Thumb stub.  Returns the stub symbol.  */
9150 
9151 static struct elf_link_hash_entry *
9152 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9153 			     const char *	    name,
9154 			     bfd *		    input_bfd,
9155 			     bfd *		    output_bfd,
9156 			     asection *		    sym_sec,
9157 			     bfd_vma		    val,
9158 			     asection *		    s,
9159 			     char **		    error_message)
9160 {
9161   bfd_vma my_offset;
9162   long int ret_offset;
9163   struct elf_link_hash_entry * myh;
9164   struct elf32_arm_link_hash_table * globals;
9165 
9166   myh = find_arm_glue (info, name, error_message);
9167   if (myh == NULL)
9168     return NULL;
9169 
9170   globals = elf32_arm_hash_table (info);
9171   BFD_ASSERT (globals != NULL);
9172   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9173 
9174   my_offset = myh->root.u.def.value;
9175 
9176   if ((my_offset & 0x01) == 0x01)
9177     {
9178       if (sym_sec != NULL
9179 	  && sym_sec->owner != NULL
9180 	  && !INTERWORK_FLAG (sym_sec->owner))
9181 	{
9182 	  _bfd_error_handler
9183 	    (_("%pB(%s): warning: interworking not enabled;"
9184 	       " first occurrence: %pB: %s call to %s"),
9185 	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9186 	}
9187 
9188       --my_offset;
9189       myh->root.u.def.value = my_offset;
9190 
9191       if (bfd_link_pic (info)
9192 	  || globals->root.is_relocatable_executable
9193 	  || globals->pic_veneer)
9194 	{
9195 	  /* For relocatable objects we can't use absolute addresses,
9196 	     so construct the address from a relative offset.  */
9197 	  /* TODO: If the offset is small it's probably worth
9198 	     constructing the address with adds.  */
9199 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9200 			s->contents + my_offset);
9201 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9202 			s->contents + my_offset + 4);
9203 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9204 			s->contents + my_offset + 8);
9205 	  /* Adjust the offset by 4 for the position of the add,
9206 	     and 8 for the pipeline offset.  */
9207 	  ret_offset = (val - (s->output_offset
9208 			       + s->output_section->vma
9209 			       + my_offset + 12))
9210 		       | 1;
9211 	  bfd_put_32 (output_bfd, ret_offset,
9212 		      s->contents + my_offset + 12);
9213 	}
9214       else if (globals->use_blx)
9215 	{
9216 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9217 			s->contents + my_offset);
9218 
9219 	  /* It's a thumb address.  Add the low order bit.  */
9220 	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9221 		      s->contents + my_offset + 4);
9222 	}
9223       else
9224 	{
9225 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9226 			s->contents + my_offset);
9227 
9228 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9229 			s->contents + my_offset + 4);
9230 
9231 	  /* It's a thumb address.  Add the low order bit.  */
9232 	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9233 		      s->contents + my_offset + 8);
9234 
9235 	  my_offset += 12;
9236 	}
9237     }
9238 
9239   BFD_ASSERT (my_offset <= globals->arm_glue_size);
9240 
9241   return myh;
9242 }
9243 
9244 /* Arm code calling a Thumb function.  */
9245 
9246 static int
9247 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9248 			 const char *		name,
9249 			 bfd *			input_bfd,
9250 			 bfd *			output_bfd,
9251 			 asection *		input_section,
9252 			 bfd_byte *		hit_data,
9253 			 asection *		sym_sec,
9254 			 bfd_vma		offset,
9255 			 bfd_signed_vma		addend,
9256 			 bfd_vma		val,
9257 			 char **error_message)
9258 {
9259   unsigned long int tmp;
9260   bfd_vma my_offset;
9261   asection * s;
9262   long int ret_offset;
9263   struct elf_link_hash_entry * myh;
9264   struct elf32_arm_link_hash_table * globals;
9265 
9266   globals = elf32_arm_hash_table (info);
9267   BFD_ASSERT (globals != NULL);
9268   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9269 
9270   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9271 			      ARM2THUMB_GLUE_SECTION_NAME);
9272   BFD_ASSERT (s != NULL);
9273   BFD_ASSERT (s->contents != NULL);
9274   BFD_ASSERT (s->output_section != NULL);
9275 
9276   myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9277 				     sym_sec, val, s, error_message);
9278   if (!myh)
9279     return FALSE;
9280 
9281   my_offset = myh->root.u.def.value;
9282   tmp = bfd_get_32 (input_bfd, hit_data);
9283   tmp = tmp & 0xFF000000;
9284 
9285   /* ARM branches work from the pc of the instruction + 8, so subtract 8.  */
9286   ret_offset = (s->output_offset
9287 		+ my_offset
9288 		+ s->output_section->vma
9289 		- (input_section->output_offset
9290 		   + input_section->output_section->vma
9291 		   + offset + addend)
9292 		- 8);
9293 
9294   tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9295 
9296   bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9297 
9298   return TRUE;
9299 }
9300 
9301 /* Populate Arm stub for an exported Thumb function.  */
9302 
9303 static bfd_boolean
9304 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9305 {
9306   struct bfd_link_info * info = (struct bfd_link_info *) inf;
9307   asection * s;
9308   struct elf_link_hash_entry * myh;
9309   struct elf32_arm_link_hash_entry *eh;
9310   struct elf32_arm_link_hash_table * globals;
9311   asection *sec;
9312   bfd_vma val;
9313   char *error_message;
9314 
9315   eh = elf32_arm_hash_entry (h);
9316   /* Allocate stubs for exported Thumb functions on v4t.  */
9317   if (eh->export_glue == NULL)
9318     return TRUE;
9319 
9320   globals = elf32_arm_hash_table (info);
9321   BFD_ASSERT (globals != NULL);
9322   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9323 
9324   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9325 			      ARM2THUMB_GLUE_SECTION_NAME);
9326   BFD_ASSERT (s != NULL);
9327   BFD_ASSERT (s->contents != NULL);
9328   BFD_ASSERT (s->output_section != NULL);
9329 
9330   sec = eh->export_glue->root.u.def.section;
9331 
9332   BFD_ASSERT (sec->output_section != NULL);
9333 
9334   val = eh->export_glue->root.u.def.value + sec->output_offset
9335 	+ sec->output_section->vma;
9336 
9337   myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9338 				     h->root.u.def.section->owner,
9339 				     globals->obfd, sec, val, s,
9340 				     &error_message);
9341   BFD_ASSERT (myh);
9342   return TRUE;
9343 }
9344 
9345 /* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
9346 
9347 static bfd_vma
9348 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9349 {
9350   bfd_byte *p;
9351   bfd_vma glue_addr;
9352   asection *s;
9353   struct elf32_arm_link_hash_table *globals;
9354 
9355   globals = elf32_arm_hash_table (info);
9356   BFD_ASSERT (globals != NULL);
9357   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9358 
9359   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9360 			      ARM_BX_GLUE_SECTION_NAME);
9361   BFD_ASSERT (s != NULL);
9362   BFD_ASSERT (s->contents != NULL);
9363   BFD_ASSERT (s->output_section != NULL);
9364 
9365   BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9366 
9367   glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9368 
9369   if ((globals->bx_glue_offset[reg] & 1) == 0)
9370     {
9371       p = s->contents + glue_addr;
9372       bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9373       bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9374       bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9375       globals->bx_glue_offset[reg] |= 1;
9376     }
9377 
9378   return glue_addr + s->output_section->vma + s->output_offset;
9379 }
9380 
9381 /* Generate Arm stubs for exported Thumb symbols.  */
9382 static void
9383 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9384 				  struct bfd_link_info *link_info)
9385 {
9386   struct elf32_arm_link_hash_table * globals;
9387 
9388   if (link_info == NULL)
9389     /* Ignore this if we are not called by the ELF backend linker.  */
9390     return;
9391 
9392   globals = elf32_arm_hash_table (link_info);
9393   if (globals == NULL)
9394     return;
9395 
9396   /* If blx is available then exported Thumb symbols are OK and there is
9397      nothing to do.  */
9398   if (globals->use_blx)
9399     return;
9400 
9401   elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9402 			  link_info);
9403 }
9404 
9405 /* Reserve space for COUNT dynamic relocations in the relocation section
9406    SRELOC.  */
9407 
9408 static void
9409 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9410 			      bfd_size_type count)
9411 {
9412   struct elf32_arm_link_hash_table *htab;
9413 
9414   htab = elf32_arm_hash_table (info);
9415   BFD_ASSERT (htab->root.dynamic_sections_created);
9416   if (sreloc == NULL)
9417     abort ();
9418   sreloc->size += RELOC_SIZE (htab) * count;
9419 }
9420 
9421 /* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
9422    dynamic, the relocations should go in SRELOC, otherwise they should
9423    go in the special .rel.iplt section.  */
9424 
9425 static void
9426 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9427 			    bfd_size_type count)
9428 {
9429   struct elf32_arm_link_hash_table *htab;
9430 
9431   htab = elf32_arm_hash_table (info);
9432   if (!htab->root.dynamic_sections_created)
9433     htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9434   else
9435     {
9436       BFD_ASSERT (sreloc != NULL);
9437       sreloc->size += RELOC_SIZE (htab) * count;
9438     }
9439 }
9440 
9441 /* Add relocation REL to the end of relocation section SRELOC.  */
9442 
9443 static void
9444 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9445 			asection *sreloc, Elf_Internal_Rela *rel)
9446 {
9447   bfd_byte *loc;
9448   struct elf32_arm_link_hash_table *htab;
9449 
9450   htab = elf32_arm_hash_table (info);
9451   if (!htab->root.dynamic_sections_created
9452       && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9453     sreloc = htab->root.irelplt;
9454   if (sreloc == NULL)
9455     abort ();
9456   loc = sreloc->contents;
9457   loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9458   if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9459     abort ();
9460   SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9461 }
9462 
9463 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9464    IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9465    to .plt.  */
9466 
9467 static void
9468 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9469 			      bfd_boolean is_iplt_entry,
9470 			      union gotplt_union *root_plt,
9471 			      struct arm_plt_info *arm_plt)
9472 {
9473   struct elf32_arm_link_hash_table *htab;
9474   asection *splt;
9475   asection *sgotplt;
9476 
9477   htab = elf32_arm_hash_table (info);
9478 
9479   if (is_iplt_entry)
9480     {
9481       splt = htab->root.iplt;
9482       sgotplt = htab->root.igotplt;
9483 
9484       /* NaCl uses a special first entry in .iplt too.  */
9485       if (htab->nacl_p && splt->size == 0)
9486 	splt->size += htab->plt_header_size;
9487 
9488       /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
9489       elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9490     }
9491   else
9492     {
9493       splt = htab->root.splt;
9494       sgotplt = htab->root.sgotplt;
9495 
9496       if (htab->fdpic_p)
9497 	{
9498 	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
9499 	  /* For lazy binding, relocations will be put into .rel.plt, in
9500 	     .rel.got otherwise.  */
9501 	  /* FIXME: we do not support lazy binding yet, so use .rel.got.  */
9502 	  if (info->flags & DF_BIND_NOW)
9503 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9504 	  else
9505 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9506 	}
9507       else
9508 	{
9509 	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
9510 	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9511 	}
9512 
9513       /* If this is the first .plt entry, make room for the special
9514 	 first entry.  */
9515       if (splt->size == 0)
9516 	splt->size += htab->plt_header_size;
9517 
9518       htab->next_tls_desc_index++;
9519     }
9520 
9521   /* Allocate the PLT entry itself, including any leading Thumb stub.  */
9522   if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9523     splt->size += PLT_THUMB_STUB_SIZE;
9524   root_plt->offset = splt->size;
9525   splt->size += htab->plt_entry_size;
9526 
9527   if (!htab->symbian_p)
9528     {
9529       /* We also need to make an entry in the .got.plt section, which
9530 	 will be placed in the .got section by the linker script.  */
9531       if (is_iplt_entry)
9532 	arm_plt->got_offset = sgotplt->size;
9533       else
9534 	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9535       if (htab->fdpic_p)
9536 	/* Function descriptor takes 64 bits in GOT.  */
9537         sgotplt->size += 8;
9538       else
9539 	sgotplt->size += 4;
9540     }
9541 }
9542 
9543 static bfd_vma
9544 arm_movw_immediate (bfd_vma value)
9545 {
9546   return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9547 }
9548 
9549 static bfd_vma
9550 arm_movt_immediate (bfd_vma value)
9551 {
9552   return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9553 }
9554 
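/* Editorial illustration; this unused, hypothetical helper shows how a
   32-bit value is split across the MOVW/MOVT immediate fields built
   above (imm4 in bits [19:16], imm12 in bits [11:0]).  */

static void ATTRIBUTE_UNUSED
arm_mov_immediate_example (void)
{
  BFD_ASSERT (arm_movw_immediate (0x12345678) == 0x50678);  /* Low half.  */
  BFD_ASSERT (arm_movt_immediate (0x12345678) == 0x10234);  /* High half.  */
}
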
9555 /* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
9556    the entry lives in .iplt and resolves to (*SYM_VALUE)().
9557    Otherwise, DYNINDX is the index of the symbol in the dynamic
9558    symbol table and SYM_VALUE is undefined.
9559 
9560    ROOT_PLT points to the offset of the PLT entry from the start of its
9561    section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
9562    bookkeeping information.
9563 
9564    Returns FALSE if there was a problem.  */
9565 
9566 static bfd_boolean
9567 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9568 			      union gotplt_union *root_plt,
9569 			      struct arm_plt_info *arm_plt,
9570 			      int dynindx, bfd_vma sym_value)
9571 {
9572   struct elf32_arm_link_hash_table *htab;
9573   asection *sgot;
9574   asection *splt;
9575   asection *srel;
9576   bfd_byte *loc;
9577   bfd_vma plt_index;
9578   Elf_Internal_Rela rel;
9579   bfd_vma plt_header_size;
9580   bfd_vma got_header_size;
9581 
9582   htab = elf32_arm_hash_table (info);
9583 
9584   /* Pick the appropriate sections and sizes.  */
9585   if (dynindx == -1)
9586     {
9587       splt = htab->root.iplt;
9588       sgot = htab->root.igotplt;
9589       srel = htab->root.irelplt;
9590 
9591       /* There are no reserved entries in .igot.plt, and no special
9592 	 first entry in .iplt.  */
9593       got_header_size = 0;
9594       plt_header_size = 0;
9595     }
9596   else
9597     {
9598       splt = htab->root.splt;
9599       sgot = htab->root.sgotplt;
9600       srel = htab->root.srelplt;
9601 
9602       got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9603       plt_header_size = htab->plt_header_size;
9604     }
9605   BFD_ASSERT (splt != NULL && srel != NULL);
9606 
9607   /* Fill in the entry in the procedure linkage table.  */
9608   if (htab->symbian_p)
9609     {
9610       BFD_ASSERT (dynindx >= 0);
9611       put_arm_insn (htab, output_bfd,
9612 		    elf32_arm_symbian_plt_entry[0],
9613 		    splt->contents + root_plt->offset);
9614       bfd_put_32 (output_bfd,
9615 		  elf32_arm_symbian_plt_entry[1],
9616 		  splt->contents + root_plt->offset + 4);
9617 
9618       /* Fill in the entry in the .rel.plt section.  */
9619       rel.r_offset = (splt->output_section->vma
9620 		      + splt->output_offset
9621 		      + root_plt->offset + 4);
9622       rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
9623 
9624       /* Get the index in the procedure linkage table which
9625 	 corresponds to this symbol.  This is the index of this symbol
9626 	 in all the symbols for which we are making plt entries.  The
9627 	 first entry in the procedure linkage table is reserved.  */
9628       plt_index = ((root_plt->offset - plt_header_size)
9629 		   / htab->plt_entry_size);
9630     }
9631   else
9632     {
9633       bfd_vma got_offset, got_address, plt_address;
9634       bfd_vma got_displacement, initial_got_entry;
9635       bfd_byte * ptr;
9636 
9637       BFD_ASSERT (sgot != NULL);
9638 
9639       /* Get the offset into the .(i)got.plt table of the entry that
9640 	 corresponds to this function.  */
9641       got_offset = (arm_plt->got_offset & -2);
9642 
9643       /* Get the index in the procedure linkage table which
9644 	 corresponds to this symbol.  This is the index of this symbol
9645 	 in all the symbols for which we are making plt entries.
9646 	 After the reserved .got.plt entries, all symbols appear in
9647 	 the same order as in .plt.  */
9648       if (htab->fdpic_p)
9649         /* Function descriptor takes 8 bytes.  */
9650         plt_index = (got_offset - got_header_size) / 8;
9651       else
9652         plt_index = (got_offset - got_header_size) / 4;
9653 
9654       /* Calculate the address of the GOT entry.  */
9655       got_address = (sgot->output_section->vma
9656 		     + sgot->output_offset
9657 		     + got_offset);
9658 
9659       /* ...and the address of the PLT entry.  */
9660       plt_address = (splt->output_section->vma
9661 		     + splt->output_offset
9662 		     + root_plt->offset);
9663 
9664       ptr = splt->contents + root_plt->offset;
9665       if (htab->vxworks_p && bfd_link_pic (info))
9666 	{
9667 	  unsigned int i;
9668 	  bfd_vma val;
9669 
9670 	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9671 	    {
9672 	      val = elf32_arm_vxworks_shared_plt_entry[i];
9673 	      if (i == 2)
9674 		val |= got_address - sgot->output_section->vma;
9675 	      if (i == 5)
9676 		val |= plt_index * RELOC_SIZE (htab);
9677 	      if (i == 2 || i == 5)
9678 		bfd_put_32 (output_bfd, val, ptr);
9679 	      else
9680 		put_arm_insn (htab, output_bfd, val, ptr);
9681 	    }
9682 	}
9683       else if (htab->vxworks_p)
9684 	{
9685 	  unsigned int i;
9686 	  bfd_vma val;
9687 
9688 	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9689 	    {
9690 	      val = elf32_arm_vxworks_exec_plt_entry[i];
9691 	      if (i == 2)
9692 		val |= got_address;
9693 	      if (i == 4)
9694 		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9695 	      if (i == 5)
9696 		val |= plt_index * RELOC_SIZE (htab);
9697 	      if (i == 2 || i == 5)
9698 		bfd_put_32 (output_bfd, val, ptr);
9699 	      else
9700 		put_arm_insn (htab, output_bfd, val, ptr);
9701 	    }
9702 
9703 	  loc = (htab->srelplt2->contents
9704 		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9705 
9706 	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9707 	     referencing the GOT for this PLT entry.  */
9708 	  rel.r_offset = plt_address + 8;
9709 	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9710 	  rel.r_addend = got_offset;
9711 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9712 	  loc += RELOC_SIZE (htab);
9713 
9714 	  /* Create the R_ARM_ABS32 relocation referencing the
9715 	     beginning of the PLT for this GOT entry.  */
9716 	  rel.r_offset = got_address;
9717 	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9718 	  rel.r_addend = 0;
9719 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9720 	}
9721       else if (htab->nacl_p)
9722 	{
9723 	  /* Calculate the displacement between the PLT slot and the
9724 	     common tail that's part of the special initial PLT slot.  */
9725 	  int32_t tail_displacement
9726 	    = ((splt->output_section->vma + splt->output_offset
9727 		+ ARM_NACL_PLT_TAIL_OFFSET)
9728 	       - (plt_address + htab->plt_entry_size + 4));
9729 	  BFD_ASSERT ((tail_displacement & 3) == 0);
9730 	  tail_displacement >>= 2;
9731 
9732 	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9733 		      || (-tail_displacement & 0xff000000) == 0);
9734 
9735 	  /* Calculate the displacement between the PLT slot and the entry
9736 	     in the GOT.  The offset accounts for the value produced by
9737 	     adding to pc in the penultimate instruction of the PLT stub.  */
9738 	  got_displacement = (got_address
9739 			      - (plt_address + htab->plt_entry_size));
9740 
9741 	  /* NaCl does not support interworking at all.  */
9742 	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9743 
9744 	  put_arm_insn (htab, output_bfd,
9745 			elf32_arm_nacl_plt_entry[0]
9746 			| arm_movw_immediate (got_displacement),
9747 			ptr + 0);
9748 	  put_arm_insn (htab, output_bfd,
9749 			elf32_arm_nacl_plt_entry[1]
9750 			| arm_movt_immediate (got_displacement),
9751 			ptr + 4);
9752 	  put_arm_insn (htab, output_bfd,
9753 			elf32_arm_nacl_plt_entry[2],
9754 			ptr + 8);
9755 	  put_arm_insn (htab, output_bfd,
9756 			elf32_arm_nacl_plt_entry[3]
9757 			| (tail_displacement & 0x00ffffff),
9758 			ptr + 12);
9759 	}
9760       else if (htab->fdpic_p)
9761 	{
9762 	  const bfd_vma *plt_entry = using_thumb_only (htab)
9763 	    ? elf32_arm_fdpic_thumb_plt_entry
9764 	    : elf32_arm_fdpic_plt_entry;
9765 
9766 	  /* Fill in the Thumb stub if needed.  */
9767 	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9768 	    {
9769 	      put_thumb_insn (htab, output_bfd,
9770 			      elf32_arm_plt_thumb_stub[0], ptr - 4);
9771 	      put_thumb_insn (htab, output_bfd,
9772 			      elf32_arm_plt_thumb_stub[1], ptr - 2);
9773 	    }
9774 	  /* As we are using 32 bit instructions even for the Thumb
9775 	     version, we have to use 'put_arm_insn' instead of
9776 	     'put_thumb_insn'.  */
9777 	  put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
9778 	  put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
9779 	  put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
9780 	  put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
9781 	  bfd_put_32 (output_bfd, got_offset, ptr + 16);
9782 
9783 	  if (!(info->flags & DF_BIND_NOW))
9784 	    {
9785 	      /* funcdesc_value_reloc_offset.  */
9786 	      bfd_put_32 (output_bfd,
9787 			  htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9788 			  ptr + 20);
9789 	      put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
9790 	      put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
9791 	      put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
9792 	      put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
9793 	    }
9794 	}
9795       else if (using_thumb_only (htab))
9796 	{
9797 	  /* PR ld/16017: Generate thumb only PLT entries.  */
9798 	  if (!using_thumb2 (htab))
9799 	    {
9800 	      /* FIXME: We ought to be able to generate thumb-1 PLT
9801 		 instructions...  */
9802 	      _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9803 				  output_bfd);
9804 	      return FALSE;
9805 	    }
9806 
9807 	  /* Calculate the displacement between the PLT slot and the entry in
9808 	     the GOT.  The 12-byte offset accounts for the value produced by
9809 	     adding to pc in the 3rd instruction of the PLT stub.  */
9810 	  got_displacement = got_address - (plt_address + 12);
9811 
9812 	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9813 	     instead of 'put_thumb_insn'.  */
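	  /* The two halves of GOT_DISPLACEMENT are scattered into the
	     imm4:i:imm3:imm8 immediate fields of the MOVW (low half) and
	     MOVT (high half) templates in elf32_thumb2_plt_entry; the
	     shifts below simply place each field where the Thumb-2
	     encoding expects it.  */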
9814 	  put_arm_insn (htab, output_bfd,
9815 			elf32_thumb2_plt_entry[0]
9816 			| ((got_displacement & 0x000000ff) << 16)
9817 			| ((got_displacement & 0x00000700) << 20)
9818 			| ((got_displacement & 0x00000800) >>  1)
9819 			| ((got_displacement & 0x0000f000) >> 12),
9820 			ptr + 0);
9821 	  put_arm_insn (htab, output_bfd,
9822 			elf32_thumb2_plt_entry[1]
9823 			| ((got_displacement & 0x00ff0000)      )
9824 			| ((got_displacement & 0x07000000) <<  4)
9825 			| ((got_displacement & 0x08000000) >> 17)
9826 			| ((got_displacement & 0xf0000000) >> 28),
9827 			ptr + 4);
9828 	  put_arm_insn (htab, output_bfd,
9829 			elf32_thumb2_plt_entry[2],
9830 			ptr + 8);
9831 	  put_arm_insn (htab, output_bfd,
9832 			elf32_thumb2_plt_entry[3],
9833 			ptr + 12);
9834 	}
9835       else
9836 	{
9837 	  /* Calculate the displacement between the PLT slot and the
9838 	     entry in the GOT.  The eight-byte offset accounts for the
9839 	     value produced by adding to pc in the first instruction
9840 	     of the PLT stub.  */
9841 	  got_displacement = got_address - (plt_address + 8);
9842 
9843 	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9844 	    {
9845 	      put_thumb_insn (htab, output_bfd,
9846 			      elf32_arm_plt_thumb_stub[0], ptr - 4);
9847 	      put_thumb_insn (htab, output_bfd,
9848 			      elf32_arm_plt_thumb_stub[1], ptr - 2);
9849 	    }
9850 
9851 	  if (!elf32_arm_use_long_plt_entry)
9852 	    {
9853 	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9854 
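	      /* The short entry is the classic three-instruction PLT
		 sequence (add ip, pc, #G0; add ip, ip, #G1;
		 ldr pc, [ip, #G2]!), so the displacement is split into
		 8-, 8- and 12-bit groups that fit those immediates.  */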
9855 	      put_arm_insn (htab, output_bfd,
9856 			    elf32_arm_plt_entry_short[0]
9857 			    | ((got_displacement & 0x0ff00000) >> 20),
9858 			    ptr + 0);
9859 	      put_arm_insn (htab, output_bfd,
9860 			    elf32_arm_plt_entry_short[1]
9861 			    | ((got_displacement & 0x000ff000) >> 12),
9862 			    ptr + 4);
9863 	      put_arm_insn (htab, output_bfd,
9864 			    elf32_arm_plt_entry_short[2]
9865 			    | (got_displacement & 0x00000fff),
9866 			    ptr + 8);
9867 #ifdef FOUR_WORD_PLT
9868 	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9869 #endif
9870 	    }
9871 	  else
9872 	    {
9873 	      put_arm_insn (htab, output_bfd,
9874 			    elf32_arm_plt_entry_long[0]
9875 			    | ((got_displacement & 0xf0000000) >> 28),
9876 			    ptr + 0);
9877 	      put_arm_insn (htab, output_bfd,
9878 			    elf32_arm_plt_entry_long[1]
9879 			    | ((got_displacement & 0x0ff00000) >> 20),
9880 			    ptr + 4);
9881 	      put_arm_insn (htab, output_bfd,
9882 			    elf32_arm_plt_entry_long[2]
9883 			    | ((got_displacement & 0x000ff000) >> 12),
9884 			    ptr + 8);
9885 	      put_arm_insn (htab, output_bfd,
9886 			    elf32_arm_plt_entry_long[3]
9887 			    | (got_displacement & 0x00000fff),
9888 			    ptr + 12);
9889 	    }
9890 	}
9891 
9892       /* Fill in the entry in the .rel(a).(i)plt section.  */
9893       rel.r_offset = got_address;
9894       rel.r_addend = 0;
9895       if (dynindx == -1)
9896 	{
9897 	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9898 	     The dynamic linker or static executable then calls SYM_VALUE
9899 	     to determine the correct run-time value of the .igot.plt entry.  */
9900 	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9901 	  initial_got_entry = sym_value;
9902 	}
9903       else
9904 	{
9905 	  /* For FDPIC we will have to resolve a R_ARM_FUNCDESC_VALUE
9906 	     used by PLT entry.  */
9907 	  if (htab->fdpic_p)
9908 	    {
9909 	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9910 	      initial_got_entry = 0;
9911 	    }
9912 	  else
9913 	    {
9914 	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9915 	      initial_got_entry = (splt->output_section->vma
9916 				   + splt->output_offset);
9917 	    }
9918 	}
9919 
9920       /* Fill in the entry in the global offset table.  */
9921       bfd_put_32 (output_bfd, initial_got_entry,
9922 		  sgot->contents + got_offset);
9923 
9924       if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
9925 	{
9926 	  /* Setup initial funcdesc value.  */
9927 	  /* FIXME: we don't support lazy binding because there is a
9928 	     race condition between both words getting written and
9929 	     some other thread attempting to read them. The ARM
9930 	     architecture does not have an atomic 64 bit load/store
9931 	     instruction that could be used to prevent it; it is
9932 	     recommended that threaded FDPIC applications run with the
9933 	     LD_BIND_NOW environment variable set.  */
9934 	  bfd_put_32 (output_bfd, plt_address + 0x18,
9935 		     sgot->contents + got_offset);
9936 	  bfd_put_32 (output_bfd, -1 /*TODO*/,
9937 		     sgot->contents + got_offset + 4);
9938 	}
9939     }
9940 
9941   if (dynindx == -1)
9942     elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
9943   else
9944     {
9945       if (htab->fdpic_p)
9946 	{
9947 	  /* For FDPIC we put PLT relocations into .rel.got when not
9948 	     lazy binding; otherwise we put them in .rel.plt.  For now,
9949 	     we don't support lazy binding, so put it in .rel.got.  */
9950 	  if (info->flags & DF_BIND_NOW)
9951 	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
9952 	  else
9953 	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
9954 	}
9955       else
9956 	{
9957 	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
9958 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9959 	}
9960     }
9961 
9962   return TRUE;
9963 }
9964 
9965 /* Some relocations map to different relocations depending on the
9966    target.  Return the real relocation.  */
9967 
9968 static int
9969 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
9970 		     int r_type)
9971 {
9972   switch (r_type)
9973     {
9974     case R_ARM_TARGET1:
9975       if (globals->target1_is_rel)
9976 	return R_ARM_REL32;
9977       else
9978 	return R_ARM_ABS32;
9979 
9980     case R_ARM_TARGET2:
9981       return globals->target2_reloc;
9982 
9983     default:
9984       return r_type;
9985     }
9986 }
9987 
9988 /* Return the base VMA address which should be subtracted from real addresses
9989    when resolving @dtpoff relocation.
9990    This is PT_TLS segment p_vaddr.  */
9991 
9992 static bfd_vma
9993 dtpoff_base (struct bfd_link_info *info)
9994 {
9995   /* If tls_sec is NULL, we should have signalled an error already.  */
9996   if (elf_hash_table (info)->tls_sec == NULL)
9997     return 0;
9998   return elf_hash_table (info)->tls_sec->vma;
9999 }
10000 
10001 /* Return the relocation value for @tpoff relocation
10002    if STT_TLS virtual address is ADDRESS.  */
10003 
10004 static bfd_vma
10005 tpoff (struct bfd_link_info *info, bfd_vma address)
10006 {
10007   struct elf_link_hash_table *htab = elf_hash_table (info);
10008   bfd_vma base;
10009 
10010   /* If tls_sec is NULL, we should have signalled an error already.  */
10011   if (htab->tls_sec == NULL)
10012     return 0;
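  /* ARM uses TLS Variant 1: the thread pointer addresses a TCB of
     TCB_SIZE bytes that is immediately followed by the static TLS
     block, so @tpoff is the symbol's offset into the TLS segment plus
     TCB_SIZE rounded up to the segment's alignment.  */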
10013   base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10014   return address - htab->tls_sec->vma + base;
10015 }
10016 
10017 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10018    VALUE is the relocation value.  */
10019 
10020 static bfd_reloc_status_type
10021 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10022 {
10023   if (value > 0xfff)
10024     return bfd_reloc_overflow;
10025 
10026   value |= bfd_get_32 (abfd, data) & 0xfffff000;
10027   bfd_put_32 (abfd, value, data);
10028   return bfd_reloc_ok;
10029 }
10030 
10031 /* Handle TLS relaxations.  Relaxing is possible for symbols that use
10032    R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10033    R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10034 
10035    Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10036    is to then call final_link_relocate.  Return other values in the
10037    case of error.
10038 
10039    FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10040    the pre-relaxed code.  It would be nice if the relocs were updated
10041    to match the optimization.   */
10042 
10043 static bfd_reloc_status_type
10044 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10045 		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10046 		     Elf_Internal_Rela *rel, unsigned long is_local)
10047 {
10048   unsigned long insn;
10049 
10050   switch (ELF32_R_TYPE (rel->r_info))
10051     {
10052     default:
10053       return bfd_reloc_notsupported;
10054 
10055     case R_ARM_TLS_GOTDESC:
10056       if (is_local)
10057 	insn = 0;
10058       else
10059 	{
10060 	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10061 	  if (insn & 1)
10062 	    insn -= 5; /* THUMB */
10063 	  else
10064 	    insn -= 8; /* ARM */
10065 	}
10066       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10067       return bfd_reloc_continue;
10068 
10069     case R_ARM_THM_TLS_DESCSEQ:
10070       /* Thumb insn.  */
10071       insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10072       if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
10073 	{
10074 	  if (is_local)
10075 	    /* nop */
10076 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10077 	}
10078       else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
10079 	{
10080 	  if (is_local)
10081 	    /* nop */
10082 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10083 	  else
10084 	    /* ldr rx,[ry] */
10085 	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10086 	}
10087       else if ((insn & 0xff87) == 0x4780)  /* blx rx */
10088 	{
10089 	  if (is_local)
10090 	    /* nop */
10091 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10092 	  else
10093 	    /* mov r0, rx */
10094 	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10095 			contents + rel->r_offset);
10096 	}
10097       else
10098 	{
10099 	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10100 	    /* It's a 32 bit instruction, fetch the rest of it for
10101 	       error generation.  */
10102 	    insn = (insn << 16)
10103 	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10104 	  _bfd_error_handler
10105 	    /* xgettext:c-format */
10106 	    (_("%pB(%pA+%#" PRIx64 "): "
10107 	       "unexpected %s instruction '%#lx' in TLS trampoline"),
10108 	     input_bfd, input_sec, (uint64_t) rel->r_offset,
10109 	     "Thumb", insn);
10110 	  return bfd_reloc_notsupported;
10111 	}
10112       break;
10113 
10114     case R_ARM_TLS_DESCSEQ:
10115       /* arm insn.  */
10116       insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10117       if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10118 	{
10119 	  if (is_local)
10120 	    /* mov rx, ry */
10121 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10122 			contents + rel->r_offset);
10123 	}
10124       else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10125 	{
10126 	  if (is_local)
10127 	    /* nop */
10128 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10129 	  else
10130 	    /* ldr rx,[ry] */
10131 	    bfd_put_32 (input_bfd, insn & 0xfffff000,
10132 			contents + rel->r_offset);
10133 	}
10134       else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10135 	{
10136 	  if (is_local)
10137 	    /* nop */
10138 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10139 	  else
10140 	    /* mov r0, rx */
10141 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10142 			contents + rel->r_offset);
10143 	}
10144       else
10145 	{
10146 	  _bfd_error_handler
10147 	    /* xgettext:c-format */
10148 	    (_("%pB(%pA+%#" PRIx64 "): "
10149 	       "unexpected %s instruction '%#lx' in TLS trampoline"),
10150 	     input_bfd, input_sec, (uint64_t) rel->r_offset,
10151 	     "ARM", insn);
10152 	  return bfd_reloc_notsupported;
10153 	}
10154       break;
10155 
10156     case R_ARM_TLS_CALL:
10157       /* GD->IE relaxation, turn the instruction into 'nop' or
10158 	 'ldr r0, [pc,r0]'  */
10159       insn = is_local ? 0xe1a00000 : 0xe79f0000;
10160       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10161       break;
10162 
10163     case R_ARM_THM_TLS_CALL:
10164       /* GD->IE relaxation.  */
10165       if (!is_local)
10166 	/* add r0,pc; ldr r0, [r0]  */
10167 	insn = 0x44786800;
10168       else if (using_thumb2 (globals))
10169 	/* nop.w */
10170 	insn = 0xf3af8000;
10171       else
10172 	/* nop; nop */
10173 	insn = 0xbf00bf00;
10174 
10175       bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10176       bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10177       break;
10178     }
10179   return bfd_reloc_ok;
10180 }
10181 
10182 /* For a given value of n, calculate the value of G_n as required to
10183    deal with group relocations.  We return it in the form of an
10184    encoded constant-and-rotation, together with the final residual.  If n is
10185    specified as less than zero, then final_residual is filled with the
10186    input value and no further action is performed.  */
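/* For example, given VALUE 0x12345678 and N equal to 0, G_0 is
   0x12000000, returned as the encoded immediate 0x548 (the constant
   0x48 rotated right by 10 bits), and *FINAL_RESIDUAL is set to
   0x345678.  */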
10187 
10188 static bfd_vma
10189 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10190 {
10191   int current_n;
10192   bfd_vma g_n;
10193   bfd_vma encoded_g_n = 0;
10194   bfd_vma residual = value; /* Also known as Y_n.  */
10195 
10196   for (current_n = 0; current_n <= n; current_n++)
10197     {
10198       int shift;
10199 
10200       /* Calculate which part of the value to mask.  */
10201       if (residual == 0)
10202 	shift = 0;
10203       else
10204 	{
10205 	  int msb;
10206 
10207 	  /* Determine the most significant bit in the residual and
10208 	     align the resulting value to a 2-bit boundary.  */
10209 	  for (msb = 30; msb >= 0; msb -= 2)
10210 	    if (residual & (3 << msb))
10211 	      break;
10212 
10213 	  /* The desired shift is now (msb - 6), or zero, whichever
10214 	     is the greater.  */
10215 	  shift = msb - 6;
10216 	  if (shift < 0)
10217 	    shift = 0;
10218 	}
10219 
10220       /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
10221       g_n = residual & (0xff << shift);
10222       encoded_g_n = (g_n >> shift)
10223 		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10224 
10225       /* Calculate the residual for the next time around.  */
10226       residual &= ~g_n;
10227     }
10228 
10229   *final_residual = residual;
10230 
10231   return encoded_g_n;
10232 }
10233 
10234 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10235    Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */
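/* The mask 0x1e00000 selects the opcode field (bits 24:21) of an ARM
   data-processing instruction; ADD is opcode 0b0100 (1 << 23 after
   masking) and SUB is opcode 0b0010 (1 << 22 after masking).  */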
10236 
10237 static int
10238 identify_add_or_sub (bfd_vma insn)
10239 {
10240   int opcode = insn & 0x1e00000;
10241 
10242   if (opcode == 1 << 23) /* ADD */
10243     return 1;
10244 
10245   if (opcode == 1 << 22) /* SUB */
10246     return -1;
10247 
10248   return 0;
10249 }
10250 
10251 /* Perform a relocation as part of a final link.  */
10252 
10253 static bfd_reloc_status_type
10254 elf32_arm_final_link_relocate (reloc_howto_type *	    howto,
10255 			       bfd *			    input_bfd,
10256 			       bfd *			    output_bfd,
10257 			       asection *		    input_section,
10258 			       bfd_byte *		    contents,
10259 			       Elf_Internal_Rela *	    rel,
10260 			       bfd_vma			    value,
10261 			       struct bfd_link_info *	    info,
10262 			       asection *		    sym_sec,
10263 			       const char *		    sym_name,
10264 			       unsigned char		    st_type,
10265 			       enum arm_st_branch_type	    branch_type,
10266 			       struct elf_link_hash_entry * h,
10267 			       bfd_boolean *		    unresolved_reloc_p,
10268 			       char **			    error_message)
10269 {
10270   unsigned long			r_type = howto->type;
10271   unsigned long			r_symndx;
10272   bfd_byte *			hit_data = contents + rel->r_offset;
10273   bfd_vma *			local_got_offsets;
10274   bfd_vma *			local_tlsdesc_gotents;
10275   asection *			sgot;
10276   asection *			splt;
10277   asection *			sreloc = NULL;
10278   asection *			srelgot;
10279   bfd_vma			addend;
10280   bfd_signed_vma		signed_addend;
10281   unsigned char			dynreloc_st_type;
10282   bfd_vma			dynreloc_value;
10283   struct elf32_arm_link_hash_table * globals;
10284   struct elf32_arm_link_hash_entry *eh;
10285   union gotplt_union	       *root_plt;
10286   struct arm_plt_info	       *arm_plt;
10287   bfd_vma			plt_offset;
10288   bfd_vma			gotplt_offset;
10289   bfd_boolean			has_iplt_entry;
10290   bfd_boolean			resolved_to_zero;
10291 
10292   globals = elf32_arm_hash_table (info);
10293   if (globals == NULL)
10294     return bfd_reloc_notsupported;
10295 
10296   BFD_ASSERT (is_arm_elf (input_bfd));
10297   BFD_ASSERT (howto != NULL);
10298 
10299   /* Some relocation types map to different relocations depending on the
10300      target.  We pick the right one here.  */
10301   r_type = arm_real_reloc_type (globals, r_type);
10302 
10303   /* It is possible to have linker relaxations on some TLS access
10304      models.  Update our information here.  */
10305   r_type = elf32_arm_tls_transition (info, r_type, h);
10306 
10307   if (r_type != howto->type)
10308     howto = elf32_arm_howto_from_type (r_type);
10309 
10310   eh = (struct elf32_arm_link_hash_entry *) h;
10311   sgot = globals->root.sgot;
10312   local_got_offsets = elf_local_got_offsets (input_bfd);
10313   local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10314 
10315   if (globals->root.dynamic_sections_created)
10316     srelgot = globals->root.srelgot;
10317   else
10318     srelgot = NULL;
10319 
10320   r_symndx = ELF32_R_SYM (rel->r_info);
10321 
10322   if (globals->use_rel)
10323     {
10324       addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10325 
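      /* Sign-extend the extracted field: if its topmost bit is set,
	 start from -1 and merge the field's bits in so that all the
	 high-order bits end up set.  */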
10326       if (addend & ((howto->src_mask + 1) >> 1))
10327 	{
10328 	  signed_addend = -1;
10329 	  signed_addend &= ~ howto->src_mask;
10330 	  signed_addend |= addend;
10331 	}
10332       else
10333 	signed_addend = addend;
10334     }
10335   else
10336     addend = signed_addend = rel->r_addend;
10337 
10338   /* ST_BRANCH_TO_ARM makes no sense for Thumb-only targets when we
10339      are resolving a function call relocation.  */
10340   if (using_thumb_only (globals)
10341       && (r_type == R_ARM_THM_CALL
10342 	  || r_type == R_ARM_THM_JUMP24)
10343       && branch_type == ST_BRANCH_TO_ARM)
10344     branch_type = ST_BRANCH_TO_THUMB;
10345 
10346   /* Record the symbol information that should be used in dynamic
10347      relocations.  */
10348   dynreloc_st_type = st_type;
10349   dynreloc_value = value;
10350   if (branch_type == ST_BRANCH_TO_THUMB)
10351     dynreloc_value |= 1;
10352 
10353   /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
10354      VALUE appropriately for relocations that we resolve at link time.  */
10355   has_iplt_entry = FALSE;
10356   if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10357 			      &arm_plt)
10358       && root_plt->offset != (bfd_vma) -1)
10359     {
10360       plt_offset = root_plt->offset;
10361       gotplt_offset = arm_plt->got_offset;
10362 
10363       if (h == NULL || eh->is_iplt)
10364 	{
10365 	  has_iplt_entry = TRUE;
10366 	  splt = globals->root.iplt;
10367 
10368 	  /* Populate .iplt entries here, because not all of them will
10369 	     be seen by finish_dynamic_symbol.  The lower bit is set if
10370 	     we have already populated the entry.  */
10371 	  if (plt_offset & 1)
10372 	    plt_offset--;
10373 	  else
10374 	    {
10375 	      if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10376 						-1, dynreloc_value))
10377 		root_plt->offset |= 1;
10378 	      else
10379 		return bfd_reloc_notsupported;
10380 	    }
10381 
10382 	  /* Static relocations always resolve to the .iplt entry.  */
10383 	  st_type = STT_FUNC;
10384 	  value = (splt->output_section->vma
10385 		   + splt->output_offset
10386 		   + plt_offset);
10387 	  branch_type = ST_BRANCH_TO_ARM;
10388 
10389 	  /* If there are non-call relocations that resolve to the .iplt
10390 	     entry, then all dynamic ones must too.  */
10391 	  if (arm_plt->noncall_refcount != 0)
10392 	    {
10393 	      dynreloc_st_type = st_type;
10394 	      dynreloc_value = value;
10395 	    }
10396 	}
10397       else
10398 	/* We populate the .plt entry in finish_dynamic_symbol.  */
10399 	splt = globals->root.splt;
10400     }
10401   else
10402     {
10403       splt = NULL;
10404       plt_offset = (bfd_vma) -1;
10405       gotplt_offset = (bfd_vma) -1;
10406     }
10407 
10408   resolved_to_zero = (h != NULL
10409 		      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10410 
10411   switch (r_type)
10412     {
10413     case R_ARM_NONE:
10414       /* We don't need to find a value for this symbol.  It's just a
10415 	 marker.  */
10416       *unresolved_reloc_p = FALSE;
10417       return bfd_reloc_ok;
10418 
10419     case R_ARM_ABS12:
10420       if (!globals->vxworks_p)
10421 	return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10422       /* Fall through.  */
10423 
10424     case R_ARM_PC24:
10425     case R_ARM_ABS32:
10426     case R_ARM_ABS32_NOI:
10427     case R_ARM_REL32:
10428     case R_ARM_REL32_NOI:
10429     case R_ARM_CALL:
10430     case R_ARM_JUMP24:
10431     case R_ARM_XPC25:
10432     case R_ARM_PREL31:
10433     case R_ARM_PLT32:
10434       /* Handle relocations which should use the PLT entry.  ABS32/REL32
10435 	 will use the symbol's value, which may point to a PLT entry, but we
10436 	 don't need to handle that here.  If we created a PLT entry, all
10437 	 branches in this object should go to it, except if the PLT is too
10438 	 far away, in which case a long branch stub should be inserted.  */
10439       if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10440 	   && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10441 	   && r_type != R_ARM_CALL
10442 	   && r_type != R_ARM_JUMP24
10443 	   && r_type != R_ARM_PLT32)
10444 	  && plt_offset != (bfd_vma) -1)
10445 	{
10446 	  /* If we've created a .plt section, and assigned a PLT entry
10447 	     to this function, it must either be a STT_GNU_IFUNC reference
10448 	     or not be known to bind locally.  In other cases, we should
10449 	     have cleared the PLT entry by now.  */
10450 	  BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10451 
10452 	  value = (splt->output_section->vma
10453 		   + splt->output_offset
10454 		   + plt_offset);
10455 	  *unresolved_reloc_p = FALSE;
10456 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
10457 					   contents, rel->r_offset, value,
10458 					   rel->r_addend);
10459 	}
10460 
10461       /* When generating a shared object or relocatable executable, these
10462 	 relocations are copied into the output file to be resolved at
10463 	 run time.  */
10464       if ((bfd_link_pic (info)
10465 	   || globals->root.is_relocatable_executable
10466 	   || globals->fdpic_p)
10467 	  && (input_section->flags & SEC_ALLOC)
10468 	  && !(globals->vxworks_p
10469 	       && strcmp (input_section->output_section->name,
10470 			  ".tls_vars") == 0)
10471 	  && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10472 	      || !SYMBOL_CALLS_LOCAL (info, h))
10473 	  && !(input_bfd == globals->stub_bfd
10474 	       && strstr (input_section->name, STUB_SUFFIX))
10475 	  && (h == NULL
10476 	      || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10477 		  && !resolved_to_zero)
10478 	      || h->root.type != bfd_link_hash_undefweak)
10479 	  && r_type != R_ARM_PC24
10480 	  && r_type != R_ARM_CALL
10481 	  && r_type != R_ARM_JUMP24
10482 	  && r_type != R_ARM_PREL31
10483 	  && r_type != R_ARM_PLT32)
10484 	{
10485 	  Elf_Internal_Rela outrel;
10486 	  bfd_boolean skip, relocate;
10487 	  int isrofixup = 0;
10488 
10489 	  if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10490 	      && !h->def_regular)
10491 	    {
10492 	      char *v = _("shared object");
10493 
10494 	      if (bfd_link_executable (info))
10495 		v = _("PIE executable");
10496 
10497 	      _bfd_error_handler
10498 		(_("%pB: relocation %s against external or undefined symbol `%s'"
10499 		   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10500 		 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10501 	      return bfd_reloc_notsupported;
10502 	    }
10503 
10504 	  *unresolved_reloc_p = FALSE;
10505 
10506 	  if (sreloc == NULL && globals->root.dynamic_sections_created)
10507 	    {
10508 	      sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10509 							   ! globals->use_rel);
10510 
10511 	      if (sreloc == NULL)
10512 		return bfd_reloc_notsupported;
10513 	    }
10514 
10515 	  skip = FALSE;
10516 	  relocate = FALSE;
10517 
10518 	  outrel.r_addend = addend;
10519 	  outrel.r_offset =
10520 	    _bfd_elf_section_offset (output_bfd, info, input_section,
10521 				     rel->r_offset);
10522 	  if (outrel.r_offset == (bfd_vma) -1)
10523 	    skip = TRUE;
10524 	  else if (outrel.r_offset == (bfd_vma) -2)
10525 	    skip = TRUE, relocate = TRUE;
10526 	  outrel.r_offset += (input_section->output_section->vma
10527 			      + input_section->output_offset);
10528 
10529 	  if (skip)
10530 	    memset (&outrel, 0, sizeof outrel);
10531 	  else if (h != NULL
10532 		   && h->dynindx != -1
10533 		   && (!bfd_link_pic (info)
10534 		       || !(bfd_link_pie (info)
10535 			    || SYMBOLIC_BIND (info, h))
10536 		       || !h->def_regular))
10537 	    outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10538 	  else
10539 	    {
10540 	      int symbol;
10541 
10542 	      /* This symbol is local, or marked to become local.  */
10543 	      BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10544 			  || (globals->fdpic_p && !bfd_link_pic (info)));
10545 	      if (globals->symbian_p)
10546 		{
10547 		  asection *osec;
10548 
10549 		  /* On Symbian OS, the data segment and text segment
10550 		     can be relocated independently.  Therefore, we
10551 		     must indicate the segment to which this
10552 		     relocation is relative.  The BPABI allows us to
10553 		     use any symbol in the right segment; we just use
10554 		     the section symbol as it is convenient.  (We
10555 		     cannot use the symbol given by "h" directly as it
10556 		     will not appear in the dynamic symbol table.)
10557 
10558 		     Note that the dynamic linker ignores the section
10559 		     symbol value, so we don't subtract osec->vma
10560 		     from the emitted reloc addend.  */
10561 		  if (sym_sec)
10562 		    osec = sym_sec->output_section;
10563 		  else
10564 		    osec = input_section->output_section;
10565 		  symbol = elf_section_data (osec)->dynindx;
10566 		  if (symbol == 0)
10567 		    {
10568 		      struct elf_link_hash_table *htab = elf_hash_table (info);
10569 
10570 		      if ((osec->flags & SEC_READONLY) == 0
10571 			  && htab->data_index_section != NULL)
10572 			osec = htab->data_index_section;
10573 		      else
10574 			osec = htab->text_index_section;
10575 		      symbol = elf_section_data (osec)->dynindx;
10576 		    }
10577 		  BFD_ASSERT (symbol != 0);
10578 		}
10579 	      else
10580 		/* On SVR4-ish systems, the dynamic loader cannot
10581 		   relocate the text and data segments independently,
10582 		   so the symbol does not matter.  */
10583 		symbol = 0;
10584 	      if (dynreloc_st_type == STT_GNU_IFUNC)
10585 		/* We have an STT_GNU_IFUNC symbol that doesn't resolve
10586 		   to the .iplt entry.  Instead, every non-call reference
10587 		   must use an R_ARM_IRELATIVE relocation to obtain the
10588 		   correct run-time address.  */
10589 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10590 	      else if (globals->fdpic_p && !bfd_link_pic (info))
10591 		isrofixup = 1;
10592 	      else
10593 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10594 	      if (globals->use_rel)
10595 		relocate = TRUE;
10596 	      else
10597 		outrel.r_addend += dynreloc_value;
10598 	    }
10599 
10600 	  if (isrofixup)
10601 	    arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10602 	  else
10603 	    elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10604 
10605 	  /* If this reloc is against an external symbol, we do not want to
10606 	     fiddle with the addend.  Otherwise, we need to include the symbol
10607 	     value so that it becomes an addend for the dynamic reloc.  */
10608 	  if (! relocate)
10609 	    return bfd_reloc_ok;
10610 
10611 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
10612 					   contents, rel->r_offset,
10613 					   dynreloc_value, (bfd_vma) 0);
10614 	}
10615       else switch (r_type)
10616 	{
10617 	case R_ARM_ABS12:
10618 	  return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10619 
10620 	case R_ARM_XPC25:	  /* Arm BLX instruction.  */
10621 	case R_ARM_CALL:
10622 	case R_ARM_JUMP24:
10623 	case R_ARM_PC24:	  /* Arm B/BL instruction.  */
10624 	case R_ARM_PLT32:
10625 	  {
10626 	  struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10627 
10628 	  if (r_type == R_ARM_XPC25)
10629 	    {
10630 	      /* Check for Arm calling Arm function.  */
10631 	      /* FIXME: Should we translate the instruction into a BL
10632 		 instruction instead ?  */
10633 	      if (branch_type != ST_BRANCH_TO_THUMB)
10634 		_bfd_error_handler
10635 		  (_("%pB: warning: %s BLX instruction targets"
10636 		     " %s function '%s'"),
10637 		   input_bfd, "ARM",
10638 		   "ARM", h ? h->root.root.string : "(local)");
10639 	    }
10640 	  else if (r_type == R_ARM_PC24)
10641 	    {
10642 	      /* Check for Arm calling Thumb function.  */
10643 	      if (branch_type == ST_BRANCH_TO_THUMB)
10644 		{
10645 		  if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10646 					       output_bfd, input_section,
10647 					       hit_data, sym_sec, rel->r_offset,
10648 					       signed_addend, value,
10649 					       error_message))
10650 		    return bfd_reloc_ok;
10651 		  else
10652 		    return bfd_reloc_dangerous;
10653 		}
10654 	    }
10655 
10656 	  /* Check if a stub has to be inserted because the
10657 	     destination is too far or we are changing mode.  */
10658 	  if (   r_type == R_ARM_CALL
10659 	      || r_type == R_ARM_JUMP24
10660 	      || r_type == R_ARM_PLT32)
10661 	    {
10662 	      enum elf32_arm_stub_type stub_type = arm_stub_none;
10663 	      struct elf32_arm_link_hash_entry *hash;
10664 
10665 	      hash = (struct elf32_arm_link_hash_entry *) h;
10666 	      stub_type = arm_type_of_stub (info, input_section, rel,
10667 					    st_type, &branch_type,
10668 					    hash, value, sym_sec,
10669 					    input_bfd, sym_name);
10670 
10671 	      if (stub_type != arm_stub_none)
10672 		{
10673 		  /* The target is out of reach, so redirect the
10674 		     branch to the local stub for this function.  */
10675 		  stub_entry = elf32_arm_get_stub_entry (input_section,
10676 							 sym_sec, h,
10677 							 rel, globals,
10678 							 stub_type);
10679 		  {
10680 		    if (stub_entry != NULL)
10681 		      value = (stub_entry->stub_offset
10682 			       + stub_entry->stub_sec->output_offset
10683 			       + stub_entry->stub_sec->output_section->vma);
10684 
10685 		    if (plt_offset != (bfd_vma) -1)
10686 		      *unresolved_reloc_p = FALSE;
10687 		  }
10688 		}
10689 	      else
10690 		{
10691 		  /* If the call goes through a PLT entry, make sure to
10692 		     check distance to the right destination address.  */
10693 		  if (plt_offset != (bfd_vma) -1)
10694 		    {
10695 		      value = (splt->output_section->vma
10696 			       + splt->output_offset
10697 			       + plt_offset);
10698 		      *unresolved_reloc_p = FALSE;
10699 		      /* The PLT entry is in ARM mode, regardless of the
10700 			 target function.  */
10701 		      branch_type = ST_BRANCH_TO_ARM;
10702 		    }
10703 		}
10704 	    }
10705 
10706 	  /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10707 	     where:
10708 	      S is the address of the symbol in the relocation.
10709 	      P is address of the instruction being relocated.
10710 	      A is the addend (extracted from the instruction) in bytes.
10711 
10712 	     S is held in 'value'.
10713 	     P is the base address of the section containing the
10714 	       instruction plus the offset of the reloc into that
10715 	       section, ie:
10716 		 (input_section->output_section->vma +
10717 		  input_section->output_offset +
10718 		  rel->r_offset).
10719 	     A is the addend, converted into bytes, ie:
10720 		 (signed_addend * 4)
10721 
10722 	     Note: None of these operations have knowledge of the pipeline
10723 	     size of the processor, thus it is up to the assembler to
10724 	     encode this information into the addend.  */
10725 	  value -= (input_section->output_section->vma
10726 		    + input_section->output_offset);
10727 	  value -= rel->r_offset;
10728 	  if (globals->use_rel)
10729 	    value += (signed_addend << howto->size);
10730 	  else
10731 	    /* RELA addends do not have to be adjusted by howto->size.  */
10732 	    value += signed_addend;
10733 
10734 	  signed_addend = value;
10735 	  signed_addend >>= howto->rightshift;
10736 
10737 	  /* A branch to an undefined weak symbol is turned into a jump to
10738 	     the next instruction unless a PLT entry will be created.
10739 	     Do the same for local undefined symbols (but not for STN_UNDEF).
10740 	     The jump to the next instruction is optimized as a NOP depending
10741 	     on the architecture.  */
10742 	  if (h ? (h->root.type == bfd_link_hash_undefweak
10743 		   && plt_offset == (bfd_vma) -1)
10744 	      : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10745 	    {
10746 	      value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10747 
10748 	      if (arch_has_arm_nop (globals))
10749 		value |= 0x0320f000;
10750 	      else
10751 		value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
10752 	    }
10753 	  else
10754 	    {
10755 	      /* Perform a signed range check.  */
10756 	      if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
10757 		  || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10758 		return bfd_reloc_overflow;
10759 
10760 	      addend = (value & 2);
10761 
10762 	      value = (signed_addend & howto->dst_mask)
10763 		| (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10764 
10765 	      if (r_type == R_ARM_CALL)
10766 		{
10767 		  /* Set the H bit in the BLX instruction.  */
10768 		  if (branch_type == ST_BRANCH_TO_THUMB)
10769 		    {
10770 		      if (addend)
10771 			value |= (1 << 24);
10772 		      else
10773 			value &= ~(bfd_vma)(1 << 24);
10774 		    }
10775 
10776 		  /* Select the correct instruction (BL or BLX).  */
10777 		  /* Only if we are not handling a BL to a stub. In this
10778 		     case, mode switching is performed by the stub.  */
10779 		  if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10780 		    value |= (1 << 28);
10781 		  else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10782 		    {
10783 		      value &= ~(bfd_vma)(1 << 28);
10784 		      value |= (1 << 24);
10785 		    }
10786 		}
10787 	    }
10788 	  }
10789 	  break;
10790 
10791 	case R_ARM_ABS32:
10792 	  value += addend;
10793 	  if (branch_type == ST_BRANCH_TO_THUMB)
10794 	    value |= 1;
10795 	  break;
10796 
10797 	case R_ARM_ABS32_NOI:
10798 	  value += addend;
10799 	  break;
10800 
10801 	case R_ARM_REL32:
10802 	  value += addend;
10803 	  if (branch_type == ST_BRANCH_TO_THUMB)
10804 	    value |= 1;
10805 	  value -= (input_section->output_section->vma
10806 		    + input_section->output_offset + rel->r_offset);
10807 	  break;
10808 
10809 	case R_ARM_REL32_NOI:
10810 	  value += addend;
10811 	  value -= (input_section->output_section->vma
10812 		    + input_section->output_offset + rel->r_offset);
10813 	  break;
10814 
10815 	case R_ARM_PREL31:
10816 	  value -= (input_section->output_section->vma
10817 		    + input_section->output_offset + rel->r_offset);
10818 	  value += signed_addend;
10819 	  if (! h || h->root.type != bfd_link_hash_undefweak)
10820 	    {
10821 	      /* Check for overflow: bits 30 and 31 of the result must be
		 equal for it to fit in the signed 31-bit PREL31 field.  */
10822 	      if ((value ^ (value >> 1)) & (1 << 30))
10823 		return bfd_reloc_overflow;
10824 	    }
10825 	  value &= 0x7fffffff;
10826 	  value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10827 	  if (branch_type == ST_BRANCH_TO_THUMB)
10828 	    value |= 1;
10829 	  break;
10830 	}
10831 
10832       bfd_put_32 (input_bfd, value, hit_data);
10833       return bfd_reloc_ok;
10834 
10835     case R_ARM_ABS8:
10836       /* PR 16202: Refetch the addend using the correct size.  */
10837       if (globals->use_rel)
10838 	addend = bfd_get_8 (input_bfd, hit_data);
10839       value += addend;
10840 
10841       /* There is no way to tell whether the user intended to use a signed or
10842 	 unsigned addend.  When checking for overflow we accept either,
10843 	 as specified by the AAELF.  */
10844       if ((long) value > 0xff || (long) value < -0x80)
10845 	return bfd_reloc_overflow;
10846 
10847       bfd_put_8 (input_bfd, value, hit_data);
10848       return bfd_reloc_ok;
10849 
10850     case R_ARM_ABS16:
10851       /* PR 16202: Refetch the addend using the correct size.  */
10852       if (globals->use_rel)
10853 	addend = bfd_get_16 (input_bfd, hit_data);
10854       value += addend;
10855 
10856       /* See comment for R_ARM_ABS8.  */
10857       if ((long) value > 0xffff || (long) value < -0x8000)
10858 	return bfd_reloc_overflow;
10859 
10860       bfd_put_16 (input_bfd, value, hit_data);
10861       return bfd_reloc_ok;
10862 
10863     case R_ARM_THM_ABS5:
10864       /* Support ldr and str instructions for the thumb.  */
10865       if (globals->use_rel)
10866 	{
10867 	  /* Need to refetch addend.  */
10868 	  addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10869 	  /* ??? Need to determine shift amount from operand size.  */
10870 	  addend >>= howto->rightshift;
10871 	}
10872       value += addend;
10873 
10874       /* ??? Isn't value unsigned?  */
10875       if ((long) value > 0x1f || (long) value < -0x10)
10876 	return bfd_reloc_overflow;
10877 
10878       /* ??? Value needs to be properly shifted into place first.  */
10879       value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10880       bfd_put_16 (input_bfd, value, hit_data);
10881       return bfd_reloc_ok;
10882 
10883     case R_ARM_THM_ALU_PREL_11_0:
10884       /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
10885       {
10886 	bfd_vma insn;
10887 	bfd_signed_vma relocation;
10888 
10889 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10890 	     | bfd_get_16 (input_bfd, hit_data + 2);
10891 
10892 	if (globals->use_rel)
10893 	  {
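	    /* Reassemble the 12-bit immediate from the ADDW/SUBW
	       i:imm3:imm8 fields; a SUBW-form encoding negates the
	       addend.  */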
10894 	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10895 			  | ((insn & (1 << 26)) >> 15);
10896 	    if (insn & 0xf00000)
10897 	      signed_addend = -signed_addend;
10898 	  }
10899 
10900 	relocation = value + signed_addend;
10901 	relocation -= Pa (input_section->output_section->vma
10902 			  + input_section->output_offset
10903 			  + rel->r_offset);
10904 
10905 	/* PR 21523: Use an absolute value.  The user of this reloc will
10906 	   have already selected an ADD or SUB insn appropriately.  */
10907 	value = llabs (relocation);
10908 
10909 	if (value >= 0x1000)
10910 	  return bfd_reloc_overflow;
10911 
10912 	/* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
10913 	if (branch_type == ST_BRANCH_TO_THUMB)
10914 	  value |= 1;
10915 
10916 	insn = (insn & 0xfb0f8f00) | (value & 0xff)
10917 	     | ((value & 0x700) << 4)
10918 	     | ((value & 0x800) << 15);
10919 	if (relocation < 0)
10920 	  insn |= 0xa00000;
10921 
10922 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
10923 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10924 
10925 	return bfd_reloc_ok;
10926       }
10927 
10928     case R_ARM_THM_PC8:
10929       /* PR 10073:  This reloc is not generated by the GNU toolchain,
10930 	 but it is supported for compatibility with third party libraries
10931 	 generated by other compilers, specifically the ARM and IAR tools.  */
10932       {
10933 	bfd_vma insn;
10934 	bfd_signed_vma relocation;
10935 
10936 	insn = bfd_get_16 (input_bfd, hit_data);
10937 
10938 	if (globals->use_rel)
10939 	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
10940 
10941 	relocation = value + addend;
10942 	relocation -= Pa (input_section->output_section->vma
10943 			  + input_section->output_offset
10944 			  + rel->r_offset);
10945 
10946 	value = relocation;
10947 
10948 	/* We do not check for overflow of this reloc.  Although strictly
10949 	   speaking this is incorrect, it appears to be necessary in order
10950 	   to work with IAR generated relocs.  Since GCC and GAS do not
10951 	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
10952 	   a problem for them.  */
10953 	value &= 0x3fc;
10954 
10955 	insn = (insn & 0xff00) | (value >> 2);
10956 
10957 	bfd_put_16 (input_bfd, insn, hit_data);
10958 
10959 	return bfd_reloc_ok;
10960       }
10961 
10962     case R_ARM_THM_PC12:
10963       /* Corresponds to: ldr.w reg, [pc, #offset].  */
10964       {
10965 	bfd_vma insn;
10966 	bfd_signed_vma relocation;
10967 
10968 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10969 	     | bfd_get_16 (input_bfd, hit_data + 2);
10970 
10971 	if (globals->use_rel)
10972 	  {
10973 	    signed_addend = insn & 0xfff;
10974 	    if (!(insn & (1 << 23)))
10975 	      signed_addend = -signed_addend;
10976 	  }
10977 
10978 	relocation = value + signed_addend;
10979 	relocation -= Pa (input_section->output_section->vma
10980 			  + input_section->output_offset
10981 			  + rel->r_offset);
10982 
10983 	value = relocation;
10984 
10985 	if (value >= 0x1000)
10986 	  return bfd_reloc_overflow;
10987 
10988 	insn = (insn & 0xff7ff000) | value;
10989 	if (relocation >= 0)
10990 	  insn |= (1 << 23);
10991 
10992 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
10993 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10994 
10995 	return bfd_reloc_ok;
10996       }
10997 
10998     case R_ARM_THM_XPC22:
10999     case R_ARM_THM_CALL:
11000     case R_ARM_THM_JUMP24:
11001       /* Thumb BL (branch long instruction).  */
11002       {
11003 	bfd_vma relocation;
11004 	bfd_vma reloc_sign;
11005 	bfd_boolean overflow = FALSE;
11006 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11007 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11008 	bfd_signed_vma reloc_signed_max;
11009 	bfd_signed_vma reloc_signed_min;
11010 	bfd_vma check;
11011 	bfd_signed_vma signed_check;
11012 	int bitsize;
11013 	const int thumb2 = using_thumb2 (globals);
11014 	const int thumb2_bl = using_thumb2_bl (globals);
11015 
11016 	/* A branch to an undefined weak symbol is turned into a jump to
11017 	   the next instruction unless a PLT entry will be created.
11018 	   The jump to the next instruction is optimized as a NOP.W for
11019 	   Thumb-2 enabled architectures.  */
11020 	if (h && h->root.type == bfd_link_hash_undefweak
11021 	    && plt_offset == (bfd_vma) -1)
11022 	  {
11023 	    if (thumb2)
11024 	      {
11025 		bfd_put_16 (input_bfd, 0xf3af, hit_data);
11026 		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11027 	      }
11028 	    else
11029 	      {
11030 		bfd_put_16 (input_bfd, 0xe000, hit_data);
11031 		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11032 	      }
11033 	    return bfd_reloc_ok;
11034 	  }
11035 
11036 	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
11037 	   with Thumb-1) involving the J1 and J2 bits.  */
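	/* The 25-bit branch offset is S:I1:I2:imm10:imm11:0, where
	   I1 = NOT (J1 EOR S) and I2 = NOT (J2 EOR S); the result is
	   sign-extended from bit 24.  */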
11038 	if (globals->use_rel)
11039 	  {
11040 	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11041 	    bfd_vma upper = upper_insn & 0x3ff;
11042 	    bfd_vma lower = lower_insn & 0x7ff;
11043 	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11044 	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11045 	    bfd_vma i1 = j1 ^ s ? 0 : 1;
11046 	    bfd_vma i2 = j2 ^ s ? 0 : 1;
11047 
11048 	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11049 	    /* Sign extend.  */
11050 	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11051 
11052 	    signed_addend = addend;
11053 	  }
11054 
11055 	if (r_type == R_ARM_THM_XPC22)
11056 	  {
11057 	    /* Check for Thumb to Thumb call.  */
11058 	    /* FIXME: Should we translate the instruction into a BL
11059 	       instruction instead ?  */
11060 	    if (branch_type == ST_BRANCH_TO_THUMB)
11061 	      _bfd_error_handler
11062 		(_("%pB: warning: %s BLX instruction targets"
11063 		   " %s function '%s'"),
11064 		 input_bfd, "Thumb",
11065 		 "Thumb", h ? h->root.root.string : "(local)");
11066 	  }
11067 	else
11068 	  {
11069 	    /* If it is not a call to Thumb, assume call to Arm.
11070 	       If it is a call relative to a section name, then it is not a
11071 	       function call at all, but rather a long jump.  Calls through
11072 	       the PLT do not require stubs.  */
11073 	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11074 	      {
11075 		if (globals->use_blx && r_type == R_ARM_THM_CALL)
11076 		  {
11077 		    /* Convert BL to BLX.  */
11078 		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
11079 		  }
11080 		else if ((   r_type != R_ARM_THM_CALL)
11081 			 && (r_type != R_ARM_THM_JUMP24))
11082 		  {
11083 		    if (elf32_thumb_to_arm_stub
11084 			(info, sym_name, input_bfd, output_bfd, input_section,
11085 			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11086 			 error_message))
11087 		      return bfd_reloc_ok;
11088 		    else
11089 		      return bfd_reloc_dangerous;
11090 		  }
11091 	      }
11092 	    else if (branch_type == ST_BRANCH_TO_THUMB
11093 		     && globals->use_blx
11094 		     && r_type == R_ARM_THM_CALL)
11095 	      {
11096 		/* Make sure this is a BL.  */
11097 		lower_insn |= 0x1800;
11098 	      }
11099 	  }
11100 
11101 	enum elf32_arm_stub_type stub_type = arm_stub_none;
11102 	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11103 	  {
11104 	    /* Check if a stub has to be inserted because the destination
11105 	       is too far.  */
11106 	    struct elf32_arm_stub_hash_entry *stub_entry;
11107 	    struct elf32_arm_link_hash_entry *hash;
11108 
11109 	    hash = (struct elf32_arm_link_hash_entry *) h;
11110 
11111 	    stub_type = arm_type_of_stub (info, input_section, rel,
11112 					  st_type, &branch_type,
11113 					  hash, value, sym_sec,
11114 					  input_bfd, sym_name);
11115 
11116 	    if (stub_type != arm_stub_none)
11117 	      {
11118 		/* The target is out of reach or we are changing modes, so
11119 		   redirect the branch to the local stub for this
11120 		   function.  */
11121 		stub_entry = elf32_arm_get_stub_entry (input_section,
11122 						       sym_sec, h,
11123 						       rel, globals,
11124 						       stub_type);
11125 		if (stub_entry != NULL)
11126 		  {
11127 		    value = (stub_entry->stub_offset
11128 			     + stub_entry->stub_sec->output_offset
11129 			     + stub_entry->stub_sec->output_section->vma);
11130 
11131 		    if (plt_offset != (bfd_vma) -1)
11132 		      *unresolved_reloc_p = FALSE;
11133 		  }
11134 
11135 		/* If this call becomes a call to Arm, force BLX.  */
11136 		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11137 		  {
11138 		    if ((stub_entry
11139 			 && !arm_stub_is_thumb (stub_entry->stub_type))
11140 			|| branch_type != ST_BRANCH_TO_THUMB)
11141 		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
11142 		  }
11143 	      }
11144 	  }
11145 
11146 	/* Handle calls via the PLT.  */
11147 	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11148 	  {
11149 	    value = (splt->output_section->vma
11150 		     + splt->output_offset
11151 		     + plt_offset);
11152 
11153 	    if (globals->use_blx
11154 		&& r_type == R_ARM_THM_CALL
11155 		&& ! using_thumb_only (globals))
11156 	      {
11157 		/* If the Thumb BLX instruction is available, convert
11158 		   the BL to a BLX instruction to call the ARM-mode
11159 		   PLT entry.  */
11160 		lower_insn = (lower_insn & ~0x1000) | 0x0800;
11161 		branch_type = ST_BRANCH_TO_ARM;
11162 	      }
11163 	    else
11164 	      {
11165 		if (! using_thumb_only (globals))
11166 		  /* Target the Thumb stub before the ARM PLT entry.  */
11167 		  value -= PLT_THUMB_STUB_SIZE;
11168 		branch_type = ST_BRANCH_TO_THUMB;
11169 	      }
11170 	    *unresolved_reloc_p = FALSE;
11171 	  }
11172 
11173 	relocation = value + signed_addend;
11174 
11175 	relocation -= (input_section->output_section->vma
11176 		       + input_section->output_offset
11177 		       + rel->r_offset);
11178 
11179 	check = relocation >> howto->rightshift;
11180 
11181 	/* If this is a signed value, the rightshift just dropped
11182 	   leading 1 bits (assuming two's complement).  */
11183 	if ((bfd_signed_vma) relocation >= 0)
11184 	  signed_check = check;
11185 	else
11186 	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11187 
11188 	/* Calculate the permissible maximum and minimum values for
11189 	   this relocation according to whether we're relocating for
11190 	   Thumb-2 or not.  */
11191 	bitsize = howto->bitsize;
11192 	if (!thumb2_bl)
11193 	  bitsize -= 2;
11194 	reloc_signed_max = (1 << (bitsize - 1)) - 1;
11195 	reloc_signed_min = ~reloc_signed_max;
11196 
11197 	/* Assumes two's complement.  */
11198 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11199 	  overflow = TRUE;
11200 
11201 	if ((lower_insn & 0x5000) == 0x4000)
11202 	  /* For a BLX instruction, make sure that the relocation is rounded up
11203 	     to a word boundary.  This follows the semantics of the instruction
11204 	     which specifies that bit 1 of the target address will come from bit
11205 	     1 of the base address.  */
11206 	  relocation = (relocation + 2) & ~ 3;
11207 
11208 	/* Put RELOCATION back into the insn.  Assumes two's complement.
11209 	   We use the Thumb-2 encoding, which is safe even if dealing with
11210 	   a Thumb-1 instruction by virtue of our overflow check above.  */
11211 	reloc_sign = (signed_check < 0) ? 1 : 0;
11212 	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11213 		     | ((relocation >> 12) & 0x3ff)
11214 		     | (reloc_sign << 10);
11215 	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11216 		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11217 		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11218 		     | ((relocation >> 1) & 0x7ff);
11219 
11220 	/* Put the relocated value back in the object file:  */
11221 	bfd_put_16 (input_bfd, upper_insn, hit_data);
11222 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11223 
11224 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11225       }
11226       break;
11227 
11228     case R_ARM_THM_JUMP19:
11229       /* Thumb32 conditional branch instruction.  */
11230       {
11231 	bfd_vma relocation;
11232 	bfd_boolean overflow = FALSE;
11233 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11234 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11235 	bfd_signed_vma reloc_signed_max = 0xffffe;
11236 	bfd_signed_vma reloc_signed_min = -0x100000;
11237 	bfd_signed_vma signed_check;
11238 	enum elf32_arm_stub_type stub_type = arm_stub_none;
11239 	struct elf32_arm_stub_hash_entry *stub_entry;
11240 	struct elf32_arm_link_hash_entry *hash;
11241 
11242 	/* Need to refetch the addend, reconstruct the top three bits,
11243 	   and squish the 6-bit and 11-bit immediate pieces together.  */
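	/* For the Thumb-2 conditional branch (B<c>.W) the 21-bit offset
	   is S:J2:J1:imm6:imm11:0, sign-extended from bit 20.  */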
11244 	if (globals->use_rel)
11245 	  {
11246 	    bfd_vma S     = (upper_insn & 0x0400) >> 10;
11247 	    bfd_vma upper = (upper_insn & 0x003f);
11248 	    bfd_vma J1    = (lower_insn & 0x2000) >> 13;
11249 	    bfd_vma J2    = (lower_insn & 0x0800) >> 11;
11250 	    bfd_vma lower = (lower_insn & 0x07ff);
11251 
11252 	    upper |= J1 << 6;
11253 	    upper |= J2 << 7;
11254 	    upper |= (!S) << 8;
11255 	    upper -= 0x0100; /* Sign extend.  */
11256 
11257 	    addend = (upper << 12) | (lower << 1);
11258 	    signed_addend = addend;
11259 	  }
11260 
11261 	/* Handle calls via the PLT.  */
11262 	if (plt_offset != (bfd_vma) -1)
11263 	  {
11264 	    value = (splt->output_section->vma
11265 		     + splt->output_offset
11266 		     + plt_offset);
11267 	    /* Target the Thumb stub before the ARM PLT entry.  */
11268 	    value -= PLT_THUMB_STUB_SIZE;
11269 	    *unresolved_reloc_p = FALSE;
11270 	  }
11271 
11272 	hash = (struct elf32_arm_link_hash_entry *)h;
11273 
11274 	stub_type = arm_type_of_stub (info, input_section, rel,
11275 				      st_type, &branch_type,
11276 				      hash, value, sym_sec,
11277 				      input_bfd, sym_name);
11278 	if (stub_type != arm_stub_none)
11279 	  {
11280 	    stub_entry = elf32_arm_get_stub_entry (input_section,
11281 						   sym_sec, h,
11282 						   rel, globals,
11283 						   stub_type);
11284 	    if (stub_entry != NULL)
11285 	      {
11286 		value = (stub_entry->stub_offset
11287 			+ stub_entry->stub_sec->output_offset
11288 			+ stub_entry->stub_sec->output_section->vma);
11289 	      }
11290 	  }
11291 
11292 	relocation = value + signed_addend;
11293 	relocation -= (input_section->output_section->vma
11294 		       + input_section->output_offset
11295 		       + rel->r_offset);
11296 	signed_check = (bfd_signed_vma) relocation;
11297 
11298 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11299 	  overflow = TRUE;
11300 
11301 	/* Put RELOCATION back into the insn.  */
11302 	{
11303 	  bfd_vma S  = (relocation & 0x00100000) >> 20;
11304 	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
11305 	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
11306 	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
11307 	  bfd_vma lo = (relocation & 0x00000ffe) >>  1;
11308 
11309 	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11310 	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11311 	}
11312 
11313 	/* Put the relocated value back in the object file:  */
11314 	bfd_put_16 (input_bfd, upper_insn, hit_data);
11315 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11316 
11317 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11318       }
11319 
11320     case R_ARM_THM_JUMP11:
11321     case R_ARM_THM_JUMP8:
11322     case R_ARM_THM_JUMP6:
11323       /* Thumb B (branch) instruction.  */
11324       {
11325 	bfd_signed_vma relocation;
11326 	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11327 	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11328 	bfd_signed_vma signed_check;
11329 
11330 	/* CBZ cannot jump backward.  */
11331 	if (r_type == R_ARM_THM_JUMP6)
11332 	  reloc_signed_min = 0;
11333 
11334 	if (globals->use_rel)
11335 	  {
11336 	    /* Need to refetch addend.  */
11337 	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
11338 	    if (addend & ((howto->src_mask + 1) >> 1))
11339 	      {
11340 		signed_addend = -1;
11341 		signed_addend &= ~ howto->src_mask;
11342 		signed_addend |= addend;
11343 	      }
11344 	    else
11345 	      signed_addend = addend;
11346 	    /* The value in the insn has been right shifted.  We need to
11347 	       undo this, so that we can perform the address calculation
11348 	       in terms of bytes.  */
11349 	    signed_addend <<= howto->rightshift;
11350 	  }
11351 	relocation = value + signed_addend;
11352 
11353 	relocation -= (input_section->output_section->vma
11354 		       + input_section->output_offset
11355 		       + rel->r_offset);
11356 
11357 	relocation >>= howto->rightshift;
11358 	signed_check = relocation;
11359 
11360 	if (r_type == R_ARM_THM_JUMP6)
11361 	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11362 	else
11363 	  relocation &= howto->dst_mask;
11364 	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11365 
11366 	bfd_put_16 (input_bfd, relocation, hit_data);
11367 
11368 	/* Assumes two's complement.  */
11369 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11370 	  return bfd_reloc_overflow;
11371 
11372 	return bfd_reloc_ok;
11373       }
11374 
11375     case R_ARM_ALU_PCREL7_0:
11376     case R_ARM_ALU_PCREL15_8:
11377     case R_ARM_ALU_PCREL23_15:
11378       {
11379 	bfd_vma insn;
11380 	bfd_vma relocation;
11381 
11382 	insn = bfd_get_32 (input_bfd, hit_data);
11383 	if (globals->use_rel)
11384 	  {
11385 	    /* Extract the addend.  */
11386 	    addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11387 	    signed_addend = addend;
11388 	  }
11389 	relocation = value + signed_addend;
11390 
11391 	relocation -= (input_section->output_section->vma
11392 		       + input_section->output_offset
11393 		       + rel->r_offset);
11394 	insn = (insn & ~0xfff)
11395 	       | ((howto->bitpos << 7) & 0xf00)
11396 	       | ((relocation >> howto->bitpos) & 0xff);
11397 	bfd_put_32 (input_bfd, insn, hit_data);
11398       }
11399       return bfd_reloc_ok;
11400 
11401     case R_ARM_GNU_VTINHERIT:
11402     case R_ARM_GNU_VTENTRY:
11403       return bfd_reloc_ok;
11404 
11405     case R_ARM_GOTOFF32:
11406       /* Relocation is relative to the start of the
11407 	 global offset table.  */
11408 
11409       BFD_ASSERT (sgot != NULL);
11410       if (sgot == NULL)
11411 	return bfd_reloc_notsupported;
11412 
11413       /* If we are addressing a Thumb function, we need to adjust the
11414 	 address by one, so that attempts to call the function pointer will
11415 	 correctly interpret it as Thumb code.  */
11416       if (branch_type == ST_BRANCH_TO_THUMB)
11417 	value += 1;
11418 
11419       /* Note that sgot->output_offset is not involved in this
11420 	 calculation.  We always want the start of .got.  If we
11421 	 define _GLOBAL_OFFSET_TABLE_ in a different way, as is
11422 	 permitted by the ABI, we might have to change this
11423 	 calculation.  */
11424       value -= sgot->output_section->vma;
11425       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11426 				       contents, rel->r_offset, value,
11427 				       rel->r_addend);
11428 
11429     case R_ARM_GOTPC:
11430       /* Use global offset table as symbol value.  */
11431       BFD_ASSERT (sgot != NULL);
11432 
11433       if (sgot == NULL)
11434 	return bfd_reloc_notsupported;
11435 
11436       *unresolved_reloc_p = FALSE;
11437       value = sgot->output_section->vma;
11438       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11439 				       contents, rel->r_offset, value,
11440 				       rel->r_addend);
11441 
11442     case R_ARM_GOT32:
11443     case R_ARM_GOT_PREL:
11444       /* Relocation is to the entry for this symbol in the
11445 	 global offset table.  */
11446       if (sgot == NULL)
11447 	return bfd_reloc_notsupported;
11448 
11449       if (dynreloc_st_type == STT_GNU_IFUNC
11450 	  && plt_offset != (bfd_vma) -1
11451 	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11452 	{
11453 	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
11454 	     symbol, and the relocation resolves directly to the runtime
11455 	     target rather than to the .iplt entry.  This means that any
11456 	     .got entry would be the same value as the .igot.plt entry,
11457 	     so there's no point creating both.  */
11458 	  sgot = globals->root.igotplt;
11459 	  value = sgot->output_offset + gotplt_offset;
11460 	}
11461       else if (h != NULL)
11462 	{
11463 	  bfd_vma off;
11464 
11465 	  off = h->got.offset;
11466 	  BFD_ASSERT (off != (bfd_vma) -1);
11467 	  if ((off & 1) != 0)
11468 	    {
11469 	      /* We have already processed one GOT relocation against
11470 		 this symbol.  */
11471 	      off &= ~1;
11472 	      if (globals->root.dynamic_sections_created
11473 		  && !SYMBOL_REFERENCES_LOCAL (info, h))
11474 		*unresolved_reloc_p = FALSE;
11475 	    }
11476 	  else
11477 	    {
11478 	      Elf_Internal_Rela outrel;
11479 	      int isrofixup = 0;
11480 
11481 	      if (((h->dynindx != -1) || globals->fdpic_p)
11482 		  && !SYMBOL_REFERENCES_LOCAL (info, h))
11483 		{
11484 		  /* If the symbol doesn't resolve locally in a static
11485 		     object, we have an undefined reference.  If the
11486 		     symbol doesn't resolve locally in a dynamic object,
11487 		     it should be resolved by the dynamic linker.  */
11488 		  if (globals->root.dynamic_sections_created)
11489 		    {
11490 		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11491 		      *unresolved_reloc_p = FALSE;
11492 		    }
11493 		  else
11494 		    outrel.r_info = 0;
11495 		  outrel.r_addend = 0;
11496 		}
11497 	      else
11498 		{
11499 		  if (dynreloc_st_type == STT_GNU_IFUNC)
11500 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11501 		  else if (bfd_link_pic (info)
11502 			   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11503 			       || h->root.type != bfd_link_hash_undefweak))
11504 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11505 		  else
11506 		    {
11507 		      outrel.r_info = 0;
11508 		      if (globals->fdpic_p)
11509 			isrofixup = 1;
11510 		    }
11511 		  outrel.r_addend = dynreloc_value;
11512 		}
11513 
11514 	      /* The GOT entry is initialized to zero by default.
11515 		 See if we should install a different value.  */
11516 	      if (outrel.r_addend != 0
11517 		  && (globals->use_rel || outrel.r_info == 0))
11518 		{
11519 		  bfd_put_32 (output_bfd, outrel.r_addend,
11520 			      sgot->contents + off);
11521 		  outrel.r_addend = 0;
11522 		}
11523 
11524 	      if (isrofixup)
11525 		arm_elf_add_rofixup (output_bfd,
11526 				     elf32_arm_hash_table(info)->srofixup,
11527 				     sgot->output_section->vma
11528 				     + sgot->output_offset + off);
11529 
11530 	      else if (outrel.r_info != 0)
11531 		{
11532 		  outrel.r_offset = (sgot->output_section->vma
11533 				     + sgot->output_offset
11534 				     + off);
11535 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11536 		}
11537 
11538 	      h->got.offset |= 1;
11539 	    }
11540 	  value = sgot->output_offset + off;
11541 	}
11542       else
11543 	{
11544 	  bfd_vma off;
11545 
11546 	  BFD_ASSERT (local_got_offsets != NULL
11547 		      && local_got_offsets[r_symndx] != (bfd_vma) -1);
11548 
11549 	  off = local_got_offsets[r_symndx];
11550 
11551 	  /* The offset must always be a multiple of 4.  We use the
11552 	     least significant bit to record whether we have already
11553 	     generated the necessary reloc.  */
11554 	  if ((off & 1) != 0)
11555 	    off &= ~1;
11556 	  else
11557 	    {
11558 	      Elf_Internal_Rela outrel;
11559 	      int isrofixup = 0;
11560 
11561 	      if (dynreloc_st_type == STT_GNU_IFUNC)
11562 		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11563 	      else if (bfd_link_pic (info))
11564 		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11565 	      else
11566 		{
11567 		  outrel.r_info = 0;
11568 		  if (globals->fdpic_p)
11569 		    isrofixup = 1;
11570 		}
11571 
11572 	      /* The GOT entry is initialized to zero by default.
11573 		 See if we should install a different value.  */
11574 	      if (globals->use_rel || outrel.r_info == 0)
11575 		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11576 
11577 	      if (isrofixup)
11578 		arm_elf_add_rofixup (output_bfd,
11579 				     globals->srofixup,
11580 				     sgot->output_section->vma
11581 				     + sgot->output_offset + off);
11582 
11583 	      else if (outrel.r_info != 0)
11584 		{
11585 		  outrel.r_addend = addend + dynreloc_value;
11586 		  outrel.r_offset = (sgot->output_section->vma
11587 				     + sgot->output_offset
11588 				     + off);
11589 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11590 		}
11591 
11592 	      local_got_offsets[r_symndx] |= 1;
11593 	    }
11594 
11595 	  value = sgot->output_offset + off;
11596 	}
11597       if (r_type != R_ARM_GOT32)
11598 	value += sgot->output_section->vma;
11599 
11600       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11601 				       contents, rel->r_offset, value,
11602 				       rel->r_addend);
11603 
11604     case R_ARM_TLS_LDO32:
11605       value = value - dtpoff_base (info);
11606 
11607       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11608 				       contents, rel->r_offset, value,
11609 				       rel->r_addend);
11610 
11611     case R_ARM_TLS_LDM32:
11612     case R_ARM_TLS_LDM32_FDPIC:
11613       {
11614 	bfd_vma off;
11615 
11616 	if (sgot == NULL)
11617 	  abort ();
11618 
11619 	off = globals->tls_ldm_got.offset;
11620 
11621 	if ((off & 1) != 0)
11622 	  off &= ~1;
11623 	else
11624 	  {
11625 	    /* If we don't know the module number, create a relocation
11626 	       for it.  */
11627 	    if (bfd_link_pic (info))
11628 	      {
11629 		Elf_Internal_Rela outrel;
11630 
11631 		if (srelgot == NULL)
11632 		  abort ();
11633 
11634 		outrel.r_addend = 0;
11635 		outrel.r_offset = (sgot->output_section->vma
11636 				   + sgot->output_offset + off);
11637 		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11638 
11639 		if (globals->use_rel)
11640 		  bfd_put_32 (output_bfd, outrel.r_addend,
11641 			      sgot->contents + off);
11642 
11643 		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11644 	      }
11645 	    else
11646 	      bfd_put_32 (output_bfd, 1, sgot->contents + off);
11647 
11648 	    globals->tls_ldm_got.offset |= 1;
11649 	  }
11650 
11651 	if (r_type == R_ARM_TLS_LDM32_FDPIC)
11652 	  {
11653 	    bfd_put_32(output_bfd,
11654 		       globals->root.sgot->output_offset + off,
11655 		       contents + rel->r_offset);
11656 
11657 	    return bfd_reloc_ok;
11658 	  }
11659 	else
11660 	  {
11661 	    value = sgot->output_section->vma + sgot->output_offset + off
11662 	      - (input_section->output_section->vma
11663 		 + input_section->output_offset + rel->r_offset);
11664 
11665 	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
11666 					     contents, rel->r_offset, value,
11667 					     rel->r_addend);
11668 	  }
11669       }
11670 
11671     case R_ARM_TLS_CALL:
11672     case R_ARM_THM_TLS_CALL:
11673     case R_ARM_TLS_GD32:
11674     case R_ARM_TLS_GD32_FDPIC:
11675     case R_ARM_TLS_IE32:
11676     case R_ARM_TLS_IE32_FDPIC:
11677     case R_ARM_TLS_GOTDESC:
11678     case R_ARM_TLS_DESCSEQ:
11679     case R_ARM_THM_TLS_DESCSEQ:
11680       {
11681 	bfd_vma off, offplt;
11682 	int indx = 0;
11683 	char tls_type;
11684 
11685 	BFD_ASSERT (sgot != NULL);
11686 
11687 	if (h != NULL)
11688 	  {
11689 	    bfd_boolean dyn;
11690 	    dyn = globals->root.dynamic_sections_created;
11691 	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11692 						 bfd_link_pic (info),
11693 						 h)
11694 		&& (!bfd_link_pic (info)
11695 		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
11696 	      {
11697 		*unresolved_reloc_p = FALSE;
11698 		indx = h->dynindx;
11699 	      }
11700 	    off = h->got.offset;
11701 	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11702 	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11703 	  }
11704 	else
11705 	  {
11706 	    BFD_ASSERT (local_got_offsets != NULL);
11707 	    off = local_got_offsets[r_symndx];
11708 	    offplt = local_tlsdesc_gotents[r_symndx];
11709 	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11710 	  }
11711 
11712 	/* Linker relaxation happens from one of the
11713 	   R_ARM_TLS_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
11714 	if (ELF32_R_TYPE(rel->r_info) != r_type)
11715 	  tls_type = GOT_TLS_IE;
11716 
11717 	BFD_ASSERT (tls_type != GOT_UNKNOWN);
11718 
11719 	if ((off & 1) != 0)
11720 	  off &= ~1;
11721 	else
11722 	  {
11723 	    bfd_boolean need_relocs = FALSE;
11724 	    Elf_Internal_Rela outrel;
11725 	    int cur_off = off;
11726 
11727 	    /* The GOT entries have not been initialized yet.  Do it
11728 	       now, and emit any relocations.  If both an IE GOT and a
11729 	       GD GOT are necessary, we emit the GD first.  */
11730 
11731 	    if ((bfd_link_pic (info) || indx != 0)
11732 		&& (h == NULL
11733 		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11734 			&& !resolved_to_zero)
11735 		    || h->root.type != bfd_link_hash_undefweak))
11736 	      {
11737 		need_relocs = TRUE;
11738 		BFD_ASSERT (srelgot != NULL);
11739 	      }
11740 
11741 	    if (tls_type & GOT_TLS_GDESC)
11742 	      {
11743 		bfd_byte *loc;
11744 
11745 		/* We should have relaxed, unless this is an undefined
11746 		   weak symbol.  */
11747 		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11748 			    || bfd_link_pic (info));
11749 		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11750 			    <= globals->root.sgotplt->size);
11751 
11752 		outrel.r_addend = 0;
11753 		outrel.r_offset = (globals->root.sgotplt->output_section->vma
11754 				   + globals->root.sgotplt->output_offset
11755 				   + offplt
11756 				   + globals->sgotplt_jump_table_size);
11757 
11758 		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11759 		sreloc = globals->root.srelplt;
11760 		loc = sreloc->contents;
11761 		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11762 		BFD_ASSERT (loc + RELOC_SIZE (globals)
11763 			   <= sreloc->contents + sreloc->size);
11764 
11765 		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11766 
11767 		/* For globals, the first word in the relocation gets
11768 		   the relocation index and the top bit set, or zero,
11769 		   if we're binding now.  For locals, it gets the
11770 		   symbol's offset in the tls section.  */
11771 		bfd_put_32 (output_bfd,
11772 			    !h ? value - elf_hash_table (info)->tls_sec->vma
11773 			    : info->flags & DF_BIND_NOW ? 0
11774 			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11775 			    globals->root.sgotplt->contents + offplt
11776 			    + globals->sgotplt_jump_table_size);
11777 
11778 		/* Second word in the relocation is always zero.  */
11779 		bfd_put_32 (output_bfd, 0,
11780 			    globals->root.sgotplt->contents + offplt
11781 			    + globals->sgotplt_jump_table_size + 4);
11782 	      }
11783 	    if (tls_type & GOT_TLS_GD)
11784 	      {
11785 		if (need_relocs)
11786 		  {
11787 		    outrel.r_addend = 0;
11788 		    outrel.r_offset = (sgot->output_section->vma
11789 				       + sgot->output_offset
11790 				       + cur_off);
11791 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11792 
11793 		    if (globals->use_rel)
11794 		      bfd_put_32 (output_bfd, outrel.r_addend,
11795 				  sgot->contents + cur_off);
11796 
11797 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11798 
11799 		    if (indx == 0)
11800 		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
11801 				  sgot->contents + cur_off + 4);
11802 		    else
11803 		      {
11804 			outrel.r_addend = 0;
11805 			outrel.r_info = ELF32_R_INFO (indx,
11806 						      R_ARM_TLS_DTPOFF32);
11807 			outrel.r_offset += 4;
11808 
11809 			if (globals->use_rel)
11810 			  bfd_put_32 (output_bfd, outrel.r_addend,
11811 				      sgot->contents + cur_off + 4);
11812 
11813 			elf32_arm_add_dynreloc (output_bfd, info,
11814 						srelgot, &outrel);
11815 		      }
11816 		  }
11817 		else
11818 		  {
11819 		    /* If we are not emitting relocations for a
11820 		       general dynamic reference, then we must be in a
11821 		       static link or an executable link with the
11822 		       symbol binding locally.  Mark it as belonging
11823 		       to module 1, the executable.  */
11824 		    bfd_put_32 (output_bfd, 1,
11825 				sgot->contents + cur_off);
11826 		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
11827 				sgot->contents + cur_off + 4);
11828 		  }
11829 
11830 		cur_off += 8;
11831 	      }
11832 
11833 	    if (tls_type & GOT_TLS_IE)
11834 	      {
11835 		if (need_relocs)
11836 		  {
11837 		    if (indx == 0)
11838 		      outrel.r_addend = value - dtpoff_base (info);
11839 		    else
11840 		      outrel.r_addend = 0;
11841 		    outrel.r_offset = (sgot->output_section->vma
11842 				       + sgot->output_offset
11843 				       + cur_off);
11844 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11845 
11846 		    if (globals->use_rel)
11847 		      bfd_put_32 (output_bfd, outrel.r_addend,
11848 				  sgot->contents + cur_off);
11849 
11850 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11851 		  }
11852 		else
11853 		  bfd_put_32 (output_bfd, tpoff (info, value),
11854 			      sgot->contents + cur_off);
11855 		cur_off += 4;
11856 	      }
11857 
11858 	    if (h != NULL)
11859 	      h->got.offset |= 1;
11860 	    else
11861 	      local_got_offsets[r_symndx] |= 1;
11862 	  }
11863 
11864 	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11865 	  off += 8;
11866 	else if (tls_type & GOT_TLS_GDESC)
11867 	  off = offplt;
11868 
11869 	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
11870 	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
11871 	  {
11872 	    bfd_signed_vma offset;
11873 	    /* TLS stubs are ARM mode.  The original symbol is a
11874 	       data object, so branch_type is bogus.  */
11875 	    branch_type = ST_BRANCH_TO_ARM;
11876 	    enum elf32_arm_stub_type stub_type
11877 	      = arm_type_of_stub (info, input_section, rel,
11878 				  st_type, &branch_type,
11879 				  (struct elf32_arm_link_hash_entry *)h,
11880 				  globals->tls_trampoline, globals->root.splt,
11881 				  input_bfd, sym_name);
11882 
11883 	    if (stub_type != arm_stub_none)
11884 	      {
11885 		struct elf32_arm_stub_hash_entry *stub_entry
11886 		  = elf32_arm_get_stub_entry
11887 		  (input_section, globals->root.splt, 0, rel,
11888 		   globals, stub_type);
11889 		offset = (stub_entry->stub_offset
11890 			  + stub_entry->stub_sec->output_offset
11891 			  + stub_entry->stub_sec->output_section->vma);
11892 	      }
11893 	    else
11894 	      offset = (globals->root.splt->output_section->vma
11895 			+ globals->root.splt->output_offset
11896 			+ globals->tls_trampoline);
11897 
11898 	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
11899 	      {
11900 		unsigned long inst;
11901 
11902 		offset -= (input_section->output_section->vma
11903 			   + input_section->output_offset
11904 			   + rel->r_offset + 8);
11905 
11906 		inst = offset >> 2;
11907 		inst &= 0x00ffffff;
11908 		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11909 	      }
11910 	    else
11911 	      {
11912 		/* Thumb blx encodes the offset in a complicated
11913 		   fashion.  */
11914 		unsigned upper_insn, lower_insn;
11915 		unsigned neg;
11916 
11917 		offset -= (input_section->output_section->vma
11918 			   + input_section->output_offset
11919 			   + rel->r_offset + 4);
11920 
11921 		if (stub_type != arm_stub_none
11922 		    && arm_stub_is_thumb (stub_type))
11923 		  {
11924 		    lower_insn = 0xd000;
11925 		  }
11926 		else
11927 		  {
11928 		    lower_insn = 0xc000;
11929 		    /* Round up the offset to a word boundary.  */
11930 		    offset = (offset + 2) & ~2;
11931 		  }
11932 
11933 		neg = offset < 0;
11934 		upper_insn = (0xf000
11935 			      | ((offset >> 12) & 0x3ff)
11936 			      | (neg << 10));
11937 		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
11938 			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
11939 			      | ((offset >> 1) & 0x7ff);
11940 		bfd_put_16 (input_bfd, upper_insn, hit_data);
11941 		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11942 		return bfd_reloc_ok;
11943 	      }
11944 	  }
11945 	/* These relocations need special care: besides the fact that
11946 	   they point somewhere in .gotplt, the addend must be
11947 	   adjusted according to the type of instruction
11948 	   we refer to.  */
11949 	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
11950 	  {
11951 	    unsigned long data, insn;
11952 	    unsigned thumb;
11953 
11954 	    data = bfd_get_32 (input_bfd, hit_data);
11955 	    thumb = data & 1;
11956 	    data &= ~1u;
11957 
11958 	    if (thumb)
11959 	      {
11960 		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
11961 		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
11962 		  insn = (insn << 16)
11963 		    | bfd_get_16 (input_bfd,
11964 				  contents + rel->r_offset - data + 2);
11965 		if ((insn & 0xf800c000) == 0xf000c000)
11966 		  /* bl/blx */
11967 		  value = -6;
11968 		else if ((insn & 0xffffff00) == 0x4400)
11969 		  /* add */
11970 		  value = -5;
11971 		else
11972 		  {
11973 		    _bfd_error_handler
11974 		      /* xgettext:c-format */
11975 		      (_("%pB(%pA+%#" PRIx64 "): "
11976 			 "unexpected %s instruction '%#lx' "
11977 			 "referenced by TLS_GOTDESC"),
11978 		       input_bfd, input_section, (uint64_t) rel->r_offset,
11979 		       "Thumb", insn);
11980 		    return bfd_reloc_notsupported;
11981 		  }
11982 	      }
11983 	    else
11984 	      {
11985 		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
11986 
11987 		switch (insn >> 24)
11988 		  {
11989 		  case 0xeb:  /* bl */
11990 		  case 0xfa:  /* blx */
11991 		    value = -4;
11992 		    break;
11993 
11994 		  case 0xe0:	/* add */
11995 		    value = -8;
11996 		    break;
11997 
11998 		  default:
11999 		    _bfd_error_handler
12000 		      /* xgettext:c-format */
12001 		      (_("%pB(%pA+%#" PRIx64 "): "
12002 			 "unexpected %s instruction '%#lx' "
12003 			 "referenced by TLS_GOTDESC"),
12004 		       input_bfd, input_section, (uint64_t) rel->r_offset,
12005 		       "ARM", insn);
12006 		    return bfd_reloc_notsupported;
12007 		  }
12008 	      }
12009 
12010 	    value += ((globals->root.sgotplt->output_section->vma
12011 		       + globals->root.sgotplt->output_offset + off)
12012 		      - (input_section->output_section->vma
12013 			 + input_section->output_offset
12014 			 + rel->r_offset)
12015 		      + globals->sgotplt_jump_table_size);
12016 	  }
12017 	else
12018 	  value = ((globals->root.sgot->output_section->vma
12019 		    + globals->root.sgot->output_offset + off)
12020 		   - (input_section->output_section->vma
12021 		      + input_section->output_offset + rel->r_offset));
12022 
12023 	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12024 				 r_type == R_ARM_TLS_IE32_FDPIC))
12025 	  {
12026 	    /* For FDPIC relocations, resolve to the offset of the GOT
12027 	       entry from the start of GOT.  */
12028 	    bfd_put_32(output_bfd,
12029 		       globals->root.sgot->output_offset + off,
12030 		       contents + rel->r_offset);
12031 
12032 	    return bfd_reloc_ok;
12033 	  }
12034 	else
12035 	  {
12036 	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
12037 					     contents, rel->r_offset, value,
12038 					     rel->r_addend);
12039 	  }
12040       }
12041 
12042     case R_ARM_TLS_LE32:
12043       if (bfd_link_dll (info))
12044 	{
12045 	  _bfd_error_handler
12046 	    /* xgettext:c-format */
12047 	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12048 	       "in shared object"),
12049 	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12050 	  return bfd_reloc_notsupported;
12051 	}
12052       else
12053 	value = tpoff (info, value);
12054 
12055       return _bfd_final_link_relocate (howto, input_bfd, input_section,
12056 				       contents, rel->r_offset, value,
12057 				       rel->r_addend);
12058 
12059     case R_ARM_V4BX:
12060       if (globals->fix_v4bx)
12061 	{
12062 	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12063 
12064 	  /* Ensure that we have a BX instruction.  */
12065 	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12066 
12067 	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12068 	    {
12069 	      /* Branch to veneer.  */
12070 	      bfd_vma glue_addr;
12071 	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12072 	      glue_addr -= input_section->output_section->vma
12073 			   + input_section->output_offset
12074 			   + rel->r_offset + 8;
12075 	      insn = (insn & 0xf0000000) | 0x0a000000
12076 		     | ((glue_addr >> 2) & 0x00ffffff);
12077 	    }
12078 	  else
12079 	    {
12080 	      /* Preserve Rm (lowest four bits) and the condition code
12081 		 (highest four bits). Other bits encode MOV PC,Rm.  */
12082 	      insn = (insn & 0xf000000f) | 0x01a0f000;
12083 	    }
12084 
12085 	  bfd_put_32 (input_bfd, insn, hit_data);
12086 	}
12087       return bfd_reloc_ok;
12088 
12089     case R_ARM_MOVW_ABS_NC:
12090     case R_ARM_MOVT_ABS:
12091     case R_ARM_MOVW_PREL_NC:
12092     case R_ARM_MOVT_PREL:
12093     /* Until we properly support segment-base-relative addressing,
12094        we assume the segment base to be zero, as for the group relocations.
12095        Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12096        and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
12097     case R_ARM_MOVW_BREL_NC:
12098     case R_ARM_MOVW_BREL:
12099     case R_ARM_MOVT_BREL:
12100       {
12101 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12102 
12103 	if (globals->use_rel)
12104 	  {
12105 	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
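	    /* Sign-extend the 16-bit addend: XORing with 0x8000 and then
	       subtracting 0x8000 propagates bit 15.  */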
12106 	    signed_addend = (addend ^ 0x8000) - 0x8000;
12107 	  }
12108 
12109 	value += signed_addend;
12110 
12111 	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12112 	  value -= (input_section->output_section->vma
12113 		    + input_section->output_offset + rel->r_offset);
12114 
12115 	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12116 	  return bfd_reloc_overflow;
12117 
12118 	if (branch_type == ST_BRANCH_TO_THUMB)
12119 	  value |= 1;
12120 
12121 	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12122 	    || r_type == R_ARM_MOVT_BREL)
12123 	  value >>= 16;
12124 
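	/* MOVW/MOVT encode the 16-bit immediate as imm4:imm12, with imm4 in
	   bits 16-19 and imm12 in bits 0-11 of the instruction.  */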
12125 	insn &= 0xfff0f000;
12126 	insn |= value & 0xfff;
12127 	insn |= (value & 0xf000) << 4;
12128 	bfd_put_32 (input_bfd, insn, hit_data);
12129       }
12130       return bfd_reloc_ok;
12131 
12132     case R_ARM_THM_MOVW_ABS_NC:
12133     case R_ARM_THM_MOVT_ABS:
12134     case R_ARM_THM_MOVW_PREL_NC:
12135     case R_ARM_THM_MOVT_PREL:
12136     /* Until we properly support segment-base-relative addressing,
12137        we assume the segment base to be zero, as for the above relocations.
12138        Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12139        R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12140        as R_ARM_THM_MOVT_ABS.  */
12141     case R_ARM_THM_MOVW_BREL_NC:
12142     case R_ARM_THM_MOVW_BREL:
12143     case R_ARM_THM_MOVT_BREL:
12144       {
12145 	bfd_vma insn;
12146 
12147 	insn = bfd_get_16 (input_bfd, hit_data) << 16;
12148 	insn |= bfd_get_16 (input_bfd, hit_data + 2);
12149 
12150 	if (globals->use_rel)
12151 	  {
12152 	    addend = ((insn >> 4)  & 0xf000)
12153 		   | ((insn >> 15) & 0x0800)
12154 		   | ((insn >> 4)  & 0x0700)
12155 		   | (insn	   & 0x00ff);
12156 	    signed_addend = (addend ^ 0x8000) - 0x8000;
12157 	  }
12158 
12159 	value += signed_addend;
12160 
12161 	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12162 	  value -= (input_section->output_section->vma
12163 		    + input_section->output_offset + rel->r_offset);
12164 
12165 	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12166 	  return bfd_reloc_overflow;
12167 
12168 	if (branch_type == ST_BRANCH_TO_THUMB)
12169 	  value |= 1;
12170 
12171 	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12172 	    || r_type == R_ARM_THM_MOVT_BREL)
12173 	  value >>= 16;
12174 
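	/* Thumb-2 MOVW/MOVT split the 16-bit immediate into imm4:i:imm3:imm8
	   (bits 16-19, bit 26, bits 12-14 and bits 0-7 of the 32-bit
	   encoding).  */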
12175 	insn &= 0xfbf08f00;
12176 	insn |= (value & 0xf000) << 4;
12177 	insn |= (value & 0x0800) << 15;
12178 	insn |= (value & 0x0700) << 4;
12179 	insn |= (value & 0x00ff);
12180 
12181 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
12182 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12183       }
12184       return bfd_reloc_ok;
12185 
12186     case R_ARM_ALU_PC_G0_NC:
12187     case R_ARM_ALU_PC_G1_NC:
12188     case R_ARM_ALU_PC_G0:
12189     case R_ARM_ALU_PC_G1:
12190     case R_ARM_ALU_PC_G2:
12191     case R_ARM_ALU_SB_G0_NC:
12192     case R_ARM_ALU_SB_G1_NC:
12193     case R_ARM_ALU_SB_G0:
12194     case R_ARM_ALU_SB_G1:
12195     case R_ARM_ALU_SB_G2:
12196       {
12197 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12198 	bfd_vma pc = input_section->output_section->vma
12199 		     + input_section->output_offset + rel->r_offset;
12200 	/* sb is the origin of the *segment* containing the symbol.  */
12201 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12202 	bfd_vma residual;
12203 	bfd_vma g_n;
12204 	bfd_signed_vma signed_value;
12205 	int group = 0;
12206 
12207 	/* Determine which group of bits to select.  */
12208 	switch (r_type)
12209 	  {
12210 	  case R_ARM_ALU_PC_G0_NC:
12211 	  case R_ARM_ALU_PC_G0:
12212 	  case R_ARM_ALU_SB_G0_NC:
12213 	  case R_ARM_ALU_SB_G0:
12214 	    group = 0;
12215 	    break;
12216 
12217 	  case R_ARM_ALU_PC_G1_NC:
12218 	  case R_ARM_ALU_PC_G1:
12219 	  case R_ARM_ALU_SB_G1_NC:
12220 	  case R_ARM_ALU_SB_G1:
12221 	    group = 1;
12222 	    break;
12223 
12224 	  case R_ARM_ALU_PC_G2:
12225 	  case R_ARM_ALU_SB_G2:
12226 	    group = 2;
12227 	    break;
12228 
12229 	  default:
12230 	    abort ();
12231 	  }
12232 
12233 	/* If REL, extract the addend from the insn.  If RELA, it will
12234 	   have already been fetched for us.  */
12235 	if (globals->use_rel)
12236 	  {
12237 	    int negative;
12238 	    bfd_vma constant = insn & 0xff;
12239 	    bfd_vma rotation = (insn & 0xf00) >> 8;
12240 
12241 	    if (rotation == 0)
12242 	      signed_addend = constant;
12243 	    else
12244 	      {
12245 		/* Compensate for the fact that in the instruction, the
12246 		   rotation is stored in multiples of 2 bits.  */
12247 		rotation *= 2;
12248 
12249 		/* Rotate "constant" right by "rotation" bits.  */
12250 		signed_addend = (constant >> rotation) |
12251 				(constant << (8 * sizeof (bfd_vma) - rotation));
12252 	      }
12253 
12254 	    /* Determine if the instruction is an ADD or a SUB.
12255 	       (For REL, this determines the sign of the addend.)  */
12256 	    negative = identify_add_or_sub (insn);
12257 	    if (negative == 0)
12258 	      {
12259 		_bfd_error_handler
12260 		  /* xgettext:c-format */
12261 		  (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12262 		     "are allowed for ALU group relocations"),
12263 		  input_bfd, input_section, (uint64_t) rel->r_offset);
12264 		return bfd_reloc_overflow;
12265 	      }
12266 
12267 	    signed_addend *= negative;
12268 	  }
12269 
12270 	/* Compute the value (X) to go in the place.  */
12271 	if (r_type == R_ARM_ALU_PC_G0_NC
12272 	    || r_type == R_ARM_ALU_PC_G1_NC
12273 	    || r_type == R_ARM_ALU_PC_G0
12274 	    || r_type == R_ARM_ALU_PC_G1
12275 	    || r_type == R_ARM_ALU_PC_G2)
12276 	  /* PC relative.  */
12277 	  signed_value = value - pc + signed_addend;
12278 	else
12279 	  /* Section base relative.  */
12280 	  signed_value = value - sb + signed_addend;
12281 
12282 	/* If the target symbol is a Thumb function, then set the
12283 	   Thumb bit in the address.  */
12284 	if (branch_type == ST_BRANCH_TO_THUMB)
12285 	  signed_value |= 1;
12286 
12287 	/* Calculate the value of the relevant G_n, in encoded
12288 	   constant-with-rotation format.  */
12289 	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12290 					  group, &residual);
12291 
12292 	/* Check for overflow if required.  */
12293 	if ((r_type == R_ARM_ALU_PC_G0
12294 	     || r_type == R_ARM_ALU_PC_G1
12295 	     || r_type == R_ARM_ALU_PC_G2
12296 	     || r_type == R_ARM_ALU_SB_G0
12297 	     || r_type == R_ARM_ALU_SB_G1
12298 	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12299 	  {
12300 	    _bfd_error_handler
12301 	      /* xgettext:c-format */
12302 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12303 		 "splitting %#" PRIx64 " for group relocation %s"),
12304 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12305 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12306 	       howto->name);
12307 	    return bfd_reloc_overflow;
12308 	  }
12309 
12310 	/* Mask out the value and the ADD/SUB part of the opcode; take care
12311 	   not to destroy the S bit.  */
12312 	insn &= 0xff1ff000;
12313 
12314 	/* Set the opcode according to whether the value to go in the
12315 	   place is negative.  */
12316 	if (signed_value < 0)
12317 	  insn |= 1 << 22;
12318 	else
12319 	  insn |= 1 << 23;
12320 
12321 	/* Encode the offset.  */
12322 	insn |= g_n;
12323 
12324 	bfd_put_32 (input_bfd, insn, hit_data);
12325       }
12326       return bfd_reloc_ok;
12327 
12328     case R_ARM_LDR_PC_G0:
12329     case R_ARM_LDR_PC_G1:
12330     case R_ARM_LDR_PC_G2:
12331     case R_ARM_LDR_SB_G0:
12332     case R_ARM_LDR_SB_G1:
12333     case R_ARM_LDR_SB_G2:
12334       {
12335 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12336 	bfd_vma pc = input_section->output_section->vma
12337 		     + input_section->output_offset + rel->r_offset;
12338 	/* sb is the origin of the *segment* containing the symbol.  */
12339 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12340 	bfd_vma residual;
12341 	bfd_signed_vma signed_value;
12342 	int group = 0;
12343 
12344 	/* Determine which groups of bits to calculate.  */
12345 	switch (r_type)
12346 	  {
12347 	  case R_ARM_LDR_PC_G0:
12348 	  case R_ARM_LDR_SB_G0:
12349 	    group = 0;
12350 	    break;
12351 
12352 	  case R_ARM_LDR_PC_G1:
12353 	  case R_ARM_LDR_SB_G1:
12354 	    group = 1;
12355 	    break;
12356 
12357 	  case R_ARM_LDR_PC_G2:
12358 	  case R_ARM_LDR_SB_G2:
12359 	    group = 2;
12360 	    break;
12361 
12362 	  default:
12363 	    abort ();
12364 	  }
12365 
12366 	/* If REL, extract the addend from the insn.  If RELA, it will
12367 	   have already been fetched for us.  */
12368 	if (globals->use_rel)
12369 	  {
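	    /* Bit 23 is the U bit; it gives the sign of the 12-bit
	       immediate offset.  */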
12370 	    int negative = (insn & (1 << 23)) ? 1 : -1;
12371 	    signed_addend = negative * (insn & 0xfff);
12372 	  }
12373 
12374 	/* Compute the value (X) to go in the place.  */
12375 	if (r_type == R_ARM_LDR_PC_G0
12376 	    || r_type == R_ARM_LDR_PC_G1
12377 	    || r_type == R_ARM_LDR_PC_G2)
12378 	  /* PC relative.  */
12379 	  signed_value = value - pc + signed_addend;
12380 	else
12381 	  /* Section base relative.  */
12382 	  signed_value = value - sb + signed_addend;
12383 
12384 	/* Calculate the value of the relevant G_{n-1} to obtain
12385 	   the residual at that stage.  */
12386 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12387 				    group - 1, &residual);
12388 
12389 	/* Check for overflow.  */
12390 	if (residual >= 0x1000)
12391 	  {
12392 	    _bfd_error_handler
12393 	      /* xgettext:c-format */
12394 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12395 		 "splitting %#" PRIx64 " for group relocation %s"),
12396 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12397 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12398 	       howto->name);
12399 	    return bfd_reloc_overflow;
12400 	  }
12401 
12402 	/* Mask out the value and U bit.  */
12403 	insn &= 0xff7ff000;
12404 
12405 	/* Set the U bit if the value to go in the place is non-negative.  */
12406 	if (signed_value >= 0)
12407 	  insn |= 1 << 23;
12408 
12409 	/* Encode the offset.  */
12410 	insn |= residual;
12411 
12412 	bfd_put_32 (input_bfd, insn, hit_data);
12413       }
12414       return bfd_reloc_ok;
12415 
12416     case R_ARM_LDRS_PC_G0:
12417     case R_ARM_LDRS_PC_G1:
12418     case R_ARM_LDRS_PC_G2:
12419     case R_ARM_LDRS_SB_G0:
12420     case R_ARM_LDRS_SB_G1:
12421     case R_ARM_LDRS_SB_G2:
12422       {
12423 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12424 	bfd_vma pc = input_section->output_section->vma
12425 		     + input_section->output_offset + rel->r_offset;
12426 	/* sb is the origin of the *segment* containing the symbol.  */
12427 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12428 	bfd_vma residual;
12429 	bfd_signed_vma signed_value;
12430 	int group = 0;
12431 
12432 	/* Determine which groups of bits to calculate.  */
12433 	switch (r_type)
12434 	  {
12435 	  case R_ARM_LDRS_PC_G0:
12436 	  case R_ARM_LDRS_SB_G0:
12437 	    group = 0;
12438 	    break;
12439 
12440 	  case R_ARM_LDRS_PC_G1:
12441 	  case R_ARM_LDRS_SB_G1:
12442 	    group = 1;
12443 	    break;
12444 
12445 	  case R_ARM_LDRS_PC_G2:
12446 	  case R_ARM_LDRS_SB_G2:
12447 	    group = 2;
12448 	    break;
12449 
12450 	  default:
12451 	    abort ();
12452 	  }
12453 
12454 	/* If REL, extract the addend from the insn.  If RELA, it will
12455 	   have already been fetched for us.  */
12456 	if (globals->use_rel)
12457 	  {
12458 	    int negative = (insn & (1 << 23)) ? 1 : -1;
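	    /* Extra load/store (LDRS-class) instructions split the 8-bit
	       immediate into imm4H (bits 8-11) and imm4L (bits 0-3).  */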
12459 	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12460 	  }
12461 
12462 	/* Compute the value (X) to go in the place.  */
12463 	if (r_type == R_ARM_LDRS_PC_G0
12464 	    || r_type == R_ARM_LDRS_PC_G1
12465 	    || r_type == R_ARM_LDRS_PC_G2)
12466 	  /* PC relative.  */
12467 	  signed_value = value - pc + signed_addend;
12468 	else
12469 	  /* Section base relative.  */
12470 	  signed_value = value - sb + signed_addend;
12471 
12472 	/* Calculate the value of the relevant G_{n-1} to obtain
12473 	   the residual at that stage.  */
12474 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12475 				    group - 1, &residual);
12476 
12477 	/* Check for overflow.  */
12478 	if (residual >= 0x100)
12479 	  {
12480 	    _bfd_error_handler
12481 	      /* xgettext:c-format */
12482 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12483 		 "splitting %#" PRIx64 " for group relocation %s"),
12484 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12485 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12486 	       howto->name);
12487 	    return bfd_reloc_overflow;
12488 	  }
12489 
12490 	/* Mask out the value and U bit.  */
12491 	insn &= 0xff7ff0f0;
12492 
12493 	/* Set the U bit if the value to go in the place is non-negative.  */
12494 	if (signed_value >= 0)
12495 	  insn |= 1 << 23;
12496 
12497 	/* Encode the offset.  */
12498 	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12499 
12500 	bfd_put_32 (input_bfd, insn, hit_data);
12501       }
12502       return bfd_reloc_ok;
12503 
12504     case R_ARM_LDC_PC_G0:
12505     case R_ARM_LDC_PC_G1:
12506     case R_ARM_LDC_PC_G2:
12507     case R_ARM_LDC_SB_G0:
12508     case R_ARM_LDC_SB_G1:
12509     case R_ARM_LDC_SB_G2:
12510       {
12511 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12512 	bfd_vma pc = input_section->output_section->vma
12513 		     + input_section->output_offset + rel->r_offset;
12514 	/* sb is the origin of the *segment* containing the symbol.  */
12515 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12516 	bfd_vma residual;
12517 	bfd_signed_vma signed_value;
12518 	int group = 0;
12519 
12520 	/* Determine which groups of bits to calculate.  */
12521 	switch (r_type)
12522 	  {
12523 	  case R_ARM_LDC_PC_G0:
12524 	  case R_ARM_LDC_SB_G0:
12525 	    group = 0;
12526 	    break;
12527 
12528 	  case R_ARM_LDC_PC_G1:
12529 	  case R_ARM_LDC_SB_G1:
12530 	    group = 1;
12531 	    break;
12532 
12533 	  case R_ARM_LDC_PC_G2:
12534 	  case R_ARM_LDC_SB_G2:
12535 	    group = 2;
12536 	    break;
12537 
12538 	  default:
12539 	    abort ();
12540 	  }
12541 
12542 	/* If REL, extract the addend from the insn.  If RELA, it will
12543 	   have already been fetched for us.  */
12544 	if (globals->use_rel)
12545 	  {
12546 	    int negative = (insn & (1 << 23)) ? 1 : -1;
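	    /* The 8-bit LDC/STC immediate is a word offset, so scale it by
	       four to obtain the byte addend.  */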
12547 	    signed_addend = negative * ((insn & 0xff) << 2);
12548 	  }
12549 
12550 	/* Compute the value (X) to go in the place.  */
12551 	if (r_type == R_ARM_LDC_PC_G0
12552 	    || r_type == R_ARM_LDC_PC_G1
12553 	    || r_type == R_ARM_LDC_PC_G2)
12554 	  /* PC relative.  */
12555 	  signed_value = value - pc + signed_addend;
12556 	else
12557 	  /* Section base relative.  */
12558 	  signed_value = value - sb + signed_addend;
12559 
12560 	/* Calculate the value of the relevant G_{n-1} to obtain
12561 	   the residual at that stage.  */
12562 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12563 				    group - 1, &residual);
12564 
12565 	/* Check for overflow.  (The absolute value to go in the place must be
12566 	   divisible by four and, after having been divided by four, must
12567 	   fit in eight bits.)  */
12568 	if ((residual & 0x3) != 0 || residual >= 0x400)
12569 	  {
12570 	    _bfd_error_handler
12571 	      /* xgettext:c-format */
12572 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12573 		 "splitting %#" PRIx64 " for group relocation %s"),
12574 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12575 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12576 	       howto->name);
12577 	    return bfd_reloc_overflow;
12578 	  }
12579 
12580 	/* Mask out the value and U bit.  */
12581 	insn &= 0xff7fff00;
12582 
12583 	/* Set the U bit if the value to go in the place is non-negative.  */
12584 	if (signed_value >= 0)
12585 	  insn |= 1 << 23;
12586 
12587 	/* Encode the offset.  */
12588 	insn |= residual >> 2;
12589 
12590 	bfd_put_32 (input_bfd, insn, hit_data);
12591       }
12592       return bfd_reloc_ok;
12593 
12594     case R_ARM_THM_ALU_ABS_G0_NC:
12595     case R_ARM_THM_ALU_ABS_G1_NC:
12596     case R_ARM_THM_ALU_ABS_G2_NC:
12597     case R_ARM_THM_ALU_ABS_G3_NC:
12598 	{
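	    /* Each of the G0..G3 relocations selects one byte of the address:
	       bits 0-7, 8-15, 16-23 or 24-31 respectively.  */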
12599 	    const int shift_array[4] = {0, 8, 16, 24};
12600 	    bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12601 	    bfd_vma addr = value;
12602 	    int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
12603 
12604 	    /* Compute address.  */
12605 	    if (globals->use_rel)
12606 		signed_addend = insn & 0xff;
12607 	    addr += signed_addend;
12608 	    if (branch_type == ST_BRANCH_TO_THUMB)
12609 		addr |= 1;
12610 	    /* Clean imm8 insn.  */
12611 	    insn &= 0xff00;
12612 	    /* And update with correct part of address.  */
12613 	    insn |= (addr >> shift) & 0xff;
12614 	    /* Update insn.  */
12615 	    bfd_put_16 (input_bfd, insn, hit_data);
12616 	}
12617 
12618 	*unresolved_reloc_p = FALSE;
12619 	return bfd_reloc_ok;
12620 
12621     case R_ARM_GOTOFFFUNCDESC:
12622       {
12623         if (h == NULL)
12624 	  {
12625 	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12626 	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12627 	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12628 	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12629 	    bfd_vma seg = -1;
12630 
12631 	    if (bfd_link_pic(info) && dynindx == 0)
12632 	      abort();
12633 
12634 	    /* Resolve relocation.  */
12635 	    bfd_put_32(output_bfd, (offset + sgot->output_offset)
12636 		       , contents + rel->r_offset);
12637 	    /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12638 	       not done yet.  */
12639 	    arm_elf_fill_funcdesc(output_bfd, info,
12640 				  &local_fdpic_cnts[r_symndx].funcdesc_offset,
12641 				  dynindx, offset, addr, dynreloc_value, seg);
12642 	  }
12643 	else
12644 	  {
12645 	    int dynindx;
12646 	    int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12647 	    bfd_vma addr;
12648 	    bfd_vma seg = -1;
12649 
12650 	    /* For static binaries, sym_sec can be null.  */
12651 	    if (sym_sec)
12652 	      {
12653 		dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12654 		addr = dynreloc_value - sym_sec->output_section->vma;
12655 	      }
12656 	    else
12657 	      {
12658 		dynindx = 0;
12659 		addr = 0;
12660 	      }
12661 
12662 	    if (bfd_link_pic(info) && dynindx == 0)
12663 	      abort();
12664 
12665 	    /* This case cannot occur: a dynamic symbol's funcdesc is
12666 	       allocated by the dynamic loader, so we cannot resolve the relocation.  */
12667 	    if (h->dynindx != -1)
12668 	      abort();
12669 
12670 	    /* Resolve relocation.  */
12671 	    bfd_put_32(output_bfd, (offset + sgot->output_offset),
12672 		       contents + rel->r_offset);
12673 	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12674 	    arm_elf_fill_funcdesc(output_bfd, info,
12675 				  &eh->fdpic_cnts.funcdesc_offset,
12676 				  dynindx, offset, addr, dynreloc_value, seg);
12677 	  }
12678       }
12679       *unresolved_reloc_p = FALSE;
12680       return bfd_reloc_ok;
12681 
12682     case R_ARM_GOTFUNCDESC:
12683       {
12684         if (h != NULL)
12685 	  {
12686 	    Elf_Internal_Rela outrel;
12687 
12688 	    /* Resolve relocation.  */
12689 	    bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12690 				    + sgot->output_offset),
12691 		       contents + rel->r_offset);
12692 	    /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE.  */
12693 	    if(h->dynindx == -1)
12694 	      {
12695 		int dynindx;
12696 		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12697 		bfd_vma addr;
12698 		bfd_vma seg = -1;
12699 
12700 		/* For static binaries sym_sec can be null.  */
12701 		if (sym_sec)
12702 		  {
12703 		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12704 		    addr = dynreloc_value - sym_sec->output_section->vma;
12705 		  }
12706 		else
12707 		  {
12708 		    dynindx = 0;
12709 		    addr = 0;
12710 		  }
12711 
12712 		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12713 		arm_elf_fill_funcdesc(output_bfd, info,
12714 				      &eh->fdpic_cnts.funcdesc_offset,
12715 				      dynindx, offset, addr, dynreloc_value, seg);
12716 	      }
12717 
12718 	    /* Add a dynamic relocation on GOT entry if not already done.  */
12719 	    if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12720 	      {
12721 		if (h->dynindx == -1)
12722 		  {
12723 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12724 		    if (h->root.type == bfd_link_hash_undefweak)
12725 		      bfd_put_32(output_bfd, 0, sgot->contents
12726 				 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12727 		    else
12728 		      bfd_put_32(output_bfd, sgot->output_section->vma
12729 				 + sgot->output_offset
12730 				 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12731 				 sgot->contents
12732 				 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12733 		  }
12734 		else
12735 		  {
12736 		    outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12737 		  }
12738 		outrel.r_offset = sgot->output_section->vma
12739 		  + sgot->output_offset
12740 		  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12741 		outrel.r_addend = 0;
12742 		if (h->dynindx == -1 && !bfd_link_pic(info))
12743 		  if (h->root.type == bfd_link_hash_undefweak)
12744 		    arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12745 		  else
12746 		    arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12747 		else
12748 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12749 		eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12750 	      }
12751 	  }
12752 	else
12753 	  {
12754 	    /* Such a relocation against a static function should not have
12755 	       been emitted by the compiler.  */
12756 	    abort();
12757 	  }
12758       }
12759       *unresolved_reloc_p = FALSE;
12760       return bfd_reloc_ok;
12761 
12762     case R_ARM_FUNCDESC:
12763       {
12764         if (h == NULL)
12765 	  {
12766 	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12767 	    Elf_Internal_Rela outrel;
12768 	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12769 	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12770 	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12771 	    bfd_vma seg = -1;
12772 
12773 	    if (bfd_link_pic(info) && dynindx == 0)
12774 	      abort();
12775 
12776 	    /* Replace static FUNCDESC relocation with a
12777 	       R_ARM_RELATIVE dynamic relocation or with a rofixup for
12778 	       executable.  */
12779 	    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12780 	    outrel.r_offset = input_section->output_section->vma
12781 	      + input_section->output_offset + rel->r_offset;
12782 	    outrel.r_addend = 0;
12783 	    if (bfd_link_pic(info))
12784 	      elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12785 	    else
12786 	      arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12787 
12788 	    bfd_put_32 (input_bfd, sgot->output_section->vma
12789 			+ sgot->output_offset + offset, hit_data);
12790 
12791 	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12792 	    arm_elf_fill_funcdesc(output_bfd, info,
12793 				  &local_fdpic_cnts[r_symndx].funcdesc_offset,
12794 				  dynindx, offset, addr, dynreloc_value, seg);
12795 	  }
12796 	else
12797 	  {
12798 	    if (h->dynindx == -1)
12799 	      {
12800 		int dynindx;
12801 		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12802 		bfd_vma addr;
12803 		bfd_vma seg = -1;
12804 		Elf_Internal_Rela outrel;
12805 
12806 		/* For static binaries sym_sec can be null.  */
12807 		if (sym_sec)
12808 		  {
12809 		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12810 		    addr = dynreloc_value - sym_sec->output_section->vma;
12811 		  }
12812 		else
12813 		  {
12814 		    dynindx = 0;
12815 		    addr = 0;
12816 		  }
12817 
12818 		if (bfd_link_pic(info) && dynindx == 0)
12819 		  abort();
12820 
12821 		/* Replace static FUNCDESC relocation with a
12822 		   R_ARM_RELATIVE dynamic relocation.  */
12823 		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12824 		outrel.r_offset = input_section->output_section->vma
12825 		  + input_section->output_offset + rel->r_offset;
12826 		outrel.r_addend = 0;
12827 		if (bfd_link_pic(info))
12828 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12829 		else
12830 		  arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12831 
12832 		bfd_put_32 (input_bfd, sgot->output_section->vma
12833 			    + sgot->output_offset + offset, hit_data);
12834 
12835 		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12836 		arm_elf_fill_funcdesc(output_bfd, info,
12837 				      &eh->fdpic_cnts.funcdesc_offset,
12838 				      dynindx, offset, addr, dynreloc_value, seg);
12839 	      }
12840 	    else
12841 	      {
12842 		Elf_Internal_Rela outrel;
12843 
12844 		/* Add a dynamic relocation.  */
12845 		outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12846 		outrel.r_offset = input_section->output_section->vma
12847 		  + input_section->output_offset + rel->r_offset;
12848 		outrel.r_addend = 0;
12849 		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12850 	      }
12851 	  }
12852       }
12853       *unresolved_reloc_p = FALSE;
12854       return bfd_reloc_ok;
12855 
12856     default:
12857       return bfd_reloc_notsupported;
12858     }
12859 }
12860 
12861 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */
12862 static void
12863 arm_add_to_rel (bfd *		   abfd,
12864 		bfd_byte *	   address,
12865 		reloc_howto_type * howto,
12866 		bfd_signed_vma	   increment)
12867 {
12868   bfd_signed_vma addend;
12869 
12870   if (howto->type == R_ARM_THM_CALL
12871       || howto->type == R_ARM_THM_JUMP24)
12872     {
12873       int upper_insn, lower_insn;
12874       int upper, lower;
12875 
12876       upper_insn = bfd_get_16 (abfd, address);
12877       lower_insn = bfd_get_16 (abfd, address + 2);
12878       upper = upper_insn & 0x7ff;
12879       lower = lower_insn & 0x7ff;
12880 
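      /* The two 11-bit immediate fields hold bits 12-22 and bits 1-11 of the
	 branch offset; reassemble it, apply the increment, and split it up
	 again.  */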
12881       addend = (upper << 12) | (lower << 1);
12882       addend += increment;
12883       addend >>= 1;
12884 
12885       upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
12886       lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
12887 
12888       bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
12889       bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
12890     }
12891   else
12892     {
12893       bfd_vma	     contents;
12894 
12895       contents = bfd_get_32 (abfd, address);
12896 
12897       /* Get the (signed) value from the instruction.  */
12898       addend = contents & howto->src_mask;
12899       if (addend & ((howto->src_mask + 1) >> 1))
12900 	{
12901 	  bfd_signed_vma mask;
12902 
12903 	  mask = -1;
12904 	  mask &= ~ howto->src_mask;
12905 	  addend |= mask;
12906 	}
12907 
12908       /* Add in the increment (which is a byte value).  */
12909       switch (howto->type)
12910 	{
12911 	default:
12912 	  addend += increment;
12913 	  break;
12914 
12915 	case R_ARM_PC24:
12916 	case R_ARM_PLT32:
12917 	case R_ARM_CALL:
12918 	case R_ARM_JUMP24:
12919 	  addend <<= howto->size;
12920 	  addend += increment;
12921 
12922 	  /* Should we check for overflow here?  */
12923 
12924 	  /* Drop any undesired bits.  */
12925 	  addend >>= howto->rightshift;
12926 	  break;
12927 	}
12928 
12929       contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
12930 
12931       bfd_put_32 (abfd, contents, address);
12932     }
12933 }
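
/* Worked example of the Thumb branch case above (the operand values are
   hypothetical, chosen only for illustration):  starting from the encoded
   pair upper_insn = 0xf000, lower_insn = 0xf800 (a BL with a zero offset)
   and increment = 0x100,

     addend = (0x000 << 12) | (0x000 << 1)		= 0
     addend += 0x100					= 0x100
     addend >>= 1					= 0x80
     upper_insn = 0xf000 | ((0x80 >> 11) & 0x7ff)	= 0xf000
     lower_insn = 0xf800 | (0x80 & 0x7ff)		= 0xf880

   which re-encodes a branch displacement of 0x100 bytes.  */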
12934 
12935 #define IS_ARM_TLS_RELOC(R_TYPE)	\
12936   ((R_TYPE) == R_ARM_TLS_GD32		\
12937    || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
12938    || (R_TYPE) == R_ARM_TLS_LDO32	\
12939    || (R_TYPE) == R_ARM_TLS_LDM32	\
12940    || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
12941    || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
12942    || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
12943    || (R_TYPE) == R_ARM_TLS_TPOFF32	\
12944    || (R_TYPE) == R_ARM_TLS_LE32	\
12945    || (R_TYPE) == R_ARM_TLS_IE32	\
12946    || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
12947    || IS_ARM_TLS_GNU_RELOC (R_TYPE))
12948 
12949 /* Specific set of relocations for the GNU TLS dialect.  */
12950 #define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
12951   ((R_TYPE) == R_ARM_TLS_GOTDESC	\
12952    || (R_TYPE) == R_ARM_TLS_CALL	\
12953    || (R_TYPE) == R_ARM_THM_TLS_CALL	\
12954    || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
12955    || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
12956 
12957 /* Relocate an ARM ELF section.  */
12958 
12959 static bfd_boolean
12960 elf32_arm_relocate_section (bfd *		   output_bfd,
12961 			    struct bfd_link_info * info,
12962 			    bfd *		   input_bfd,
12963 			    asection *		   input_section,
12964 			    bfd_byte *		   contents,
12965 			    Elf_Internal_Rela *	   relocs,
12966 			    Elf_Internal_Sym *	   local_syms,
12967 			    asection **		   local_sections)
12968 {
12969   Elf_Internal_Shdr *symtab_hdr;
12970   struct elf_link_hash_entry **sym_hashes;
12971   Elf_Internal_Rela *rel;
12972   Elf_Internal_Rela *relend;
12973   const char *name;
12974   struct elf32_arm_link_hash_table * globals;
12975 
12976   globals = elf32_arm_hash_table (info);
12977   if (globals == NULL)
12978     return FALSE;
12979 
12980   symtab_hdr = & elf_symtab_hdr (input_bfd);
12981   sym_hashes = elf_sym_hashes (input_bfd);
12982 
12983   rel = relocs;
12984   relend = relocs + input_section->reloc_count;
12985   for (; rel < relend; rel++)
12986     {
12987       int			   r_type;
12988       reloc_howto_type *	   howto;
12989       unsigned long		   r_symndx;
12990       Elf_Internal_Sym *	   sym;
12991       asection *		   sec;
12992       struct elf_link_hash_entry * h;
12993       bfd_vma			   relocation;
12994       bfd_reloc_status_type	   r;
12995       arelent			   bfd_reloc;
12996       char			   sym_type;
12997       bfd_boolean		   unresolved_reloc = FALSE;
12998       char *error_message = NULL;
12999 
13000       r_symndx = ELF32_R_SYM (rel->r_info);
13001       r_type   = ELF32_R_TYPE (rel->r_info);
13002       r_type   = arm_real_reloc_type (globals, r_type);
13003 
13004       if (   r_type == R_ARM_GNU_VTENTRY
13005 	  || r_type == R_ARM_GNU_VTINHERIT)
13006 	continue;
13007 
13008       howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13009 
13010       if (howto == NULL)
13011 	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13012 
13013       h = NULL;
13014       sym = NULL;
13015       sec = NULL;
13016 
13017       if (r_symndx < symtab_hdr->sh_info)
13018 	{
13019 	  sym = local_syms + r_symndx;
13020 	  sym_type = ELF32_ST_TYPE (sym->st_info);
13021 	  sec = local_sections[r_symndx];
13022 
13023 	  /* An object file might have a reference to a local
13024 	     undefined symbol.  This is a daft object file, but we
13025 	     should at least do something about it.  V4BX & NONE
13026 	     relocations do not use the symbol and are explicitly
13027 	     allowed to use the undefined symbol, so allow those.
13028 	     Likewise for relocations against STN_UNDEF.  */
13029 	  if (r_type != R_ARM_V4BX
13030 	      && r_type != R_ARM_NONE
13031 	      && r_symndx != STN_UNDEF
13032 	      && bfd_is_und_section (sec)
13033 	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13034 	    (*info->callbacks->undefined_symbol)
13035 	      (info, bfd_elf_string_from_elf_section
13036 	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
13037 	       input_bfd, input_section,
13038 	       rel->r_offset, TRUE);
13039 
13040 	  if (globals->use_rel)
13041 	    {
13042 	      relocation = (sec->output_section->vma
13043 			    + sec->output_offset
13044 			    + sym->st_value);
13045 	      if (!bfd_link_relocatable (info)
13046 		  && (sec->flags & SEC_MERGE)
13047 		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13048 		{
13049 		  asection *msec;
13050 		  bfd_vma addend, value;
13051 
13052 		  switch (r_type)
13053 		    {
13054 		    case R_ARM_MOVW_ABS_NC:
13055 		    case R_ARM_MOVT_ABS:
13056 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13057 		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13058 		      addend = (addend ^ 0x8000) - 0x8000;
13059 		      break;
13060 
13061 		    case R_ARM_THM_MOVW_ABS_NC:
13062 		    case R_ARM_THM_MOVT_ABS:
13063 		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13064 			      << 16;
13065 		      value |= bfd_get_16 (input_bfd,
13066 					   contents + rel->r_offset + 2);
13067 		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13068 			       | ((value & 0x04000000) >> 15);
13069 		      addend = (addend ^ 0x8000) - 0x8000;
13070 		      break;
13071 
13072 		    default:
13073 		      if (howto->rightshift
13074 			  || (howto->src_mask & (howto->src_mask + 1)))
13075 			{
13076 			  _bfd_error_handler
13077 			    /* xgettext:c-format */
13078 			    (_("%pB(%pA+%#" PRIx64 "): "
13079 			       "%s relocation against SEC_MERGE section"),
13080 			     input_bfd, input_section,
13081 			     (uint64_t) rel->r_offset, howto->name);
13082 			  return FALSE;
13083 			}
13084 
13085 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13086 
13087 		      /* Get the (signed) value from the instruction.  */
13088 		      addend = value & howto->src_mask;
13089 		      if (addend & ((howto->src_mask + 1) >> 1))
13090 			{
13091 			  bfd_signed_vma mask;
13092 
13093 			  mask = -1;
13094 			  mask &= ~ howto->src_mask;
13095 			  addend |= mask;
13096 			}
13097 		      break;
13098 		    }
13099 
13100 		  msec = sec;
13101 		  addend =
13102 		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13103 		    - relocation;
13104 		  addend += msec->output_section->vma + msec->output_offset;
13105 
13106 		  /* Cases here must match those in the preceding
13107 		     switch statement.  */
13108 		  switch (r_type)
13109 		    {
13110 		    case R_ARM_MOVW_ABS_NC:
13111 		    case R_ARM_MOVT_ABS:
13112 		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13113 			      | (addend & 0xfff);
13114 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13115 		      break;
13116 
13117 		    case R_ARM_THM_MOVW_ABS_NC:
13118 		    case R_ARM_THM_MOVT_ABS:
13119 		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13120 			      | (addend & 0xff) | ((addend & 0x0800) << 15);
13121 		      bfd_put_16 (input_bfd, value >> 16,
13122 				  contents + rel->r_offset);
13123 		      bfd_put_16 (input_bfd, value,
13124 				  contents + rel->r_offset + 2);
13125 		      break;
13126 
13127 		    default:
13128 		      value = (value & ~ howto->dst_mask)
13129 			      | (addend & howto->dst_mask);
13130 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13131 		      break;
13132 		    }
13133 		}
13134 	    }
13135 	  else
13136 	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13137 	}
13138       else
13139 	{
13140 	  bfd_boolean warned, ignored;
13141 
13142 	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13143 				   r_symndx, symtab_hdr, sym_hashes,
13144 				   h, sec, relocation,
13145 				   unresolved_reloc, warned, ignored);
13146 
13147 	  sym_type = h->type;
13148 	}
13149 
13150       if (sec != NULL && discarded_section (sec))
13151 	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13152 					 rel, 1, relend, howto, 0, contents);
13153 
13154       if (bfd_link_relocatable (info))
13155 	{
13156 	  /* This is a relocatable link.  We don't have to change
13157 	     anything, unless the reloc is against a section symbol,
13158 	     in which case we have to adjust according to where the
13159 	     section symbol winds up in the output section.  */
13160 	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13161 	    {
13162 	      if (globals->use_rel)
13163 		arm_add_to_rel (input_bfd, contents + rel->r_offset,
13164 				howto, (bfd_signed_vma) sec->output_offset);
13165 	      else
13166 		rel->r_addend += sec->output_offset;
13167 	    }
13168 	  continue;
13169 	}
13170 
13171       if (h != NULL)
13172 	name = h->root.root.string;
13173       else
13174 	{
13175 	  name = (bfd_elf_string_from_elf_section
13176 		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
13177 	  if (name == NULL || *name == '\0')
13178 	    name = bfd_section_name (input_bfd, sec);
13179 	}
13180 
13181       if (r_symndx != STN_UNDEF
13182 	  && r_type != R_ARM_NONE
13183 	  && (h == NULL
13184 	      || h->root.type == bfd_link_hash_defined
13185 	      || h->root.type == bfd_link_hash_defweak)
13186 	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13187 	{
13188 	  _bfd_error_handler
13189 	    ((sym_type == STT_TLS
13190 	      /* xgettext:c-format */
13191 	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13192 	      /* xgettext:c-format */
13193 	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13194 	     input_bfd,
13195 	     input_section,
13196 	     (uint64_t) rel->r_offset,
13197 	     howto->name,
13198 	     name);
13199 	}
13200 
13201       /* We call elf32_arm_final_link_relocate unless we're completely
13202 	 done, i.e., the relaxation produced the final output we want,
13203 	 and we won't let anybody mess with it. Also, we have to do
13204 	 addend adjustments in the case of an R_ARM_TLS_GOTDESC relocation
13205 	 both in relaxed and non-relaxed cases.  */
13206       if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13207 	  || (IS_ARM_TLS_GNU_RELOC (r_type)
13208 	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
13209 		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13210 		   & GOT_TLS_GDESC)))
13211 	{
13212 	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13213 				   contents, rel, h == NULL);
13214 	  /* This may have been marked unresolved because it came from
13215 	     a shared library.  But we've just dealt with that.  */
13216 	  unresolved_reloc = 0;
13217 	}
13218       else
13219 	r = bfd_reloc_continue;
13220 
13221       if (r == bfd_reloc_continue)
13222 	{
13223 	  unsigned char branch_type =
13224 	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13225 	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13226 
13227 	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13228 					     input_section, contents, rel,
13229 					     relocation, info, sec, name,
13230 					     sym_type, branch_type, h,
13231 					     &unresolved_reloc,
13232 					     &error_message);
13233 	}
13234 
13235       /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13236 	 because such sections are not SEC_ALLOC and thus ld.so will
13237 	 not process them.  */
13238       if (unresolved_reloc
13239 	  && !((input_section->flags & SEC_DEBUGGING) != 0
13240 	       && h->def_dynamic)
13241 	  && _bfd_elf_section_offset (output_bfd, info, input_section,
13242 				      rel->r_offset) != (bfd_vma) -1)
13243 	{
13244 	  _bfd_error_handler
13245 	    /* xgettext:c-format */
13246 	    (_("%pB(%pA+%#" PRIx64 "): "
13247 	       "unresolvable %s relocation against symbol `%s'"),
13248 	     input_bfd,
13249 	     input_section,
13250 	     (uint64_t) rel->r_offset,
13251 	     howto->name,
13252 	     h->root.root.string);
13253 	  return FALSE;
13254 	}
13255 
13256       if (r != bfd_reloc_ok)
13257 	{
13258 	  switch (r)
13259 	    {
13260 	    case bfd_reloc_overflow:
13261 	      /* If the overflowing reloc was to an undefined symbol,
13262 		 we have already printed one error message and there
13263 		 is no point complaining again.  */
13264 	      if (!h || h->root.type != bfd_link_hash_undefined)
13265 		(*info->callbacks->reloc_overflow)
13266 		  (info, (h ? &h->root : NULL), name, howto->name,
13267 		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13268 	      break;
13269 
13270 	    case bfd_reloc_undefined:
13271 	      (*info->callbacks->undefined_symbol)
13272 		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
13273 	      break;
13274 
13275 	    case bfd_reloc_outofrange:
13276 	      error_message = _("out of range");
13277 	      goto common_error;
13278 
13279 	    case bfd_reloc_notsupported:
13280 	      error_message = _("unsupported relocation");
13281 	      goto common_error;
13282 
13283 	    case bfd_reloc_dangerous:
13284 	      /* error_message should already be set.  */
13285 	      goto common_error;
13286 
13287 	    default:
13288 	      error_message = _("unknown error");
13289 	      /* Fall through.  */
13290 
13291 	    common_error:
13292 	      BFD_ASSERT (error_message != NULL);
13293 	      (*info->callbacks->reloc_dangerous)
13294 		(info, error_message, input_bfd, input_section, rel->r_offset);
13295 	      break;
13296 	    }
13297 	}
13298     }
13299 
13300   return TRUE;
13301 }
13302 
13303 /* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
13304    adds the edit to the start of the list.  (The list must be built in order of
13305    ascending TINDEX: the function's callers are primarily responsible for
13306    maintaining that condition).  */
13307 
13308 static void
13309 add_unwind_table_edit (arm_unwind_table_edit **head,
13310 		       arm_unwind_table_edit **tail,
13311 		       arm_unwind_edit_type type,
13312 		       asection *linked_section,
13313 		       unsigned int tindex)
13314 {
13315   arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13316       xmalloc (sizeof (arm_unwind_table_edit));
13317 
13318   new_edit->type = type;
13319   new_edit->linked_section = linked_section;
13320   new_edit->index = tindex;
13321 
13322   if (tindex > 0)
13323     {
13324       new_edit->next = NULL;
13325 
13326       if (*tail)
13327 	(*tail)->next = new_edit;
13328 
13329       (*tail) = new_edit;
13330 
13331       if (!*head)
13332 	(*head) = new_edit;
13333     }
13334   else
13335     {
13336       new_edit->next = *head;
13337 
13338       if (!*tail)
13339 	*tail = new_edit;
13340 
13341       *head = new_edit;
13342     }
13343 }
13344 
13345 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13346 
13347 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
13348 static void
13349 adjust_exidx_size (asection *exidx_sec, int adjust)
13350 {
13351   asection *out_sec;
13352 
13353   if (!exidx_sec->rawsize)
13354     exidx_sec->rawsize = exidx_sec->size;
13355 
13356   bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
13357   out_sec = exidx_sec->output_section;
13358   /* Adjust size of output section.  */
13359   bfd_set_section_size (out_sec->owner, out_sec, out_sec->size + adjust);
13360 }
13361 
13362 /* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */
13363 static void
13364 insert_cantunwind_after (asection *text_sec, asection *exidx_sec)
13365 {
13366   struct _arm_elf_section_data *exidx_arm_data;
13367 
13368   exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13369   add_unwind_table_edit (
13370     &exidx_arm_data->u.exidx.unwind_edit_list,
13371     &exidx_arm_data->u.exidx.unwind_edit_tail,
13372     INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13373 
13374   exidx_arm_data->additional_reloc_count++;
13375 
13376   adjust_exidx_size (exidx_sec, 8);
13377 }
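
/* Note on the 8 bytes added above: per the ARM EHABI, each .ARM.exidx
   entry is a pair of 32-bit words,

       word 0:  prel31 offset of the code the entry describes
       word 1:  EXIDX_CANTUNWIND (1), an inlined unwind sequence with
		bit 31 set, or a prel31 pointer into .ARM.extab

   so inserting one EXIDX_CANTUNWIND marker grows the table by exactly
   one such pair.  */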
13378 
13379 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13380    made to those tables, such that:
13381 
13382      1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13383      2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13384 	codes which have been inlined into the index).
13385 
13386    If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13387 
13388    The edits are applied when the tables are written
13389    (in elf32_arm_write_section).  */
13390 
13391 bfd_boolean
13392 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13393 			      unsigned int num_text_sections,
13394 			      struct bfd_link_info *info,
13395 			      bfd_boolean merge_exidx_entries)
13396 {
13397   bfd *inp;
13398   unsigned int last_second_word = 0, i;
13399   asection *last_exidx_sec = NULL;
13400   asection *last_text_sec = NULL;
13401   int last_unwind_type = -1;
13402 
13403   /* Walk over all EXIDX sections, and create backlinks from the corresponding
13404      text sections.  */
13405   for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13406     {
13407       asection *sec;
13408 
13409       for (sec = inp->sections; sec != NULL; sec = sec->next)
13410 	{
13411 	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13412 	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13413 
13414 	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13415 	    continue;
13416 
13417 	  if (elf_sec->linked_to)
13418 	    {
13419 	      Elf_Internal_Shdr *linked_hdr
13420 		= &elf_section_data (elf_sec->linked_to)->this_hdr;
13421 	      struct _arm_elf_section_data *linked_sec_arm_data
13422 		= get_arm_elf_section_data (linked_hdr->bfd_section);
13423 
13424 	      if (linked_sec_arm_data == NULL)
13425 		continue;
13426 
13427 	      /* Link this .ARM.exidx section back from the text section it
13428 		 describes.  */
13429 	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13430 	    }
13431 	}
13432     }
13433 
13434   /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
13435      index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13436      and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
13437 
13438   for (i = 0; i < num_text_sections; i++)
13439     {
13440       asection *sec = text_section_order[i];
13441       asection *exidx_sec;
13442       struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13443       struct _arm_elf_section_data *exidx_arm_data;
13444       bfd_byte *contents = NULL;
13445       int deleted_exidx_bytes = 0;
13446       bfd_vma j;
13447       arm_unwind_table_edit *unwind_edit_head = NULL;
13448       arm_unwind_table_edit *unwind_edit_tail = NULL;
13449       Elf_Internal_Shdr *hdr;
13450       bfd *ibfd;
13451 
13452       if (arm_data == NULL)
13453 	continue;
13454 
13455       exidx_sec = arm_data->u.text.arm_exidx_sec;
13456       if (exidx_sec == NULL)
13457 	{
13458 	  /* Section has no unwind data.  */
13459 	  if (last_unwind_type == 0 || !last_exidx_sec)
13460 	    continue;
13461 
13462 	  /* Ignore zero sized sections.  */
13463 	  if (sec->size == 0)
13464 	    continue;
13465 
13466 	  insert_cantunwind_after (last_text_sec, last_exidx_sec);
13467 	  last_unwind_type = 0;
13468 	  continue;
13469 	}
13470 
13471       /* Skip /DISCARD/ sections.  */
13472       if (bfd_is_abs_section (exidx_sec->output_section))
13473 	continue;
13474 
13475       hdr = &elf_section_data (exidx_sec)->this_hdr;
13476       if (hdr->sh_type != SHT_ARM_EXIDX)
13477 	continue;
13478 
13479       exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13480       if (exidx_arm_data == NULL)
13481 	continue;
13482 
13483       ibfd = exidx_sec->owner;
13484 
13485       if (hdr->contents != NULL)
13486 	contents = hdr->contents;
13487       else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
13488 	/* An error?  */
13489 	continue;
13490 
13491       if (last_unwind_type > 0)
13492 	{
13493 	  unsigned int first_word = bfd_get_32 (ibfd, contents);
13494 	  /* Add cantunwind if first unwind item does not match section
13495 	     start.  */
13496 	  if (first_word != sec->vma)
13497 	    {
13498 	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
13499 	      last_unwind_type = 0;
13500 	    }
13501 	}
13502 
13503       for (j = 0; j < hdr->sh_size; j += 8)
13504 	{
13505 	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13506 	  int unwind_type;
13507 	  int elide = 0;
13508 
13509 	  /* An EXIDX_CANTUNWIND entry.  */
13510 	  if (second_word == 1)
13511 	    {
13512 	      if (last_unwind_type == 0)
13513 		elide = 1;
13514 	      unwind_type = 0;
13515 	    }
13516 	  /* Inlined unwinding data.  Merge if equal to previous.  */
13517 	  else if ((second_word & 0x80000000) != 0)
13518 	    {
13519 	      if (merge_exidx_entries
13520 		   && last_second_word == second_word && last_unwind_type == 1)
13521 		elide = 1;
13522 	      unwind_type = 1;
13523 	      last_second_word = second_word;
13524 	    }
13525 	  /* Normal table entry.  In theory we could merge these too,
13526 	     but duplicate entries are likely to be much less common.  */
13527 	  else
13528 	    unwind_type = 2;
13529 
13530 	  if (elide && !bfd_link_relocatable (info))
13531 	    {
13532 	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13533 				     DELETE_EXIDX_ENTRY, NULL, j / 8);
13534 
13535 	      deleted_exidx_bytes += 8;
13536 	    }
13537 
13538 	  last_unwind_type = unwind_type;
13539 	}
13540 
13541       /* Free contents if we allocated it ourselves.  */
13542       if (contents != hdr->contents)
13543 	free (contents);
13544 
13545       /* Record edits to be applied later (in elf32_arm_write_section).  */
13546       exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13547       exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13548 
13549       if (deleted_exidx_bytes > 0)
13550 	adjust_exidx_size (exidx_sec, -deleted_exidx_bytes);
13551 
13552       last_exidx_sec = exidx_sec;
13553       last_text_sec = sec;
13554     }
13555 
13556   /* Add terminating CANTUNWIND entry.  */
13557   if (!bfd_link_relocatable (info) && last_exidx_sec
13558       && last_unwind_type != 0)
13559     insert_cantunwind_after (last_text_sec, last_exidx_sec);
13560 
13561   return TRUE;
13562 }
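
/* Example of the elision pass above (the table contents are hypothetical,
   assuming a final link with MERGE_EXIDX_ENTRIES set and a preceding text
   section that did not end in EXIDX_CANTUNWIND):  given an .ARM.exidx
   section whose entries have the second words

       { 1, 1, 0x80b0b0b0, 0x80b0b0b0, <prel31 .ARM.extab offset> }

   the second EXIDX_CANTUNWIND entry and the repeated inlined entry are
   recorded as DELETE_EXIDX_ENTRY edits, so 16 bytes are trimmed from the
   table when elf32_arm_write_section later applies the edit list.  */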
13563 
13564 static bfd_boolean
13565 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13566 			       bfd *ibfd, const char *name)
13567 {
13568   asection *sec, *osec;
13569 
13570   sec = bfd_get_linker_section (ibfd, name);
13571   if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13572     return TRUE;
13573 
13574   osec = sec->output_section;
13575   if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13576     return TRUE;
13577 
13578   if (! bfd_set_section_contents (obfd, osec, sec->contents,
13579 				  sec->output_offset, sec->size))
13580     return FALSE;
13581 
13582   return TRUE;
13583 }
13584 
13585 static bfd_boolean
13586 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13587 {
13588   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13589   asection *sec, *osec;
13590 
13591   if (globals == NULL)
13592     return FALSE;
13593 
13594   /* Invoke the regular ELF backend linker to do all the work.  */
13595   if (!bfd_elf_final_link (abfd, info))
13596     return FALSE;
13597 
13598   /* Process stub sections (e.g. BE8 encoding, ...).  */
13599   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13600   unsigned int i;
13601   for (i = 0; i < htab->top_id; i++)
13602     {
13603       sec = htab->stub_group[i].stub_sec;
13604       /* Only process it once, in its link_sec slot.  */
13605       if (sec && i == htab->stub_group[i].link_sec->id)
13606 	{
13607 	  osec = sec->output_section;
13608 	  elf32_arm_write_section (abfd, info, sec, sec->contents);
13609 	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
13610 					  sec->output_offset, sec->size))
13611 	    return FALSE;
13612 	}
13613     }
13614 
13615   /* Write out any glue sections now that we have created all the
13616      stubs.  */
13617   if (globals->bfd_of_glue_owner != NULL)
13618     {
13619       if (! elf32_arm_output_glue_section (info, abfd,
13620 					   globals->bfd_of_glue_owner,
13621 					   ARM2THUMB_GLUE_SECTION_NAME))
13622 	return FALSE;
13623 
13624       if (! elf32_arm_output_glue_section (info, abfd,
13625 					   globals->bfd_of_glue_owner,
13626 					   THUMB2ARM_GLUE_SECTION_NAME))
13627 	return FALSE;
13628 
13629       if (! elf32_arm_output_glue_section (info, abfd,
13630 					   globals->bfd_of_glue_owner,
13631 					   VFP11_ERRATUM_VENEER_SECTION_NAME))
13632 	return FALSE;
13633 
13634       if (! elf32_arm_output_glue_section (info, abfd,
13635 					   globals->bfd_of_glue_owner,
13636 					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13637 	return FALSE;
13638 
13639       if (! elf32_arm_output_glue_section (info, abfd,
13640 					   globals->bfd_of_glue_owner,
13641 					   ARM_BX_GLUE_SECTION_NAME))
13642 	return FALSE;
13643     }
13644 
13645   return TRUE;
13646 }
13647 
13648 /* Return a best guess for the machine number based on the attributes.  */
13649 
13650 static unsigned int
13651 bfd_arm_get_mach_from_attributes (bfd * abfd)
13652 {
13653   int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13654 
13655   switch (arch)
13656     {
13657     case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13658     case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13659     case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13660     case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13661 
13662     case TAG_CPU_ARCH_V5TE:
13663       {
13664 	char * name;
13665 
13666 	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13667 	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13668 
13669 	if (name)
13670 	  {
13671 	    if (strcmp (name, "IWMMXT2") == 0)
13672 	      return bfd_mach_arm_iWMMXt2;
13673 
13674 	    if (strcmp (name, "IWMMXT") == 0)
13675 	      return bfd_mach_arm_iWMMXt;
13676 
13677 	    if (strcmp (name, "XSCALE") == 0)
13678 	      {
13679 		int wmmx;
13680 
13681 		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13682 		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13683 		switch (wmmx)
13684 		  {
13685 		  case 1: return bfd_mach_arm_iWMMXt;
13686 		  case 2: return bfd_mach_arm_iWMMXt2;
13687 		  default: return bfd_mach_arm_XScale;
13688 		  }
13689 	      }
13690 	  }
13691 
13692 	return bfd_mach_arm_5TE;
13693       }
13694 
13695     case TAG_CPU_ARCH_V5TEJ:
13696 	return bfd_mach_arm_5TEJ;
13697     case TAG_CPU_ARCH_V6:
13698 	return bfd_mach_arm_6;
13699     case TAG_CPU_ARCH_V6KZ:
13700 	return bfd_mach_arm_6KZ;
13701     case TAG_CPU_ARCH_V6T2:
13702 	return bfd_mach_arm_6T2;
13703     case TAG_CPU_ARCH_V6K:
13704 	return bfd_mach_arm_6K;
13705     case TAG_CPU_ARCH_V7:
13706 	return bfd_mach_arm_7;
13707     case TAG_CPU_ARCH_V6_M:
13708 	return bfd_mach_arm_6M;
13709     case TAG_CPU_ARCH_V6S_M:
13710 	return bfd_mach_arm_6SM;
13711     case TAG_CPU_ARCH_V7E_M:
13712 	return bfd_mach_arm_7EM;
13713     case TAG_CPU_ARCH_V8:
13714 	return bfd_mach_arm_8;
13715     case TAG_CPU_ARCH_V8R:
13716 	return bfd_mach_arm_8R;
13717     case TAG_CPU_ARCH_V8M_BASE:
13718 	return bfd_mach_arm_8M_BASE;
13719     case TAG_CPU_ARCH_V8M_MAIN:
13720 	return bfd_mach_arm_8M_MAIN;
13721 
13722     default:
13723       /* Force entry to be added for any new known Tag_CPU_arch value.  */
13724       BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13725 
13726       /* Unknown Tag_CPU_arch value.  */
13727       return bfd_mach_arm_unknown;
13728     }
13729 }
13730 
13731 /* Set the right machine number.  */
13732 
13733 static bfd_boolean
13734 elf32_arm_object_p (bfd *abfd)
13735 {
13736   unsigned int mach;
13737 
13738   mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13739 
13740   if (mach == bfd_mach_arm_unknown)
13741     {
13742       if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13743 	mach = bfd_mach_arm_ep9312;
13744       else
13745 	mach = bfd_arm_get_mach_from_attributes (abfd);
13746     }
13747 
13748   bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13749   return TRUE;
13750 }
13751 
13752 /* Function to keep ARM specific flags in the ELF header.  */
13753 
13754 static bfd_boolean
13755 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13756 {
13757   if (elf_flags_init (abfd)
13758       && elf_elfheader (abfd)->e_flags != flags)
13759     {
13760       if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13761 	{
13762 	  if (flags & EF_ARM_INTERWORK)
13763 	    _bfd_error_handler
13764 	      (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13765 	       abfd);
13766 	  else
13767 	    _bfd_error_handler
13768 	      (_("warning: clearing the interworking flag of %pB due to outside request"),
13769 	       abfd);
13770 	}
13771     }
13772   else
13773     {
13774       elf_elfheader (abfd)->e_flags = flags;
13775       elf_flags_init (abfd) = TRUE;
13776     }
13777 
13778   return TRUE;
13779 }
13780 
13781 /* Copy backend specific data from one object module to another.  */
13782 
13783 static bfd_boolean
13784 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13785 {
13786   flagword in_flags;
13787   flagword out_flags;
13788 
13789   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13790     return TRUE;
13791 
13792   in_flags  = elf_elfheader (ibfd)->e_flags;
13793   out_flags = elf_elfheader (obfd)->e_flags;
13794 
13795   if (elf_flags_init (obfd)
13796       && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
13797       && in_flags != out_flags)
13798     {
13799       /* Cannot mix APCS26 and APCS32 code.  */
13800       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
13801 	return FALSE;
13802 
13803       /* Cannot mix float APCS and non-float APCS code.  */
13804       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
13805 	return FALSE;
13806 
13807       /* If the src and dest have different interworking flags
13808 	 then turn off the interworking bit.  */
13809       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
13810 	{
13811 	  if (out_flags & EF_ARM_INTERWORK)
13812 	    _bfd_error_handler
13813 	      (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
13814 	       obfd, ibfd);
13815 
13816 	  in_flags &= ~EF_ARM_INTERWORK;
13817 	}
13818 
13819       /* Likewise for PIC, though don't warn for this case.  */
13820       if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
13821 	in_flags &= ~EF_ARM_PIC;
13822     }
13823 
13824   elf_elfheader (obfd)->e_flags = in_flags;
13825   elf_flags_init (obfd) = TRUE;
13826 
13827   return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
13828 }
13829 
13830 /* Values for Tag_ABI_PCS_R9_use.  */
13831 enum
13832 {
13833   AEABI_R9_V6,
13834   AEABI_R9_SB,
13835   AEABI_R9_TLS,
13836   AEABI_R9_unused
13837 };
13838 
13839 /* Values for Tag_ABI_PCS_RW_data.  */
13840 enum
13841 {
13842   AEABI_PCS_RW_data_absolute,
13843   AEABI_PCS_RW_data_PCrel,
13844   AEABI_PCS_RW_data_SBrel,
13845   AEABI_PCS_RW_data_unused
13846 };
13847 
13848 /* Values for Tag_ABI_enum_size.  */
13849 enum
13850 {
13851   AEABI_enum_unused,
13852   AEABI_enum_short,
13853   AEABI_enum_wide,
13854   AEABI_enum_forced_wide
13855 };
13856 
13857 /* Determine whether an object attribute tag takes an integer, a
13858    string or both.  */
13859 
13860 static int
13861 elf32_arm_obj_attrs_arg_type (int tag)
13862 {
13863   if (tag == Tag_compatibility)
13864     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
13865   else if (tag == Tag_nodefaults)
13866     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
13867   else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
13868     return ATTR_TYPE_FLAG_STR_VAL;
13869   else if (tag < 32)
13870     return ATTR_TYPE_FLAG_INT_VAL;
13871   else
13872     return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
13873 }
13874 
13875 /* The ABI defines that Tag_conformance should be emitted first, and that
13876    Tag_nodefaults should be second (if either is defined).  This sets those
13877    two positions, and bumps up the position of all the remaining tags to
13878    compensate.  */
13879 static int
13880 elf32_arm_obj_attrs_order (int num)
13881 {
13882   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
13883     return Tag_conformance;
13884   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
13885     return Tag_nodefaults;
13886   if ((num - 2) < Tag_nodefaults)
13887     return num - 2;
13888   if ((num - 1) < Tag_conformance)
13889     return num - 1;
13890   return num;
13891 }
13892 
13893 /* Attribute numbers >=64 (mod 128) can be safely ignored.  */
13894 static bfd_boolean
13895 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
13896 {
13897   if ((tag & 127) < 64)
13898     {
13899       _bfd_error_handler
13900 	(_("%pB: unknown mandatory EABI object attribute %d"),
13901 	 abfd, tag);
13902       bfd_set_error (bfd_error_bad_value);
13903       return FALSE;
13904     }
13905   else
13906     {
13907       _bfd_error_handler
13908 	(_("warning: %pB: unknown EABI object attribute %d"),
13909 	 abfd, tag);
13910       return TRUE;
13911     }
13912 }
13913 
13914 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
13915    Returns -1 if no architecture could be read.  */
13916 
13917 static int
13918 get_secondary_compatible_arch (bfd *abfd)
13919 {
13920   obj_attribute *attr =
13921     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13922 
13923   /* Note: the tag and its argument below are uleb128 values, though
13924      currently-defined values fit in one byte for each.  */
13925   if (attr->s
13926       && attr->s[0] == Tag_CPU_arch
13927       && (attr->s[1] & 128) != 128
13928       && attr->s[2] == 0)
13929    return attr->s[1];
13930 
13931   /* This tag is "safely ignorable", so don't complain if it looks funny.  */
13932   return -1;
13933 }
13934 
13935 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
13936    The tag is removed if ARCH is -1.  */
13937 
13938 static void
13939 set_secondary_compatible_arch (bfd *abfd, int arch)
13940 {
13941   obj_attribute *attr =
13942     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
13943 
13944   if (arch == -1)
13945     {
13946       attr->s = NULL;
13947       return;
13948     }
13949 
13950   /* Note: the tag and its argument below are uleb128 values, though
13951      currently-defined values fit in one byte for each.  */
13952   if (!attr->s)
13953     attr->s = (char *) bfd_alloc (abfd, 3);
13954   attr->s[0] = Tag_CPU_arch;
13955   attr->s[1] = arch;
13956   attr->s[2] = '\0';
13957 }
13958 
13959 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
13960    into account.  */
13961 
13962 static int
13963 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
13964 		      int newtag, int secondary_compat)
13965 {
13966 #define T(X) TAG_CPU_ARCH_##X
13967   int tagl, tagh, result;
13968   const int v6t2[] =
13969     {
13970       T(V6T2),   /* PRE_V4.  */
13971       T(V6T2),   /* V4.  */
13972       T(V6T2),   /* V4T.  */
13973       T(V6T2),   /* V5T.  */
13974       T(V6T2),   /* V5TE.  */
13975       T(V6T2),   /* V5TEJ.  */
13976       T(V6T2),   /* V6.  */
13977       T(V7),     /* V6KZ.  */
13978       T(V6T2)    /* V6T2.  */
13979     };
13980   const int v6k[] =
13981     {
13982       T(V6K),    /* PRE_V4.  */
13983       T(V6K),    /* V4.  */
13984       T(V6K),    /* V4T.  */
13985       T(V6K),    /* V5T.  */
13986       T(V6K),    /* V5TE.  */
13987       T(V6K),    /* V5TEJ.  */
13988       T(V6K),    /* V6.  */
13989       T(V6KZ),   /* V6KZ.  */
13990       T(V7),     /* V6T2.  */
13991       T(V6K)     /* V6K.  */
13992     };
13993   const int v7[] =
13994     {
13995       T(V7),     /* PRE_V4.  */
13996       T(V7),     /* V4.  */
13997       T(V7),     /* V4T.  */
13998       T(V7),     /* V5T.  */
13999       T(V7),     /* V5TE.  */
14000       T(V7),     /* V5TEJ.  */
14001       T(V7),     /* V6.  */
14002       T(V7),     /* V6KZ.  */
14003       T(V7),     /* V6T2.  */
14004       T(V7),     /* V6K.  */
14005       T(V7)      /* V7.  */
14006     };
14007   const int v6_m[] =
14008     {
14009       -1,	 /* PRE_V4.  */
14010       -1,	 /* V4.  */
14011       T(V6K),    /* V4T.  */
14012       T(V6K),    /* V5T.  */
14013       T(V6K),    /* V5TE.  */
14014       T(V6K),    /* V5TEJ.  */
14015       T(V6K),    /* V6.  */
14016       T(V6KZ),   /* V6KZ.  */
14017       T(V7),     /* V6T2.  */
14018       T(V6K),    /* V6K.  */
14019       T(V7),     /* V7.  */
14020       T(V6_M)    /* V6_M.  */
14021     };
14022   const int v6s_m[] =
14023     {
14024       -1,	 /* PRE_V4.  */
14025       -1,	 /* V4.  */
14026       T(V6K),    /* V4T.  */
14027       T(V6K),    /* V5T.  */
14028       T(V6K),    /* V5TE.  */
14029       T(V6K),    /* V5TEJ.  */
14030       T(V6K),    /* V6.  */
14031       T(V6KZ),   /* V6KZ.  */
14032       T(V7),     /* V6T2.  */
14033       T(V6K),    /* V6K.  */
14034       T(V7),     /* V7.  */
14035       T(V6S_M),  /* V6_M.  */
14036       T(V6S_M)   /* V6S_M.  */
14037     };
14038   const int v7e_m[] =
14039     {
14040       -1,	 /* PRE_V4.  */
14041       -1,	 /* V4.  */
14042       T(V7E_M),  /* V4T.  */
14043       T(V7E_M),  /* V5T.  */
14044       T(V7E_M),  /* V5TE.  */
14045       T(V7E_M),  /* V5TEJ.  */
14046       T(V7E_M),  /* V6.  */
14047       T(V7E_M),  /* V6KZ.  */
14048       T(V7E_M),  /* V6T2.  */
14049       T(V7E_M),  /* V6K.  */
14050       T(V7E_M),  /* V7.  */
14051       T(V7E_M),  /* V6_M.  */
14052       T(V7E_M),  /* V6S_M.  */
14053       T(V7E_M)   /* V7E_M.  */
14054     };
14055   const int v8[] =
14056     {
14057       T(V8),		/* PRE_V4.  */
14058       T(V8),		/* V4.  */
14059       T(V8),		/* V4T.  */
14060       T(V8),		/* V5T.  */
14061       T(V8),		/* V5TE.  */
14062       T(V8),		/* V5TEJ.  */
14063       T(V8),		/* V6.  */
14064       T(V8),		/* V6KZ.  */
14065       T(V8),		/* V6T2.  */
14066       T(V8),		/* V6K.  */
14067       T(V8),		/* V7.  */
14068       T(V8),		/* V6_M.  */
14069       T(V8),		/* V6S_M.  */
14070       T(V8),		/* V7E_M.  */
14071       T(V8)		/* V8.  */
14072     };
14073   const int v8r[] =
14074     {
14075       T(V8R),		/* PRE_V4.  */
14076       T(V8R),		/* V4.  */
14077       T(V8R),		/* V4T.  */
14078       T(V8R),		/* V5T.  */
14079       T(V8R),		/* V5TE.  */
14080       T(V8R),		/* V5TEJ.  */
14081       T(V8R),		/* V6.  */
14082       T(V8R),		/* V6KZ.  */
14083       T(V8R),		/* V6T2.  */
14084       T(V8R),		/* V6K.  */
14085       T(V8R),		/* V7.  */
14086       T(V8R),		/* V6_M.  */
14087       T(V8R),		/* V6S_M.  */
14088       T(V8R),		/* V7E_M.  */
14089       T(V8),		/* V8.  */
14090       T(V8R),		/* V8R.  */
14091     };
14092   const int v8m_baseline[] =
14093     {
14094       -1,		/* PRE_V4.  */
14095       -1,		/* V4.  */
14096       -1,		/* V4T.  */
14097       -1,		/* V5T.  */
14098       -1,		/* V5TE.  */
14099       -1,		/* V5TEJ.  */
14100       -1,		/* V6.  */
14101       -1,		/* V6KZ.  */
14102       -1,		/* V6T2.  */
14103       -1,		/* V6K.  */
14104       -1,		/* V7.  */
14105       T(V8M_BASE),	/* V6_M.  */
14106       T(V8M_BASE),	/* V6S_M.  */
14107       -1,		/* V7E_M.  */
14108       -1,		/* V8.  */
14109       -1,		/* V8R.  */
14110       T(V8M_BASE)	/* V8-M BASELINE.  */
14111     };
14112   const int v8m_mainline[] =
14113     {
14114       -1,		/* PRE_V4.  */
14115       -1,		/* V4.  */
14116       -1,		/* V4T.  */
14117       -1,		/* V5T.  */
14118       -1,		/* V5TE.  */
14119       -1,		/* V5TEJ.  */
14120       -1,		/* V6.  */
14121       -1,		/* V6KZ.  */
14122       -1,		/* V6T2.  */
14123       -1,		/* V6K.  */
14124       T(V8M_MAIN),	/* V7.  */
14125       T(V8M_MAIN),	/* V6_M.  */
14126       T(V8M_MAIN),	/* V6S_M.  */
14127       T(V8M_MAIN),	/* V7E_M.  */
14128       -1,		/* V8.  */
14129       -1,		/* V8R.  */
14130       T(V8M_MAIN),	/* V8-M BASELINE.  */
14131       T(V8M_MAIN)	/* V8-M MAINLINE.  */
14132     };
14133   const int v4t_plus_v6_m[] =
14134     {
14135       -1,		/* PRE_V4.  */
14136       -1,		/* V4.  */
14137       T(V4T),		/* V4T.  */
14138       T(V5T),		/* V5T.  */
14139       T(V5TE),		/* V5TE.  */
14140       T(V5TEJ),		/* V5TEJ.  */
14141       T(V6),		/* V6.  */
14142       T(V6KZ),		/* V6KZ.  */
14143       T(V6T2),		/* V6T2.  */
14144       T(V6K),		/* V6K.  */
14145       T(V7),		/* V7.  */
14146       T(V6_M),		/* V6_M.  */
14147       T(V6S_M),		/* V6S_M.  */
14148       T(V7E_M),		/* V7E_M.  */
14149       T(V8),		/* V8.  */
14150       -1,		/* V8R.  */
14151       T(V8M_BASE),	/* V8-M BASELINE.  */
14152       T(V8M_MAIN),	/* V8-M MAINLINE.  */
14153       T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
14154     };
14155   const int *comb[] =
14156     {
14157       v6t2,
14158       v6k,
14159       v7,
14160       v6_m,
14161       v6s_m,
14162       v7e_m,
14163       v8,
14164       v8r,
14165       v8m_baseline,
14166       v8m_mainline,
14167       /* Pseudo-architecture.  */
14168       v4t_plus_v6_m
14169     };
14170 
14171   /* Check we've not got a higher architecture than we know about.  */
14172 
14173   if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14174     {
14175       _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14176       return -1;
14177     }
14178 
14179   /* Override old tag if we have a Tag_also_compatible_with on the output.  */
14180 
14181   if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14182       || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14183     oldtag = T(V4T_PLUS_V6_M);
14184 
14185   /* And override the new tag if we have a Tag_also_compatible_with on the
14186      input.  */
14187 
14188   if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14189       || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14190     newtag = T(V4T_PLUS_V6_M);
14191 
14192   tagl = (oldtag < newtag) ? oldtag : newtag;
14193   result = tagh = (oldtag > newtag) ? oldtag : newtag;
14194 
14195   /* Architectures before V6KZ add features monotonically.  */
14196   if (tagh <= TAG_CPU_ARCH_V6KZ)
14197     return result;
14198 
14199   result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14200 
14201   /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14202      as the canonical version.  */
14203   if (result == T(V4T_PLUS_V6_M))
14204     {
14205       result = T(V4T);
14206       *secondary_compat_out = T(V6_M);
14207     }
14208   else
14209     *secondary_compat_out = -1;
14210 
14211   if (result == -1)
14212     {
14213       _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14214 			  ibfd, oldtag, newtag);
14215       return -1;
14216     }
14217 
14218   return result;
14219 #undef T
14220 }
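
/* Worked example of the table lookup above:  combining an output
   Tag_CPU_arch of V6K with an input of V6T2 gives tagl = V6T2 and
   tagh = V6K; tagh is above V6KZ, so comb[V6K - V6T2] selects the
   v6k[] row, and v6k[V6T2] yields V7, the earliest architecture that
   provides both Thumb-2 and the V6K extensions.  */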
14221 
14222 /* Query attributes object to see if integer divide instructions may be
14223    present in an object.  */
14224 static bfd_boolean
14225 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14226 {
14227   int arch = attr[Tag_CPU_arch].i;
14228   int profile = attr[Tag_CPU_arch_profile].i;
14229 
14230   switch (attr[Tag_DIV_use].i)
14231     {
14232     case 0:
14233       /* Integer divide allowed if the instruction is contained in the architecture.  */
14234       if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14235 	return TRUE;
14236       else if (arch >= TAG_CPU_ARCH_V7E_M)
14237 	return TRUE;
14238       else
14239 	return FALSE;
14240 
14241     case 1:
14242       /* Integer divide explicitly prohibited.  */
14243       return FALSE;
14244 
14245     default:
14246       /* Unrecognised case - treat as allowing divide everywhere.  */
14247     case 2:
14248       /* Integer divide allowed in ARM state.  */
14249       return TRUE;
14250     }
14251 }
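
/* For example, with Tag_DIV_use == 0 an ARMv7 object is only assumed to
   contain SDIV/UDIV when its Tag_CPU_arch_profile is 'R' or 'M'; a v7-A
   object yields FALSE here, while any architecture at or above ARMv7E-M
   yields TRUE.  */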
14252 
14253 /* Query attributes object to see if integer divide instructions are
14254    forbidden to be in the object.  This is not the inverse of
14255    elf32_arm_attributes_accept_div.  */
14256 static bfd_boolean
14257 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14258 {
14259   return attr[Tag_DIV_use].i == 1;
14260 }
14261 
14262 /* Merge EABI object attributes from IBFD into OBFD.  Raise an error if there
14263    are conflicting attributes.  */
14264 
14265 static bfd_boolean
14266 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14267 {
14268   bfd *obfd = info->output_bfd;
14269   obj_attribute *in_attr;
14270   obj_attribute *out_attr;
14271   /* Some tags have 0 = don't care, 1 = strong requirement,
14272      2 = weak requirement.  */
14273   static const int order_021[3] = {0, 2, 1};
14274   int i;
14275   bfd_boolean result = TRUE;
14276   const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14277 
14278   /* Skip the linker stubs file.  This preserves previous behavior
14279      of accepting unknown attributes in the first input file - but
14280      is that a bug?  */
14281   if (ibfd->flags & BFD_LINKER_CREATED)
14282     return TRUE;
14283 
14284   /* Skip any input that doesn't have an attribute section.
14285      This makes it possible to link object files without an attribute
14286      section with any others.  */
14287   if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14288     return TRUE;
14289 
14290   if (!elf_known_obj_attributes_proc (obfd)[0].i)
14291     {
14292       /* This is the first object.  Copy the attributes.  */
14293       _bfd_elf_copy_obj_attributes (ibfd, obfd);
14294 
14295       out_attr = elf_known_obj_attributes_proc (obfd);
14296 
14297       /* Use the Tag_null value to indicate the attributes have been
14298 	 initialized.  */
14299       out_attr[0].i = 1;
14300 
14301       /* We do not output objects with Tag_MPextension_use_legacy - we move
14302 	 the attribute's value to Tag_MPextension_use.  */
14303       if (out_attr[Tag_MPextension_use_legacy].i != 0)
14304 	{
14305 	  if (out_attr[Tag_MPextension_use].i != 0
14306 	      && out_attr[Tag_MPextension_use_legacy].i
14307 		!= out_attr[Tag_MPextension_use].i)
14308 	    {
14309 	      _bfd_error_handler
14310 		(_("Error: %pB has both the current and legacy "
14311 		   "Tag_MPextension_use attributes"), ibfd);
14312 	      result = FALSE;
14313 	    }
14314 
14315 	  out_attr[Tag_MPextension_use] =
14316 	    out_attr[Tag_MPextension_use_legacy];
14317 	  out_attr[Tag_MPextension_use_legacy].type = 0;
14318 	  out_attr[Tag_MPextension_use_legacy].i = 0;
14319 	}
14320 
14321       return result;
14322     }
14323 
14324   in_attr = elf_known_obj_attributes_proc (ibfd);
14325   out_attr = elf_known_obj_attributes_proc (obfd);
14326   /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
14327   if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14328     {
14329       /* Ignore mismatches if the object doesn't use floating point or is
14330 	 floating point ABI independent.  */
14331       if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14332 	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14333 	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14334 	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14335       else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14336 	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14337 	{
14338 	  _bfd_error_handler
14339 	    (_("error: %pB uses VFP register arguments, %pB does not"),
14340 	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14341 	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14342 	  result = FALSE;
14343 	}
14344     }
14345 
14346   for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14347     {
14348       /* Merge this attribute with existing attributes.  */
14349       switch (i)
14350 	{
14351 	case Tag_CPU_raw_name:
14352 	case Tag_CPU_name:
14353 	  /* These are merged after Tag_CPU_arch.  */
14354 	  break;
14355 
14356 	case Tag_ABI_optimization_goals:
14357 	case Tag_ABI_FP_optimization_goals:
14358 	  /* Use the first value seen.  */
14359 	  break;
14360 
14361 	case Tag_CPU_arch:
14362 	  {
14363 	    int secondary_compat = -1, secondary_compat_out = -1;
14364 	    unsigned int saved_out_attr = out_attr[i].i;
14365 	    int arch_attr;
14366 	    static const char *name_table[] =
14367 	      {
14368 		/* These aren't real CPU names, but we can't guess
14369 		   that from the architecture version alone.  */
14370 		"Pre v4",
14371 		"ARM v4",
14372 		"ARM v4T",
14373 		"ARM v5T",
14374 		"ARM v5TE",
14375 		"ARM v5TEJ",
14376 		"ARM v6",
14377 		"ARM v6KZ",
14378 		"ARM v6T2",
14379 		"ARM v6K",
14380 		"ARM v7",
14381 		"ARM v6-M",
14382 		"ARM v6S-M",
14383 		"ARM v8",
14384 		"",
14385 		"ARM v8-M.baseline",
14386 		"ARM v8-M.mainline",
14387 	    };
14388 
14389 	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
14390 	    secondary_compat = get_secondary_compatible_arch (ibfd);
14391 	    secondary_compat_out = get_secondary_compatible_arch (obfd);
14392 	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14393 					      &secondary_compat_out,
14394 					      in_attr[i].i,
14395 					      secondary_compat);
14396 
14397 	    /* Return with error if failed to merge.  */
14398 	    if (arch_attr == -1)
14399 	      return FALSE;
14400 
14401 	    out_attr[i].i = arch_attr;
14402 
14403 	    set_secondary_compatible_arch (obfd, secondary_compat_out);
14404 
14405 	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
14406 	    if (out_attr[i].i == saved_out_attr)
14407 	      ; /* Leave the names alone.  */
14408 	    else if (out_attr[i].i == in_attr[i].i)
14409 	      {
14410 		/* The output architecture has been changed to match the
14411 		   input architecture.  Use the input names.  */
14412 		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14413 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14414 		  : NULL;
14415 		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14416 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14417 		  : NULL;
14418 	      }
14419 	    else
14420 	      {
14421 		out_attr[Tag_CPU_name].s = NULL;
14422 		out_attr[Tag_CPU_raw_name].s = NULL;
14423 	      }
14424 
14425 	    /* If we still don't have a value for Tag_CPU_name,
14426 	       make one up now.  Tag_CPU_raw_name remains blank.  */
14427 	    if (out_attr[Tag_CPU_name].s == NULL
14428 		&& out_attr[i].i < ARRAY_SIZE (name_table))
14429 	      out_attr[Tag_CPU_name].s =
14430 		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14431 	  }
14432 	  break;
14433 
14434 	case Tag_ARM_ISA_use:
14435 	case Tag_THUMB_ISA_use:
14436 	case Tag_WMMX_arch:
14437 	case Tag_Advanced_SIMD_arch:
14438 	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
14439 	case Tag_ABI_FP_rounding:
14440 	case Tag_ABI_FP_exceptions:
14441 	case Tag_ABI_FP_user_exceptions:
14442 	case Tag_ABI_FP_number_model:
14443 	case Tag_FP_HP_extension:
14444 	case Tag_CPU_unaligned_access:
14445 	case Tag_T2EE_use:
14446 	case Tag_MPextension_use:
14447 	  /* Use the largest value specified.  */
14448 	  if (in_attr[i].i > out_attr[i].i)
14449 	    out_attr[i].i = in_attr[i].i;
14450 	  break;
14451 
14452 	case Tag_ABI_align_preserved:
14453 	case Tag_ABI_PCS_RO_data:
14454 	  /* Use the smallest value specified.  */
14455 	  if (in_attr[i].i < out_attr[i].i)
14456 	    out_attr[i].i = in_attr[i].i;
14457 	  break;
14458 
14459 	case Tag_ABI_align_needed:
14460 	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14461 	      && (in_attr[Tag_ABI_align_preserved].i == 0
14462 		  || out_attr[Tag_ABI_align_preserved].i == 0))
14463 	    {
14464 	      /* This error message should be enabled once all non-conformant
14465 		 binaries in the toolchain have had the attributes set
14466 		 properly.
14467 	      _bfd_error_handler
14468 		(_("error: %pB: 8-byte data alignment conflicts with %pB"),
14469 		 obfd, ibfd);
14470 	      result = FALSE; */
14471 	    }
14472 	  /* Fall through.  */
14473 	case Tag_ABI_FP_denormal:
14474 	case Tag_ABI_PCS_GOT_use:
14475 	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14476 	     value if greater than 2 (for future-proofing).  */
14477 	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14478 	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14479 		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14480 	    out_attr[i].i = in_attr[i].i;
14481 	  break;
14482 
14483 	case Tag_Virtualization_use:
14484 	  /* The virtualization tag effectively stores two bits of
14485 	     information: the intended use of TrustZone (in bit 0), and the
14486 	     intended use of Virtualization (in bit 1).  */
14487 	  if (out_attr[i].i == 0)
14488 	    out_attr[i].i = in_attr[i].i;
14489 	  else if (in_attr[i].i != 0
14490 		   && in_attr[i].i != out_attr[i].i)
14491 	    {
14492 	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14493 		out_attr[i].i = 3;
14494 	      else
14495 		{
14496 		  _bfd_error_handler
14497 		    (_("error: %pB: unable to merge virtualization attributes "
14498 		       "with %pB"),
14499 		     obfd, ibfd);
14500 		  result = FALSE;
14501 		}
14502 	    }
14503 	  break;
14504 
14505 	case Tag_CPU_arch_profile:
14506 	  if (out_attr[i].i != in_attr[i].i)
14507 	    {
14508 	      /* 0 will merge with anything.
14509 		 'A' and 'S' merge to 'A'.
14510 		 'R' and 'S' merge to 'R'.
14511 		 'M' and 'A|R|S' is an error.  */
14512 	      if (out_attr[i].i == 0
14513 		  || (out_attr[i].i == 'S'
14514 		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14515 		out_attr[i].i = in_attr[i].i;
14516 	      else if (in_attr[i].i == 0
14517 		       || (in_attr[i].i == 'S'
14518 			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14519 		; /* Do nothing.  */
14520 	      else
14521 		{
14522 		  _bfd_error_handler
14523 		    (_("error: %pB: conflicting architecture profiles %c/%c"),
14524 		     ibfd,
14525 		     in_attr[i].i ? in_attr[i].i : '0',
14526 		     out_attr[i].i ? out_attr[i].i : '0');
14527 		  result = FALSE;
14528 		}
14529 	    }
14530 	  break;
14531 
14532 	case Tag_DSP_extension:
14533 	  /* No need to change output value if any of:
14534 	     - pre (<=) ARMv5T input architecture (does not have DSP)
14535 	     - M profile input that is not ARMv7E-M and does not have DSP.  */
14536 	  if (in_attr[Tag_CPU_arch].i <= 3
14537 	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
14538 		  && in_attr[Tag_CPU_arch].i != 13
14539 		  && in_attr[i].i == 0))
14540 	    ; /* Do nothing.  */
14541 	  /* Output value should be 0 if DSP is part of the architecture, i.e.
14542 	     - post (>=) ARMv5te architecture output
14543 	     - A, R or S profile output or ARMv7E-M output architecture.  */
14544 	  else if (out_attr[Tag_CPU_arch].i >= 4
14545 		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
14546 		       || out_attr[Tag_CPU_arch_profile].i == 'R'
14547 		       || out_attr[Tag_CPU_arch_profile].i == 'S'
14548 		       || out_attr[Tag_CPU_arch].i == 13))
14549 	    out_attr[i].i = 0;
14550 	  /* Otherwise, DSP instructions are added and not part of output
14551 	     architecture.  */
14552 	  else
14553 	    out_attr[i].i = 1;
14554 	  break;
14555 
14556 	case Tag_FP_arch:
14557 	    {
14558 	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14559 		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14560 		 when it's 0.  It might mean absence of FP hardware if
14561 		 Tag_FP_arch is zero.  */
14562 
14563 #define VFP_VERSION_COUNT 9
14564 	      static const struct
14565 	      {
14566 		  int ver;
14567 		  int regs;
14568 	      } vfp_versions[VFP_VERSION_COUNT] =
14569 		{
14570 		  {0, 0},
14571 		  {1, 16},
14572 		  {2, 16},
14573 		  {3, 32},
14574 		  {3, 16},
14575 		  {4, 32},
14576 		  {4, 16},
14577 		  {8, 32},
14578 		  {8, 16}
14579 		};
14580 	      int ver;
14581 	      int regs;
14582 	      int newval;
14583 
14584 	      /* If the output has no requirement about FP hardware,
14585 		 follow the requirement of the input.  */
14586 	      if (out_attr[i].i == 0)
14587 		{
14588 		  /* This assert is still reasonable; we shouldn't
14589 		     produce the suspicious build attribute
14590 		     combination ourselves (see below for in_attr).  */
14591 		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14592 		  out_attr[i].i = in_attr[i].i;
14593 		  out_attr[Tag_ABI_HardFP_use].i
14594 		    = in_attr[Tag_ABI_HardFP_use].i;
14595 		  break;
14596 		}
14597 	      /* If the input has no requirement about FP hardware, do
14598 		 nothing.  */
14599 	      else if (in_attr[i].i == 0)
14600 		{
14601 		  /* We used to assert that Tag_ABI_HardFP_use was
14602 		     zero here, but we should never assert when
14603 		     consuming an object file that has suspicious
14604 		     build attributes.  The single precision variant
14605 		     of 'no FP architecture' is still 'no FP
14606 		     architecture', so we just ignore the tag in this
14607 		     case.  */
14608 		  break;
14609 		}
14610 
14611 	      /* Both the input and the output have nonzero Tag_FP_arch.
14612 		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */
14613 
14614 	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
14615 		 do nothing.  */
14616 	      if (in_attr[Tag_ABI_HardFP_use].i == 0
14617 		  && out_attr[Tag_ABI_HardFP_use].i == 0)
14618 		;
14619 	      /* If the input and the output have different Tag_ABI_HardFP_use,
14620 		 the combination of them is 0 (implied by Tag_FP_arch).  */
14621 	      else if (in_attr[Tag_ABI_HardFP_use].i
14622 		       != out_attr[Tag_ABI_HardFP_use].i)
14623 		out_attr[Tag_ABI_HardFP_use].i = 0;
14624 
14625 	      /* Now we can handle Tag_FP_arch.  */
14626 
14627 	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14628 		 pick the biggest.  */
14629 	      if (in_attr[i].i >= VFP_VERSION_COUNT
14630 		  && in_attr[i].i > out_attr[i].i)
14631 		{
14632 		  out_attr[i] = in_attr[i];
14633 		  break;
14634 		}
14635 	      /* The output uses the superset of input features
14636 		 (ISA version) and registers.  */
14637 	      ver = vfp_versions[in_attr[i].i].ver;
14638 	      if (ver < vfp_versions[out_attr[i].i].ver)
14639 		ver = vfp_versions[out_attr[i].i].ver;
14640 	      regs = vfp_versions[in_attr[i].i].regs;
14641 	      if (regs < vfp_versions[out_attr[i].i].regs)
14642 		regs = vfp_versions[out_attr[i].i].regs;
14643 	      /* This assumes all possible supersets are also valid
14644 		 options.  */
14645 	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14646 		{
14647 		  if (regs == vfp_versions[newval].regs
14648 		      && ver == vfp_versions[newval].ver)
14649 		    break;
14650 		}
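	      /* For instance, merging VFPv3-D16 (value 4, {3, 16}) with
		 VFPv4-D16 (value 6, {4, 16}) gives ver = 4 and regs = 16,
		 which the loop above maps back to value 6 (VFPv4-D16).  */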
14651 	      out_attr[i].i = newval;
14652 	    }
14653 	  break;
14654 	case Tag_PCS_config:
14655 	  if (out_attr[i].i == 0)
14656 	    out_attr[i].i = in_attr[i].i;
14657 	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14658 	    {
14659 	      /* It's sometimes ok to mix different configs, so this is only
14660 		 a warning.  */
14661 	      _bfd_error_handler
14662 		(_("warning: %pB: conflicting platform configuration"), ibfd);
14663 	    }
14664 	  break;
14665 	case Tag_ABI_PCS_R9_use:
14666 	  if (in_attr[i].i != out_attr[i].i
14667 	      && out_attr[i].i != AEABI_R9_unused
14668 	      && in_attr[i].i != AEABI_R9_unused)
14669 	    {
14670 	      _bfd_error_handler
14671 		(_("error: %pB: conflicting use of R9"), ibfd);
14672 	      result = FALSE;
14673 	    }
14674 	  if (out_attr[i].i == AEABI_R9_unused)
14675 	    out_attr[i].i = in_attr[i].i;
14676 	  break;
14677 	case Tag_ABI_PCS_RW_data:
14678 	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14679 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14680 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14681 	    {
14682 	      _bfd_error_handler
14683 		(_("error: %pB: SB relative addressing conflicts with use of R9"),
14684 		 ibfd);
14685 	      result = FALSE;
14686 	    }
14687 	  /* Use the smallest value specified.  */
14688 	  if (in_attr[i].i < out_attr[i].i)
14689 	    out_attr[i].i = in_attr[i].i;
14690 	  break;
14691 	case Tag_ABI_PCS_wchar_t:
14692 	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14693 	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14694 	    {
14695 	      _bfd_error_handler
14696 		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14697 		 ibfd, in_attr[i].i, out_attr[i].i);
14698 	    }
14699 	  else if (in_attr[i].i && !out_attr[i].i)
14700 	    out_attr[i].i = in_attr[i].i;
14701 	  break;
14702 	case Tag_ABI_enum_size:
14703 	  if (in_attr[i].i != AEABI_enum_unused)
14704 	    {
14705 	      if (out_attr[i].i == AEABI_enum_unused
14706 		  || out_attr[i].i == AEABI_enum_forced_wide)
14707 		{
14708 		  /* The existing object is compatible with anything.
14709 		     Use whatever requirements the new object has.  */
14710 		  out_attr[i].i = in_attr[i].i;
14711 		}
14712 	      else if (in_attr[i].i != AEABI_enum_forced_wide
14713 		       && out_attr[i].i != in_attr[i].i
14714 		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
14715 		{
14716 		  static const char *aeabi_enum_names[] =
14717 		    { "", "variable-size", "32-bit", "" };
14718 		  const char *in_name =
14719 		    in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14720 		    ? aeabi_enum_names[in_attr[i].i]
14721 		    : "<unknown>";
14722 		  const char *out_name =
14723 		    out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14724 		    ? aeabi_enum_names[out_attr[i].i]
14725 		    : "<unknown>";
14726 		  _bfd_error_handler
14727 		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14728 		     ibfd, in_name, out_name);
14729 		}
14730 	    }
14731 	  break;
14732 	case Tag_ABI_VFP_args:
14733 	  /* Already done.  */
14734 	  break;
14735 	case Tag_ABI_WMMX_args:
14736 	  if (in_attr[i].i != out_attr[i].i)
14737 	    {
14738 	      _bfd_error_handler
14739 		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
14740 		 ibfd, obfd);
14741 	      result = FALSE;
14742 	    }
14743 	  break;
14744 	case Tag_compatibility:
14745 	  /* Merged in target-independent code.  */
14746 	  break;
14747 	case Tag_ABI_HardFP_use:
14748 	  /* This is handled along with Tag_FP_arch.  */
14749 	  break;
14750 	case Tag_ABI_FP_16bit_format:
14751 	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
14752 	    {
14753 	      if (in_attr[i].i != out_attr[i].i)
14754 		{
14755 		  _bfd_error_handler
14756 		    (_("error: fp16 format mismatch between %pB and %pB"),
14757 		     ibfd, obfd);
14758 		  result = FALSE;
14759 		}
14760 	    }
14761 	  if (in_attr[i].i != 0)
14762 	    out_attr[i].i = in_attr[i].i;
14763 	  break;
14764 
14765 	case Tag_DIV_use:
14766 	  /* A value of zero on input means that the divide instruction may
14767 	     be used if available in the base architecture as specified via
14768 	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
14769 	     the user did not want divide instructions.  A value of 2
14770 	     explicitly means that divide instructions were allowed in ARM
14771 	     and Thumb state.  */
14772 	  if (in_attr[i].i == out_attr[i].i)
14773 	    /* Do nothing.  */ ;
14774 	  else if (elf32_arm_attributes_forbid_div (in_attr)
14775 		   && !elf32_arm_attributes_accept_div (out_attr))
14776 	    out_attr[i].i = 1;
14777 	  else if (elf32_arm_attributes_forbid_div (out_attr)
14778 		   && elf32_arm_attributes_accept_div (in_attr))
14779 	    out_attr[i].i = in_attr[i].i;
14780 	  else if (in_attr[i].i == 2)
14781 	    out_attr[i].i = in_attr[i].i;
14782 	  break;
14783 
14784 	case Tag_MPextension_use_legacy:
14785 	  /* We don't output objects with Tag_MPextension_use_legacy - we
14786 	     move the value to Tag_MPextension_use.  */
14787 	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
14788 	    {
14789 	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
14790 		{
14791 		  _bfd_error_handler
14792 		    (_("%pB has both the current and legacy "
14793 		       "Tag_MPextension_use attributes"),
14794 		     ibfd);
14795 		  result = FALSE;
14796 		}
14797 	    }
14798 
14799 	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
14800 	    out_attr[Tag_MPextension_use] = in_attr[i];
14801 
14802 	  break;
14803 
14804 	case Tag_nodefaults:
14805 	  /* This tag is set if it exists, but the value is unused (and is
14806 	     typically zero).  We don't actually need to do anything here -
14807 	     the merge happens automatically when the type flags are merged
14808 	     below.  */
14809 	  break;
14810 	case Tag_also_compatible_with:
14811 	  /* Already done in Tag_CPU_arch.  */
14812 	  break;
14813 	case Tag_conformance:
14814 	  /* Keep the attribute if it matches.  Throw it away otherwise.
14815 	     No attribute means no claim to conform.  */
14816 	  if (!in_attr[i].s || !out_attr[i].s
14817 	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
14818 	    out_attr[i].s = NULL;
14819 	  break;
14820 
14821 	default:
14822 	  result
14823 	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
14824 	}
14825 
14826       /* If out_attr was copied from in_attr then it won't have a type yet.  */
14827       if (in_attr[i].type && !out_attr[i].type)
14828 	out_attr[i].type = in_attr[i].type;
14829     }
14830 
14831   /* Merge Tag_compatibility attributes and any common GNU ones.  */
14832   if (!_bfd_elf_merge_object_attributes (ibfd, info))
14833     return FALSE;
14834 
14835   /* Check for any attributes not known on ARM.  */
14836   result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
14837 
14838   return result;
14839 }
14840 
14841 
14842 /* Return TRUE if the two EABI versions are incompatible.  */
14843 
14844 static bfd_boolean
14845 elf32_arm_versions_compatible (unsigned iver, unsigned over)
14846 {
14847   /* v4 and v5 are the same spec before and after it was released,
14848      so allow mixing them.  */
14849   if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
14850       || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
14851     return TRUE;
14852 
14853   return (iver == over);
14854 }
14855 
14856 /* Merge backend specific data from an object file to the output
14857    object file when linking.  */
14858 
14859 static bfd_boolean
14860 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
14861 
14862 /* Display the flags field.  */
14863 
14864 static bfd_boolean
14865 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
14866 {
14867   FILE * file = (FILE *) ptr;
14868   unsigned long flags;
14869 
14870   BFD_ASSERT (abfd != NULL && ptr != NULL);
14871 
14872   /* Print normal ELF private data.  */
14873   _bfd_elf_print_private_bfd_data (abfd, ptr);
14874 
14875   flags = elf_elfheader (abfd)->e_flags;
14876   /* Ignore init flag - it may not be set, despite the flags field
14877      containing valid data.  */
14878 
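  /* For instance, a typical EABI v5 soft-float object is expected to print
     something like "private flags = 5000200: [Version5 EABI] [soft-float
     ABI]"; the exact hex value depends on the EF_ARM_* encodings.  */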
14879   fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
14880 
14881   switch (EF_ARM_EABI_VERSION (flags))
14882     {
14883     case EF_ARM_EABI_UNKNOWN:
14884       /* The following flag bits are GNU extensions and not part of the
14885 	 official ARM ELF extended ABI.  Hence they are only decoded if
14886 	 the EABI version is not set.  */
14887       if (flags & EF_ARM_INTERWORK)
14888 	fprintf (file, _(" [interworking enabled]"));
14889 
14890       if (flags & EF_ARM_APCS_26)
14891 	fprintf (file, " [APCS-26]");
14892       else
14893 	fprintf (file, " [APCS-32]");
14894 
14895       if (flags & EF_ARM_VFP_FLOAT)
14896 	fprintf (file, _(" [VFP float format]"));
14897       else if (flags & EF_ARM_MAVERICK_FLOAT)
14898 	fprintf (file, _(" [Maverick float format]"));
14899       else
14900 	fprintf (file, _(" [FPA float format]"));
14901 
14902       if (flags & EF_ARM_APCS_FLOAT)
14903 	fprintf (file, _(" [floats passed in float registers]"));
14904 
14905       if (flags & EF_ARM_PIC)
14906 	fprintf (file, _(" [position independent]"));
14907 
14908       if (flags & EF_ARM_NEW_ABI)
14909 	fprintf (file, _(" [new ABI]"));
14910 
14911       if (flags & EF_ARM_OLD_ABI)
14912 	fprintf (file, _(" [old ABI]"));
14913 
14914       if (flags & EF_ARM_SOFT_FLOAT)
14915 	fprintf (file, _(" [software FP]"));
14916 
14917       flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
14918 		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
14919 		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
14920 		 | EF_ARM_MAVERICK_FLOAT);
14921       break;
14922 
14923     case EF_ARM_EABI_VER1:
14924       fprintf (file, _(" [Version1 EABI]"));
14925 
14926       if (flags & EF_ARM_SYMSARESORTED)
14927 	fprintf (file, _(" [sorted symbol table]"));
14928       else
14929 	fprintf (file, _(" [unsorted symbol table]"));
14930 
14931       flags &= ~ EF_ARM_SYMSARESORTED;
14932       break;
14933 
14934     case EF_ARM_EABI_VER2:
14935       fprintf (file, _(" [Version2 EABI]"));
14936 
14937       if (flags & EF_ARM_SYMSARESORTED)
14938 	fprintf (file, _(" [sorted symbol table]"));
14939       else
14940 	fprintf (file, _(" [unsorted symbol table]"));
14941 
14942       if (flags & EF_ARM_DYNSYMSUSESEGIDX)
14943 	fprintf (file, _(" [dynamic symbols use segment index]"));
14944 
14945       if (flags & EF_ARM_MAPSYMSFIRST)
14946 	fprintf (file, _(" [mapping symbols precede others]"));
14947 
14948       flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
14949 		 | EF_ARM_MAPSYMSFIRST);
14950       break;
14951 
14952     case EF_ARM_EABI_VER3:
14953       fprintf (file, _(" [Version3 EABI]"));
14954       break;
14955 
14956     case EF_ARM_EABI_VER4:
14957       fprintf (file, _(" [Version4 EABI]"));
14958       goto eabi;
14959 
14960     case EF_ARM_EABI_VER5:
14961       fprintf (file, _(" [Version5 EABI]"));
14962 
14963       if (flags & EF_ARM_ABI_FLOAT_SOFT)
14964 	fprintf (file, _(" [soft-float ABI]"));
14965 
14966       if (flags & EF_ARM_ABI_FLOAT_HARD)
14967 	fprintf (file, _(" [hard-float ABI]"));
14968 
14969       flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
14970 
14971     eabi:
14972       if (flags & EF_ARM_BE8)
14973 	fprintf (file, _(" [BE8]"));
14974 
14975       if (flags & EF_ARM_LE8)
14976 	fprintf (file, _(" [LE8]"));
14977 
14978       flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
14979       break;
14980 
14981     default:
14982       fprintf (file, _(" <EABI version unrecognised>"));
14983       break;
14984     }
14985 
14986   flags &= ~ EF_ARM_EABIMASK;
14987 
14988   if (flags & EF_ARM_RELEXEC)
14989     fprintf (file, _(" [relocatable executable]"));
14990 
14991   if (flags & EF_ARM_PIC)
14992     fprintf (file, _(" [position independent]"));
14993 
14994   if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
14995     fprintf (file, _(" [FDPIC ABI supplement]"));
14996 
14997   flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
14998 
14999   if (flags)
15000     fprintf (file, _("<Unrecognised flag bits set>"));
15001 
15002   fputc ('\n', file);
15003 
15004   return TRUE;
15005 }
15006 
15007 static int
15008 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15009 {
15010   switch (ELF_ST_TYPE (elf_sym->st_info))
15011     {
15012     case STT_ARM_TFUNC:
15013       return ELF_ST_TYPE (elf_sym->st_info);
15014 
15015     case STT_ARM_16BIT:
15016       /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15017 	 This allows us to distinguish between data used by Thumb instructions
15018 	 and non-data (which is probably code) inside Thumb regions of an
15019 	 executable.  */
15020       if (type != STT_OBJECT && type != STT_TLS)
15021 	return ELF_ST_TYPE (elf_sym->st_info);
15022       break;
15023 
15024     default:
15025       break;
15026     }
15027 
15028   return type;
15029 }
15030 
15031 static asection *
15032 elf32_arm_gc_mark_hook (asection *sec,
15033 			struct bfd_link_info *info,
15034 			Elf_Internal_Rela *rel,
15035 			struct elf_link_hash_entry *h,
15036 			Elf_Internal_Sym *sym)
15037 {
15038   if (h != NULL)
15039     switch (ELF32_R_TYPE (rel->r_info))
15040       {
15041       case R_ARM_GNU_VTINHERIT:
15042       case R_ARM_GNU_VTENTRY:
15043 	return NULL;
15044       }
15045 
15046   return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15047 }
15048 
15049 /* Look through the relocs for a section during the first phase.  */
15050 
15051 static bfd_boolean
15052 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15053 			asection *sec, const Elf_Internal_Rela *relocs)
15054 {
15055   Elf_Internal_Shdr *symtab_hdr;
15056   struct elf_link_hash_entry **sym_hashes;
15057   const Elf_Internal_Rela *rel;
15058   const Elf_Internal_Rela *rel_end;
15059   bfd *dynobj;
15060   asection *sreloc;
15061   struct elf32_arm_link_hash_table *htab;
15062   bfd_boolean call_reloc_p;
15063   bfd_boolean may_become_dynamic_p;
15064   bfd_boolean may_need_local_target_p;
15065   unsigned long nsyms;
15066 
15067   if (bfd_link_relocatable (info))
15068     return TRUE;
15069 
15070   BFD_ASSERT (is_arm_elf (abfd));
15071 
15072   htab = elf32_arm_hash_table (info);
15073   if (htab == NULL)
15074     return FALSE;
15075 
15076   sreloc = NULL;
15077 
15078   /* Create dynamic sections for relocatable executables so that we can
15079      copy relocations.  */
15080   if (htab->root.is_relocatable_executable
15081       && ! htab->root.dynamic_sections_created)
15082     {
15083       if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15084 	return FALSE;
15085     }
15086 
15087   if (htab->root.dynobj == NULL)
15088     htab->root.dynobj = abfd;
15089   if (!create_ifunc_sections (info))
15090     return FALSE;
15091 
15092   dynobj = htab->root.dynobj;
15093 
15094   symtab_hdr = & elf_symtab_hdr (abfd);
15095   sym_hashes = elf_sym_hashes (abfd);
15096   nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15097 
15098   rel_end = relocs + sec->reloc_count;
15099   for (rel = relocs; rel < rel_end; rel++)
15100     {
15101       Elf_Internal_Sym *isym;
15102       struct elf_link_hash_entry *h;
15103       struct elf32_arm_link_hash_entry *eh;
15104       unsigned int r_symndx;
15105       int r_type;
15106 
15107       r_symndx = ELF32_R_SYM (rel->r_info);
15108       r_type = ELF32_R_TYPE (rel->r_info);
15109       r_type = arm_real_reloc_type (htab, r_type);
15110 
15111       if (r_symndx >= nsyms
15112 	  /* PR 9934: It is possible to have relocations that do not
15113 	     refer to symbols, thus it is also possible to have an
15114 	     object file containing relocations but no symbol table.  */
15115 	  && (r_symndx > STN_UNDEF || nsyms > 0))
15116 	{
15117 	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15118 			      r_symndx);
15119 	  return FALSE;
15120 	}
15121 
15122       h = NULL;
15123       isym = NULL;
15124       if (nsyms > 0)
15125 	{
15126 	  if (r_symndx < symtab_hdr->sh_info)
15127 	    {
15128 	      /* A local symbol.  */
15129 	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15130 					    abfd, r_symndx);
15131 	      if (isym == NULL)
15132 		return FALSE;
15133 	    }
15134 	  else
15135 	    {
15136 	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15137 	      while (h->root.type == bfd_link_hash_indirect
15138 		     || h->root.type == bfd_link_hash_warning)
15139 		h = (struct elf_link_hash_entry *) h->root.u.i.link;
15140 	    }
15141 	}
15142 
15143       eh = (struct elf32_arm_link_hash_entry *) h;
15144 
15145       call_reloc_p = FALSE;
15146       may_become_dynamic_p = FALSE;
15147       may_need_local_target_p = FALSE;
15148 
15149       /* Could be done earlier, if h were already available.  */
15150       r_type = elf32_arm_tls_transition (info, r_type, h);
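      /* Note: for non-shared links elf32_arm_tls_transition may have
	 rewritten a TLS descriptor reloc (R_ARM_TLS_GOTDESC and friends)
	 into an IE or LE form, so the accounting below sees the relaxed
	 type.  */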
15151       switch (r_type)
15152 	{
15153 	case R_ARM_GOTOFFFUNCDESC:
15154 	  {
15155 	    if (h == NULL)
15156 	      {
15157 		if (!elf32_arm_allocate_local_sym_info (abfd))
15158 		  return FALSE;
15159 		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15160 		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15161 	      }
15162 	    else
15163 	      {
15164 		eh->fdpic_cnts.gotofffuncdesc_cnt++;
15165 	      }
15166 	  }
15167 	  break;
15168 
15169 	case R_ARM_GOTFUNCDESC:
15170 	  {
15171 	    if (h == NULL)
15172 	      {
15173 		/* Such a relocation is not supposed to be generated
15174 		   by gcc on a static function.  Anyway, if it is ever
15175 		   needed it could be handled here.  */
15176 		abort();
15177 	      }
15178 	    else
15179 	      {
15180 		eh->fdpic_cnts.gotfuncdesc_cnt++;
15181 	      }
15182 	  }
15183 	  break;
15184 
15185 	case R_ARM_FUNCDESC:
15186 	  {
15187 	    if (h == NULL)
15188 	      {
15189 		if (!elf32_arm_allocate_local_sym_info (abfd))
15190 		  return FALSE;
15191 		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15192 		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15193 	      }
15194 	    else
15195 	      {
15196 		eh->fdpic_cnts.funcdesc_cnt++;
15197 	      }
15198 	  }
15199 	  break;
15200 
15201 	  case R_ARM_GOT32:
15202 	  case R_ARM_GOT_PREL:
15203 	  case R_ARM_TLS_GD32:
15204 	  case R_ARM_TLS_GD32_FDPIC:
15205 	  case R_ARM_TLS_IE32:
15206 	  case R_ARM_TLS_IE32_FDPIC:
15207 	  case R_ARM_TLS_GOTDESC:
15208 	  case R_ARM_TLS_DESCSEQ:
15209 	  case R_ARM_THM_TLS_DESCSEQ:
15210 	  case R_ARM_TLS_CALL:
15211 	  case R_ARM_THM_TLS_CALL:
15212 	    /* This symbol requires a global offset table entry.  */
15213 	    {
15214 	      int tls_type, old_tls_type;
15215 
15216 	      switch (r_type)
15217 		{
15218 		case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15219 		case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15220 
15221 		case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15222 		case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15223 
15224 		case R_ARM_TLS_GOTDESC:
15225 		case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15226 		case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15227 		  tls_type = GOT_TLS_GDESC; break;
15228 
15229 		default: tls_type = GOT_NORMAL; break;
15230 		}
15231 
15232 	      if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15233 		info->flags |= DF_STATIC_TLS;
15234 
15235 	      if (h != NULL)
15236 		{
15237 		  h->got.refcount++;
15238 		  old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15239 		}
15240 	      else
15241 		{
15242 		  /* This is a global offset table entry for a local symbol.  */
15243 		  if (!elf32_arm_allocate_local_sym_info (abfd))
15244 		    return FALSE;
15245 		  elf_local_got_refcounts (abfd)[r_symndx] += 1;
15246 		  old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15247 		}
15248 
15249 	      /* If a variable is accessed with both TLS methods, two
15250 		 slots may be created.  */
15251 	      if (GOT_TLS_GD_ANY_P (old_tls_type)
15252 		  && GOT_TLS_GD_ANY_P (tls_type))
15253 		tls_type |= old_tls_type;
15254 
15255 	      /* We will already have issued an error message if there
15256 		 is a TLS/non-TLS mismatch, based on the symbol
15257 		 type.  So just combine any TLS types needed.  */
15258 	      if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15259 		  && tls_type != GOT_NORMAL)
15260 		tls_type |= old_tls_type;
15261 
15262 	      /* If the symbol is accessed by both the IE and GDESC
15263 		 methods, we can relax.  Turn off the GDESC flag without
15264 		 disturbing any other TLS types that may be
15265 		 involved.  */
15266 	      if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15267 		tls_type &= ~GOT_TLS_GDESC;
15268 
15269 	      if (old_tls_type != tls_type)
15270 		{
15271 		  if (h != NULL)
15272 		    elf32_arm_hash_entry (h)->tls_type = tls_type;
15273 		  else
15274 		    elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15275 		}
15276 	    }
15277 	    /* Fall through.  */
15278 
15279 	  case R_ARM_TLS_LDM32:
15280 	  case R_ARM_TLS_LDM32_FDPIC:
15281 	    if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15282 		htab->tls_ldm_got.refcount++;
15283 	    /* Fall through.  */
15284 
15285 	  case R_ARM_GOTOFF32:
15286 	  case R_ARM_GOTPC:
15287 	    if (htab->root.sgot == NULL
15288 		&& !create_got_section (htab->root.dynobj, info))
15289 	      return FALSE;
15290 	    break;
15291 
15292 	  case R_ARM_PC24:
15293 	  case R_ARM_PLT32:
15294 	  case R_ARM_CALL:
15295 	  case R_ARM_JUMP24:
15296 	  case R_ARM_PREL31:
15297 	  case R_ARM_THM_CALL:
15298 	  case R_ARM_THM_JUMP24:
15299 	  case R_ARM_THM_JUMP19:
15300 	    call_reloc_p = TRUE;
15301 	    may_need_local_target_p = TRUE;
15302 	    break;
15303 
15304 	  case R_ARM_ABS12:
15305 	    /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15306 	       ldr __GOTT_INDEX__ offsets.  */
15307 	    if (!htab->vxworks_p)
15308 	      {
15309 		may_need_local_target_p = TRUE;
15310 		break;
15311 	      }
15312 	    else goto jump_over;
15313 
15314 	    /* Fall through.  */
15315 
15316 	  case R_ARM_MOVW_ABS_NC:
15317 	  case R_ARM_MOVT_ABS:
15318 	  case R_ARM_THM_MOVW_ABS_NC:
15319 	  case R_ARM_THM_MOVT_ABS:
15320 	    if (bfd_link_pic (info))
15321 	      {
15322 		_bfd_error_handler
15323 		  (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15324 		   abfd, elf32_arm_howto_table_1[r_type].name,
15325 		   (h) ? h->root.root.string : "a local symbol");
15326 		bfd_set_error (bfd_error_bad_value);
15327 		return FALSE;
15328 	      }
15329 
15330 	    /* Fall through.  */
15331 	  case R_ARM_ABS32:
15332 	  case R_ARM_ABS32_NOI:
15333 	jump_over:
15334 	    if (h != NULL && bfd_link_executable (info))
15335 	      {
15336 		h->pointer_equality_needed = 1;
15337 	      }
15338 	    /* Fall through.  */
15339 	  case R_ARM_REL32:
15340 	  case R_ARM_REL32_NOI:
15341 	  case R_ARM_MOVW_PREL_NC:
15342 	  case R_ARM_MOVT_PREL:
15343 	  case R_ARM_THM_MOVW_PREL_NC:
15344 	  case R_ARM_THM_MOVT_PREL:
15345 
15346 	    /* Should the interworking branches be listed here?  */
15347 	    if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15348 		 || htab->fdpic_p)
15349 		&& (sec->flags & SEC_ALLOC) != 0)
15350 	      {
15351 		if (h == NULL
15352 		    && elf32_arm_howto_from_type (r_type)->pc_relative)
15353 		  {
15354 		    /* In shared libraries and relocatable executables,
15355 		       we treat local relative references as calls;
15356 		       see the related SYMBOL_CALLS_LOCAL code in
15357 		       allocate_dynrelocs.  */
15358 		    call_reloc_p = TRUE;
15359 		    may_need_local_target_p = TRUE;
15360 		  }
15361 		else
15362 		  /* We are creating a shared library or relocatable
15363 		     executable, and this is a reloc against a global symbol,
15364 		     or a non-PC-relative reloc against a local symbol.
15365 		     We may need to copy the reloc into the output.  */
15366 		  may_become_dynamic_p = TRUE;
15367 	      }
15368 	    else
15369 	      may_need_local_target_p = TRUE;
15370 	    break;
15371 
15372 	/* This relocation describes the C++ object vtable hierarchy.
15373 	   Reconstruct it for later use during GC.  */
15374 	case R_ARM_GNU_VTINHERIT:
15375 	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15376 	    return FALSE;
15377 	  break;
15378 
15379 	/* This relocation describes which C++ vtable entries are actually
15380 	   used.  Record for later use during GC.  */
15381 	case R_ARM_GNU_VTENTRY:
15382 	  BFD_ASSERT (h != NULL);
15383 	  if (h != NULL
15384 	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15385 	    return FALSE;
15386 	  break;
15387 	}
15388 
15389       if (h != NULL)
15390 	{
15391 	  if (call_reloc_p)
15392 	    /* We may need a .plt entry if the function this reloc
15393 	       refers to is in a different object, regardless of the
15394 	       symbol's type.  We can't tell for sure yet, because
15395 	       something later might force the symbol local.  */
15396 	    h->needs_plt = 1;
15397 	  else if (may_need_local_target_p)
15398 	    /* If this reloc is in a read-only section, we might
15399 	       need a copy reloc.  We can't check reliably at this
15400 	       stage whether the section is read-only, as input
15401 	       sections have not yet been mapped to output sections.
15402 	       Tentatively set the flag for now, and correct in
15403 	       adjust_dynamic_symbol.  */
15404 	    h->non_got_ref = 1;
15405 	}
15406 
15407       if (may_need_local_target_p
15408 	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15409 	{
15410 	  union gotplt_union *root_plt;
15411 	  struct arm_plt_info *arm_plt;
15412 	  struct arm_local_iplt_info *local_iplt;
15413 
15414 	  if (h != NULL)
15415 	    {
15416 	      root_plt = &h->plt;
15417 	      arm_plt = &eh->plt;
15418 	    }
15419 	  else
15420 	    {
15421 	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15422 	      if (local_iplt == NULL)
15423 		return FALSE;
15424 	      root_plt = &local_iplt->root;
15425 	      arm_plt = &local_iplt->arm;
15426 	    }
15427 
15428 	  /* If the symbol is a function that doesn't bind locally,
15429 	     this relocation will need a PLT entry.  */
15430 	  if (root_plt->refcount != -1)
15431 	    root_plt->refcount += 1;
15432 
15433 	  if (!call_reloc_p)
15434 	    arm_plt->noncall_refcount++;
15435 
15436 	  /* It's too early to use htab->use_blx here, so we have to
15437 	     record possible blx references separately from
15438 	     relocs that definitely need a thumb stub.  */
15439 
15440 	  if (r_type == R_ARM_THM_CALL)
15441 	    arm_plt->maybe_thumb_refcount += 1;
15442 
15443 	  if (r_type == R_ARM_THM_JUMP24
15444 	      || r_type == R_ARM_THM_JUMP19)
15445 	    arm_plt->thumb_refcount += 1;
15446 	}
15447 
15448       if (may_become_dynamic_p)
15449 	{
15450 	  struct elf_dyn_relocs *p, **head;
15451 
15452 	  /* Create a reloc section in dynobj.  */
15453 	  if (sreloc == NULL)
15454 	    {
15455 	      sreloc = _bfd_elf_make_dynamic_reloc_section
15456 		(sec, dynobj, 2, abfd, ! htab->use_rel);
15457 
15458 	      if (sreloc == NULL)
15459 		return FALSE;
15460 
15461 	      /* BPABI objects never have dynamic relocations mapped.  */
15462 	      if (htab->symbian_p)
15463 		{
15464 		  flagword flags;
15465 
15466 		  flags = bfd_get_section_flags (dynobj, sreloc);
15467 		  flags &= ~(SEC_LOAD | SEC_ALLOC);
15468 		  bfd_set_section_flags (dynobj, sreloc, flags);
15469 		}
15470 	    }
15471 
15472 	  /* If this is a global symbol, count the number of
15473 	     relocations we need for this symbol.  */
15474 	  if (h != NULL)
15475 	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
15476 	  else
15477 	    {
15478 	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15479 	      if (head == NULL)
15480 		return FALSE;
15481 	    }
15482 
15483 	  p = *head;
15484 	  if (p == NULL || p->sec != sec)
15485 	    {
15486 	      bfd_size_type amt = sizeof *p;
15487 
15488 	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15489 	      if (p == NULL)
15490 		return FALSE;
15491 	      p->next = *head;
15492 	      *head = p;
15493 	      p->sec = sec;
15494 	      p->count = 0;
15495 	      p->pc_count = 0;
15496 	    }
15497 
15498 	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
15499 	    p->pc_count += 1;
15500 	  p->count += 1;
15501 	  if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15502 	      && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15503 	    /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI,
15504 	       which will become rofixups, because we assume that all such
15505 	       relocations will become rofixups.  */
15506 	    fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15507 	    _bfd_error_handler
15508 	      (_("FDPIC does not yet support %s relocation"
15509 		 " to become dynamic for executable"),
15510 	       elf32_arm_howto_table_1[r_type].name);
15511 	    abort();
15512 	  }
15513 	}
15514     }
15515 
15516   return TRUE;
15517 }
15518 
15519 static void
15520 elf32_arm_update_relocs (asection *o,
15521 			 struct bfd_elf_section_reloc_data *reldata)
15522 {
15523   void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15524   void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15525   const struct elf_backend_data *bed;
15526   _arm_elf_section_data *eado;
15527   struct bfd_link_order *p;
15528   bfd_byte *erela_head, *erela;
15529   Elf_Internal_Rela *irela_head, *irela;
15530   Elf_Internal_Shdr *rel_hdr;
15531   bfd *abfd;
15532   unsigned int count;
15533 
15534   eado = get_arm_elf_section_data (o);
15535 
15536   if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15537     return;
15538 
15539   abfd = o->owner;
15540   bed = get_elf_backend_data (abfd);
15541   rel_hdr = reldata->hdr;
15542 
15543   if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15544     {
15545       swap_in = bed->s->swap_reloc_in;
15546       swap_out = bed->s->swap_reloc_out;
15547     }
15548   else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15549     {
15550       swap_in = bed->s->swap_reloca_in;
15551       swap_out = bed->s->swap_reloca_out;
15552     }
15553   else
15554     abort ();
15555 
15556   erela_head = rel_hdr->contents;
15557   irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15558     ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15559 
15560   erela = erela_head;
15561   irela = irela_head;
15562   count = 0;
15563 
15564   for (p = o->map_head.link_order; p; p = p->next)
15565     {
15566       if (p->type == bfd_section_reloc_link_order
15567 	  || p->type == bfd_symbol_reloc_link_order)
15568 	{
15569 	  (*swap_in) (abfd, erela, irela);
15570 	  erela += rel_hdr->sh_entsize;
15571 	  irela++;
15572 	  count++;
15573 	}
15574       else if (p->type == bfd_indirect_link_order)
15575 	{
15576 	  struct bfd_elf_section_reloc_data *input_reldata;
15577 	  arm_unwind_table_edit *edit_list, *edit_tail;
15578 	  _arm_elf_section_data *eadi;
15579 	  bfd_size_type j;
15580 	  bfd_vma offset;
15581 	  asection *i;
15582 
15583 	  i = p->u.indirect.section;
15584 
15585 	  eadi = get_arm_elf_section_data (i);
15586 	  edit_list = eadi->u.exidx.unwind_edit_list;
15587 	  edit_tail = eadi->u.exidx.unwind_edit_tail;
15588 	  offset = o->vma + i->output_offset;
15589 
15590 	  if (eadi->elf.rel.hdr &&
15591 	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15592 	    input_reldata = &eadi->elf.rel;
15593 	  else if (eadi->elf.rela.hdr &&
15594 		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15595 	    input_reldata = &eadi->elf.rela;
15596 	  else
15597 	    abort ();
15598 
15599 	  if (edit_list)
15600 	    {
15601 	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15602 		{
15603 		  arm_unwind_table_edit *edit_node, *edit_next;
15604 		  bfd_vma bias;
15605 		  bfd_vma reloc_index;
15606 
15607 		  (*swap_in) (abfd, erela, irela);
15608 		  reloc_index = (irela->r_offset - offset) / 8;
15609 
15610 		  bias = 0;
15611 		  edit_node = edit_list;
15612 		  for (edit_next = edit_list;
15613 		       edit_next && edit_next->index <= reloc_index;
15614 		       edit_next = edit_node->next)
15615 		    {
15616 		      bias++;
15617 		      edit_node = edit_next;
15618 		    }
15619 
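		  /* EXIDX entries are 8 bytes each, hence the division by 8
		     above and the bias * 8 adjustment below: a surviving
		     entry moves down 8 bytes for every entry edited out
		     before it.  */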
15620 		  if (edit_node->type != DELETE_EXIDX_ENTRY
15621 		      || edit_node->index != reloc_index)
15622 		    {
15623 		      irela->r_offset -= bias * 8;
15624 		      irela++;
15625 		      count++;
15626 		    }
15627 
15628 		  erela += rel_hdr->sh_entsize;
15629 		}
15630 
15631 	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15632 		{
15633 		  /* New relocation entity.  */
15634 		  asection *text_sec = edit_tail->linked_section;
15635 		  asection *text_out = text_sec->output_section;
15636 		  bfd_vma exidx_offset = offset + i->size - 8;
15637 
15638 		  irela->r_addend = 0;
15639 		  irela->r_offset = exidx_offset;
15640 		  irela->r_info = ELF32_R_INFO
15641 		    (text_out->target_index, R_ARM_PREL31);
15642 		  irela++;
15643 		  count++;
15644 		}
15645 	    }
15646 	  else
15647 	    {
15648 	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15649 		{
15650 		  (*swap_in) (abfd, erela, irela);
15651 		  erela += rel_hdr->sh_entsize;
15652 		  irela++;
15653 		}
15654 
15655 	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15656 	    }
15657 	}
15658     }
15659 
15660   reldata->count = count;
15661   rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15662 
15663   erela = erela_head;
15664   irela = irela_head;
15665   while (count > 0)
15666     {
15667       (*swap_out) (abfd, irela, erela);
15668       erela += rel_hdr->sh_entsize;
15669       irela++;
15670       count--;
15671     }
15672 
15673   free (irela_head);
15674 
15675   /* Hashes are no longer valid.  */
15676   free (reldata->hashes);
15677   reldata->hashes = NULL;
15678 }
15679 
15680 /* Unwinding tables are not referenced directly.  This pass marks them as
15681    required if the corresponding code section is marked.  Similarly, ARMv8-M
15682    secure entry functions can only be referenced by SG veneers which are
15683    created after the GC process. They need to be marked in case they reside in
15684    their own section (as would be the case if code was compiled with
15685    -ffunction-sections).  */
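/* Note: the SG veneers are emitted into the special .gnu.sgstubs section
   only after garbage collection has run, which is why those entry functions
   cannot be reached by the normal mark pass.  */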
15686 
15687 static bfd_boolean
15688 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15689 				  elf_gc_mark_hook_fn gc_mark_hook)
15690 {
15691   bfd *sub;
15692   Elf_Internal_Shdr **elf_shdrp;
15693   asection *cmse_sec;
15694   obj_attribute *out_attr;
15695   Elf_Internal_Shdr *symtab_hdr;
15696   unsigned i, sym_count, ext_start;
15697   const struct elf_backend_data *bed;
15698   struct elf_link_hash_entry **sym_hashes;
15699   struct elf32_arm_link_hash_entry *cmse_hash;
15700   bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
15701 
15702   _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15703 
15704   out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15705   is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15706 	   && out_attr[Tag_CPU_arch_profile].i == 'M';
15707 
15708   /* Marking EH data may cause additional code sections to be marked,
15709      requiring multiple passes.  */
15710   again = TRUE;
15711   while (again)
15712     {
15713       again = FALSE;
15714       for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15715 	{
15716 	  asection *o;
15717 
15718 	  if (! is_arm_elf (sub))
15719 	    continue;
15720 
15721 	  elf_shdrp = elf_elfsections (sub);
15722 	  for (o = sub->sections; o != NULL; o = o->next)
15723 	    {
15724 	      Elf_Internal_Shdr *hdr;
15725 
15726 	      hdr = &elf_section_data (o)->this_hdr;
15727 	      if (hdr->sh_type == SHT_ARM_EXIDX
15728 		  && hdr->sh_link
15729 		  && hdr->sh_link < elf_numsections (sub)
15730 		  && !o->gc_mark
15731 		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15732 		{
15733 		  again = TRUE;
15734 		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15735 		    return FALSE;
15736 		}
15737 	    }
15738 
15739 	  /* Mark sections holding ARMv8-M secure entry functions.  We mark
15740 	     all of them, so there is no need for a second pass.  */
15741 	  if (is_v8m && first_bfd_browse)
15742 	    {
15743 	      sym_hashes = elf_sym_hashes (sub);
15744 	      bed = get_elf_backend_data (sub);
15745 	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15746 	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15747 	      ext_start = symtab_hdr->sh_info;
15748 
15749 	      /* Scan symbols.  */
15750 	      for (i = ext_start; i < sym_count; i++)
15751 		{
15752 		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15753 
15754 		  /* Assume it is a special symbol.  If not, cmse_scan will
15755 		     warn about it and the user can do something about it.  */
15756 		  if (ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
15757 		    {
15758 		      cmse_sec = cmse_hash->root.root.u.def.section;
15759 		      if (!cmse_sec->gc_mark
15760 			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
15761 			return FALSE;
15762 		    }
15763 		}
15764 	    }
15765 	}
15766       first_bfd_browse = FALSE;
15767     }
15768 
15769   return TRUE;
15770 }
15771 
15772 /* Treat mapping symbols as special target symbols.  */
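/* Mapping symbols are the local $a, $t and $d symbols that the assembler
   emits to mark ARM code, Thumb code and data within a section.  */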
15773 
15774 static bfd_boolean
15775 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
15776 {
15777   return bfd_is_arm_special_symbol_name (sym->name,
15778 					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
15779 }
15780 
15781 /* This is a copy of elf_find_function() from elf.c except that
15782    ARM mapping symbols are ignored when looking for function names
15783    and STT_ARM_TFUNC is considered to be a function type.  */
15784 
15785 static bfd_boolean
15786 arm_elf_find_function (bfd *	     abfd ATTRIBUTE_UNUSED,
15787 		       asymbol **    symbols,
15788 		       asection *    section,
15789 		       bfd_vma	     offset,
15790 		       const char ** filename_ptr,
15791 		       const char ** functionname_ptr)
15792 {
15793   const char * filename = NULL;
15794   asymbol * func = NULL;
15795   bfd_vma low_func = 0;
15796   asymbol ** p;
15797 
15798   for (p = symbols; *p != NULL; p++)
15799     {
15800       elf_symbol_type *q;
15801 
15802       q = (elf_symbol_type *) *p;
15803 
15804       switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
15805 	{
15806 	default:
15807 	  break;
15808 	case STT_FILE:
15809 	  filename = bfd_asymbol_name (&q->symbol);
15810 	  break;
15811 	case STT_FUNC:
15812 	case STT_ARM_TFUNC:
15813 	case STT_NOTYPE:
15814 	  /* Skip mapping symbols.  */
15815 	  if ((q->symbol.flags & BSF_LOCAL)
15816 	      && bfd_is_arm_special_symbol_name (q->symbol.name,
15817 		    BFD_ARM_SPECIAL_SYM_TYPE_ANY))
15818 	    continue;
15819 	  /* Fall through.  */
15820 	  if (bfd_get_section (&q->symbol) == section
15821 	      && q->symbol.value >= low_func
15822 	      && q->symbol.value <= offset)
15823 	    {
15824 	      func = (asymbol *) q;
15825 	      low_func = q->symbol.value;
15826 	    }
15827 	  break;
15828 	}
15829     }
15830 
15831   if (func == NULL)
15832     return FALSE;
15833 
15834   if (filename_ptr)
15835     *filename_ptr = filename;
15836   if (functionname_ptr)
15837     *functionname_ptr = bfd_asymbol_name (func);
15838 
15839   return TRUE;
15840 }
15841 
15842 
15843 /* Find the nearest line to a particular section and offset, for error
15844    reporting.   This code is a duplicate of the code in elf.c, except
15845    that it uses arm_elf_find_function.  */
15846 
15847 static bfd_boolean
15848 elf32_arm_find_nearest_line (bfd *	    abfd,
15849 			     asymbol **	    symbols,
15850 			     asection *	    section,
15851 			     bfd_vma	    offset,
15852 			     const char **  filename_ptr,
15853 			     const char **  functionname_ptr,
15854 			     unsigned int * line_ptr,
15855 			     unsigned int * discriminator_ptr)
15856 {
15857   bfd_boolean found = FALSE;
15858 
15859   if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
15860 				     filename_ptr, functionname_ptr,
15861 				     line_ptr, discriminator_ptr,
15862 				     dwarf_debug_sections, 0,
15863 				     & elf_tdata (abfd)->dwarf2_find_line_info))
15864     {
15865       if (!*functionname_ptr)
15866 	arm_elf_find_function (abfd, symbols, section, offset,
15867 			       *filename_ptr ? NULL : filename_ptr,
15868 			       functionname_ptr);
15869 
15870       return TRUE;
15871     }
15872 
15873   /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
15874      uses DWARF1.  */
15875 
15876   if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
15877 					     & found, filename_ptr,
15878 					     functionname_ptr, line_ptr,
15879 					     & elf_tdata (abfd)->line_info))
15880     return FALSE;
15881 
15882   if (found && (*functionname_ptr || *line_ptr))
15883     return TRUE;
15884 
15885   if (symbols == NULL)
15886     return FALSE;
15887 
15888   if (! arm_elf_find_function (abfd, symbols, section, offset,
15889 			       filename_ptr, functionname_ptr))
15890     return FALSE;
15891 
15892   *line_ptr = 0;
15893   return TRUE;
15894 }
15895 
15896 static bfd_boolean
15897 elf32_arm_find_inliner_info (bfd *	    abfd,
15898 			     const char **  filename_ptr,
15899 			     const char **  functionname_ptr,
15900 			     unsigned int * line_ptr)
15901 {
15902   bfd_boolean found;
15903   found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
15904 					 functionname_ptr, line_ptr,
15905 					 & elf_tdata (abfd)->dwarf2_find_line_info);
15906   return found;
15907 }
15908 
15909 /* Find dynamic relocs for H that apply to read-only sections.  */
15910 
15911 static asection *
15912 readonly_dynrelocs (struct elf_link_hash_entry *h)
15913 {
15914   struct elf_dyn_relocs *p;
15915 
15916   for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
15917     {
15918       asection *s = p->sec->output_section;
15919 
15920       if (s != NULL && (s->flags & SEC_READONLY) != 0)
15921 	return p->sec;
15922     }
15923   return NULL;
15924 }
15925 
15926 /* Adjust a symbol defined by a dynamic object and referenced by a
15927    regular object.  The current definition is in some section of the
15928    dynamic object, but we're not including those sections.  We have to
15929    change the definition to something the rest of the link can
15930    understand.  */
15931 
15932 static bfd_boolean
15933 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
15934 				 struct elf_link_hash_entry * h)
15935 {
15936   bfd * dynobj;
15937   asection *s, *srel;
15938   struct elf32_arm_link_hash_entry * eh;
15939   struct elf32_arm_link_hash_table *globals;
15940 
15941   globals = elf32_arm_hash_table (info);
15942   if (globals == NULL)
15943     return FALSE;
15944 
15945   dynobj = elf_hash_table (info)->dynobj;
15946 
15947   /* Make sure we know what is going on here.  */
15948   BFD_ASSERT (dynobj != NULL
15949 	      && (h->needs_plt
15950 		  || h->type == STT_GNU_IFUNC
15951 		  || h->is_weakalias
15952 		  || (h->def_dynamic
15953 		      && h->ref_regular
15954 		      && !h->def_regular)));
15955 
15956   eh = (struct elf32_arm_link_hash_entry *) h;
15957 
15958   /* If this is a function, put it in the procedure linkage table.  We
15959      will fill in the contents of the procedure linkage table later,
15960      when we know the address of the .got section.  */
15961   if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
15962     {
15963       /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
15964 	 symbol binds locally.  */
15965       if (h->plt.refcount <= 0
15966 	  || (h->type != STT_GNU_IFUNC
15967 	      && (SYMBOL_CALLS_LOCAL (info, h)
15968 		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
15969 		      && h->root.type == bfd_link_hash_undefweak))))
15970 	{
15971 	  /* This case can occur if we saw a PLT32 reloc in an input
15972 	     file, but the symbol was never referred to by a dynamic
15973 	     object, or if all references were garbage collected.  In
15974 	     such a case, we don't actually need to build a procedure
15975 	     linkage table, and we can just do a PC24 reloc instead.  */
15976 	  h->plt.offset = (bfd_vma) -1;
15977 	  eh->plt.thumb_refcount = 0;
15978 	  eh->plt.maybe_thumb_refcount = 0;
15979 	  eh->plt.noncall_refcount = 0;
15980 	  h->needs_plt = 0;
15981 	}
15982 
15983       return TRUE;
15984     }
15985   else
15986     {
15987       /* It's possible that we incorrectly decided a .plt reloc was
15988 	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
15989 	 in check_relocs.  We can't decide accurately between function
15990 	 and non-function syms in check-relocs; Objects loaded later in
15991 	 and non-function syms in check_relocs; objects loaded later in
15992       h->plt.offset = (bfd_vma) -1;
15993       eh->plt.thumb_refcount = 0;
15994       eh->plt.maybe_thumb_refcount = 0;
15995       eh->plt.noncall_refcount = 0;
15996     }
15997 
15998   /* If this is a weak symbol, and there is a real definition, the
15999      processor independent code will have arranged for us to see the
16000      real definition first, and we can just use the same value.  */
16001   if (h->is_weakalias)
16002     {
16003       struct elf_link_hash_entry *def = weakdef (h);
16004       BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16005       h->root.u.def.section = def->root.u.def.section;
16006       h->root.u.def.value = def->root.u.def.value;
16007       return TRUE;
16008     }
16009 
16010   /* If there are no non-GOT references, we do not need a copy
16011      relocation.  */
16012   if (!h->non_got_ref)
16013     return TRUE;
16014 
16015   /* This is a reference to a symbol defined by a dynamic object which
16016      is not a function.  */
16017 
16018   /* If we are creating a shared library, we must presume that the
16019      only references to the symbol are via the global offset table.
16020      For such cases we need not do anything here; the relocations will
16021      be handled correctly by relocate_section.  Relocatable executables
16022      can reference data in shared objects directly, so we don't need to
16023      do anything here.  */
16024   if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
16025     return TRUE;
16026 
16027   /* We must allocate the symbol in our .dynbss section, which will
16028      become part of the .bss section of the executable.  There will be
16029      an entry for this symbol in the .dynsym section.  The dynamic
16030      object will contain position independent code, so all references
16031      from the dynamic object to this symbol will go through the global
16032      offset table.  The dynamic linker will use the .dynsym entry to
16033      determine the address it must put in the global offset table, so
16034      both the dynamic object and the regular object will refer to the
16035      same memory location for the variable.  */
16036   /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
16037      linker to copy the initial value out of the dynamic object and into
16038      the runtime process image.  We need to remember the offset into the
16039      .rel(a).bss section we are going to use.  */
16040   if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16041     {
16042       s = globals->root.sdynrelro;
16043       srel = globals->root.sreldynrelro;
16044     }
16045   else
16046     {
16047       s = globals->root.sdynbss;
16048       srel = globals->root.srelbss;
16049     }
16050   if (info->nocopyreloc == 0
16051       && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16052       && h->size != 0)
16053     {
16054       elf32_arm_allocate_dynrelocs (info, srel, 1);
16055       h->needs_copy = 1;
16056     }
16057 
16058   return _bfd_elf_adjust_dynamic_copy (info, h, s);
16059 }
16060 
16061 /* Allocate space in .plt, .got and associated reloc sections for
16062    dynamic relocs.  */
16063 
16064 static bfd_boolean
16065 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16066 {
16067   struct bfd_link_info *info;
16068   struct elf32_arm_link_hash_table *htab;
16069   struct elf32_arm_link_hash_entry *eh;
16070   struct elf_dyn_relocs *p;
16071 
16072   if (h->root.type == bfd_link_hash_indirect)
16073     return TRUE;
16074 
16075   eh = (struct elf32_arm_link_hash_entry *) h;
16076 
16077   info = (struct bfd_link_info *) inf;
16078   htab = elf32_arm_hash_table (info);
16079   if (htab == NULL)
16080     return FALSE;
16081 
16082   if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16083       && h->plt.refcount > 0)
16084     {
16085       /* Make sure this symbol is output as a dynamic symbol.
16086 	 Undefined weak syms won't yet be marked as dynamic.  */
16087       if (h->dynindx == -1 && !h->forced_local
16088 	  && h->root.type == bfd_link_hash_undefweak)
16089 	{
16090 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16091 	    return FALSE;
16092 	}
16093 
16094       /* If the call in the PLT entry binds locally, the associated
16095 	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16096 	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
16097 	 than the .plt section.  */
16098       if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16099 	{
16100 	  eh->is_iplt = 1;
16101 	  if (eh->plt.noncall_refcount == 0
16102 	      && SYMBOL_REFERENCES_LOCAL (info, h))
16103 	    /* All non-call references can be resolved directly.
16104 	       This means that they can (and in some cases, must)
16105 	       resolve directly to the run-time target, rather than
16106 	       to the PLT.  That in turn means that any .got entry
16107 	       would be equal to the .igot.plt entry, so there's
16108 	       no point having both.  */
16109 	    h->got.refcount = 0;
16110 	}
16111 
16112       if (bfd_link_pic (info)
16113 	  || eh->is_iplt
16114 	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16115 	{
16116 	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16117 
16118 	  /* If this symbol is not defined in a regular file, and we are
16119 	     not generating a shared library, then set the symbol to this
16120 	     location in the .plt.  This is required to make function
16121 	     pointers compare as equal between the normal executable and
16122 	     the shared library.  */
16123 	  if (! bfd_link_pic (info)
16124 	      && !h->def_regular)
16125 	    {
16126 	      h->root.u.def.section = htab->root.splt;
16127 	      h->root.u.def.value = h->plt.offset;
16128 
16129 	      /* Make sure the function is not marked as Thumb, in case
16130 		 it is the target of an ABS32 relocation, which will
16131 		 point to the PLT entry.  */
16132 	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16133 	    }
16134 
16135 	  /* VxWorks executables have a second set of relocations for
16136 	     each PLT entry.  They go in a separate relocation section,
16137 	     which is processed by the kernel loader.  */
16138 	  if (htab->vxworks_p && !bfd_link_pic (info))
16139 	    {
16140 	      /* There is a relocation for the initial PLT entry:
16141 		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
16142 	      if (h->plt.offset == htab->plt_header_size)
16143 		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16144 
16145 	      /* There are two extra relocations for each subsequent
16146 		 PLT entry: an R_ARM_32 relocation for the GOT entry,
16147 		 and an R_ARM_32 relocation for the PLT entry.  */
16148 	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16149 	    }
16150 	}
16151       else
16152 	{
16153 	  h->plt.offset = (bfd_vma) -1;
16154 	  h->needs_plt = 0;
16155 	}
16156     }
16157   else
16158     {
16159       h->plt.offset = (bfd_vma) -1;
16160       h->needs_plt = 0;
16161     }
16162 
16163   eh = (struct elf32_arm_link_hash_entry *) h;
16164   eh->tlsdesc_got = (bfd_vma) -1;
16165 
16166   if (h->got.refcount > 0)
16167     {
16168       asection *s;
16169       bfd_boolean dyn;
16170       int tls_type = elf32_arm_hash_entry (h)->tls_type;
16171       int indx;
16172 
16173       /* Make sure this symbol is output as a dynamic symbol.
16174 	 Undefined weak syms won't yet be marked as dynamic.  */
16175       if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
16176 	  && h->root.type == bfd_link_hash_undefweak)
16177 	{
16178 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16179 	    return FALSE;
16180 	}
16181 
16182       if (!htab->symbian_p)
16183 	{
16184 	  s = htab->root.sgot;
16185 	  h->got.offset = s->size;
16186 
16187 	  if (tls_type == GOT_UNKNOWN)
16188 	    abort ();
16189 
16190 	  if (tls_type == GOT_NORMAL)
16191 	    /* Non-TLS symbols need one GOT slot.  */
16192 	    s->size += 4;
16193 	  else
16194 	    {
16195 	      if (tls_type & GOT_TLS_GDESC)
16196 		{
16197 		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
16198 		  eh->tlsdesc_got
16199 		    = (htab->root.sgotplt->size
16200 		       - elf32_arm_compute_jump_table_size (htab));
16201 		  htab->root.sgotplt->size += 8;
16202 		  h->got.offset = (bfd_vma) -2;
16203 		  /* plt.got_offset needs to know there's a TLS_DESC
16204 		     reloc in the middle of .got.plt.  */
16205 		  htab->num_tls_desc++;
16206 		}
16207 
16208 	      if (tls_type & GOT_TLS_GD)
16209 		{
16210 		  /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16211 		     consecutive GOT slots.  If the symbol is both GD
16212 		     and GDESC, got.offset may have been
16213 		     overwritten.  */
16214 		  h->got.offset = s->size;
16215 		  s->size += 8;
16216 		}
16217 
16218 	      if (tls_type & GOT_TLS_IE)
16219 		/* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16220 		   slot.  */
16221 		s->size += 4;
16222 	    }
16223 
16224 	  dyn = htab->root.dynamic_sections_created;
16225 
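	  /* Decide whether GOT relocations for this symbol must use its
	     dynamic symbol index (INDX != 0) or can be resolved locally
	     at link time.  */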
16226 	  indx = 0;
16227 	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
16228 					       bfd_link_pic (info),
16229 					       h)
16230 	      && (!bfd_link_pic (info)
16231 		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
16232 	    indx = h->dynindx;
16233 
16234 	  if (tls_type != GOT_NORMAL
16235 	      && (bfd_link_pic (info) || indx != 0)
16236 	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16237 		  || h->root.type != bfd_link_hash_undefweak))
16238 	    {
16239 	      if (tls_type & GOT_TLS_IE)
16240 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16241 
16242 	      if (tls_type & GOT_TLS_GD)
16243 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16244 
16245 	      if (tls_type & GOT_TLS_GDESC)
16246 		{
16247 		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16248 		  /* GDESC needs a trampoline to jump to.  */
16249 		  htab->tls_trampoline = -1;
16250 		}
16251 
16252 	      /* Only GD needs it.  GDESC just emits one relocation per
16253 		 2 entries.  */
16254 	      if ((tls_type & GOT_TLS_GD) && indx != 0)
16255 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16256 	    }
16257 	  else if (((indx != -1) || htab->fdpic_p)
16258 		   && !SYMBOL_REFERENCES_LOCAL (info, h))
16259 	    {
16260 	      if (htab->root.dynamic_sections_created)
16261 		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
16262 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16263 	    }
16264 	  else if (h->type == STT_GNU_IFUNC
16265 		   && eh->plt.noncall_refcount == 0)
16266 	    /* No non-call references resolve to the STT_GNU_IFUNC's PLT entry;
16267 	       they all resolve dynamically instead.  Reserve room for the
16268 	       GOT entry's R_ARM_IRELATIVE relocation.  */
16269 	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16270 	  else if (bfd_link_pic (info)
16271 		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16272 		       || h->root.type != bfd_link_hash_undefweak))
16273 	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
16274 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16275 	  else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16276 	    /* Reserve room for rofixup for FDPIC executable.  */
16277 	    /* TLS relocs do not need space since they are completely
16278 	       resolved.  */
16279 	    htab->srofixup->size += 4;
16280 	}
16281     }
16282   else
16283     h->got.offset = (bfd_vma) -1;
16284 
16285   /* FDPIC support: allocate function descriptors and associated fixups.  */
16286   if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16287     {
16288       /* Symbol mustn't be exported.  */
16289       if (h->dynindx != -1)
16290 	abort();
16291 
16292       /* We only allocate one function descriptor with its associated relocation.  */
16293       if (eh->fdpic_cnts.funcdesc_offset == -1)
16294 	{
16295 	  asection *s = htab->root.sgot;
16296 
16297 	  eh->fdpic_cnts.funcdesc_offset = s->size;
16298 	  s->size += 8;
16299 	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16300 	  if (bfd_link_pic(info))
16301 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16302 	  else
16303 	    htab->srofixup->size += 8;
16304 	}
16305     }
16306 
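  /* R_ARM_GOTFUNCDESC references need a GOT entry holding the address
     of the function descriptor, in addition to the descriptor itself.  */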
16307   if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16308     {
16309       asection *s = htab->root.sgot;
16310 
16311       if (htab->root.dynamic_sections_created && h->dynindx == -1
16312 	  && !h->forced_local)
16313 	if (! bfd_elf_link_record_dynamic_symbol (info, h))
16314 	  return FALSE;
16315 
16316       if (h->dynindx == -1)
16317 	{
16318 	  /* We only allocate one function descriptor with its associated relocation.  */
16319 	  if (eh->fdpic_cnts.funcdesc_offset == -1)
16320 	    {
16321 
16322 	      eh->fdpic_cnts.funcdesc_offset = s->size;
16323 	      s->size += 8;
16324 	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16325 	      if (bfd_link_pic(info))
16326 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16327 	      else
16328 		htab->srofixup->size += 8;
16329 	    }
16330 	}
16331 
16332       /* Add one entry into the GOT and a R_ARM_FUNCDESC or
16333 	 R_ARM_RELATIVE/rofixup relocation on it.  */
16334       eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16335       s->size += 4;
16336       if (h->dynindx == -1 && !bfd_link_pic(info))
16337         htab->srofixup->size += 4;
16338       else
16339         elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16340     }
16341 
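  /* R_ARM_FUNCDESC references resolve directly to the function
     descriptor; each reference needs its own dynamic relocation or
     rofixup.  */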
16342   if (eh->fdpic_cnts.funcdesc_cnt > 0)
16343     {
16344       if (htab->root.dynamic_sections_created && h->dynindx == -1
16345 	  && !h->forced_local)
16346 	if (! bfd_elf_link_record_dynamic_symbol (info, h))
16347 	  return FALSE;
16348 
16349       if (h->dynindx == -1)
16350 	{
16351 	  /* We only allocate one function descriptor with its associated relocation.  */
16352 	  if (eh->fdpic_cnts.funcdesc_offset == -1)
16353 	    {
16354 	      asection *s = htab->root.sgot;
16355 
16356 	      eh->fdpic_cnts.funcdesc_offset = s->size;
16357 	      s->size += 8;
16358 	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16359 	      if (bfd_link_pic(info))
16360 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16361 	      else
16362 		htab->srofixup->size += 8;
16363 	    }
16364 	}
16365       if (h->dynindx == -1 && !bfd_link_pic(info))
16366 	{
16367 	  /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
16368 	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16369 	}
16370       else
16371 	{
16372 	  /* We will need one dynamic reloc per reference: either
16373 	     R_ARM_FUNCDESC, or R_ARM_RELATIVE for hidden symbols.  */
16374 	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16375 					eh->fdpic_cnts.funcdesc_cnt);
16376 	}
16377     }
16378 
16379   /* Allocate stubs for exported Thumb functions on v4t.  */
16380   if (!htab->use_blx && h->dynindx != -1
16381       && h->def_regular
16382       && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16383       && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16384     {
16385       struct elf_link_hash_entry * th;
16386       struct bfd_link_hash_entry * bh;
16387       struct elf_link_hash_entry * myh;
16388       char name[1024];
16389       asection *s;
16390       bh = NULL;
16391       /* Create a new symbol to register the real location of the function.  */
16392       s = h->root.u.def.section;
16393       sprintf (name, "__real_%s", h->root.root.string);
16394       _bfd_generic_link_add_one_symbol (info, s->owner,
16395 					name, BSF_GLOBAL, s,
16396 					h->root.u.def.value,
16397 					NULL, TRUE, FALSE, &bh);
16398 
16399       myh = (struct elf_link_hash_entry *) bh;
16400       myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16401       myh->forced_local = 1;
16402       ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16403       eh->export_glue = myh;
16404       th = record_arm_to_thumb_glue (info, h);
16405       /* Point the symbol at the stub.  */
16406       h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16407       ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16408       h->root.u.def.section = th->root.u.def.section;
16409       h->root.u.def.value = th->root.u.def.value & ~1;
16410     }
16411 
16412   if (eh->dyn_relocs == NULL)
16413     return TRUE;
16414 
16415   /* In the shared -Bsymbolic case, discard space allocated for
16416      dynamic pc-relative relocs against symbols which turn out to be
16417      defined in regular objects.  For the normal shared case, discard
16418      space for pc-relative relocs that have become local due to symbol
16419      visibility changes.  */
16420 
16421   if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
16422     {
16423       /* Relocs that use pc_count are PC-relative forms, which will appear
16424 	 on something like ".long foo - ." or "movw REG, foo - .".  We want
16425 	 calls to protected symbols to resolve directly to the function
16426 	 rather than going via the plt.  If people want function pointer
16427 	 comparisons to work as expected then they should avoid writing
16428 	 assembly like ".long foo - .".  */
16429       if (SYMBOL_CALLS_LOCAL (info, h))
16430 	{
16431 	  struct elf_dyn_relocs **pp;
16432 
16433 	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16434 	    {
16435 	      p->count -= p->pc_count;
16436 	      p->pc_count = 0;
16437 	      if (p->count == 0)
16438 		*pp = p->next;
16439 	      else
16440 		pp = &p->next;
16441 	    }
16442 	}
16443 
16444       if (htab->vxworks_p)
16445 	{
16446 	  struct elf_dyn_relocs **pp;
16447 
16448 	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16449 	    {
16450 	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16451 		*pp = p->next;
16452 	      else
16453 		pp = &p->next;
16454 	    }
16455 	}
16456 
16457       /* Also discard relocs on undefined weak syms with non-default
16458 	 visibility.  */
16459       if (eh->dyn_relocs != NULL
16460 	  && h->root.type == bfd_link_hash_undefweak)
16461 	{
16462 	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16463 	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16464 	    eh->dyn_relocs = NULL;
16465 
16466 	  /* Make sure undefined weak symbols are output as a dynamic
16467 	     symbol in PIEs.  */
16468 	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
16469 		   && !h->forced_local)
16470 	    {
16471 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
16472 		return FALSE;
16473 	    }
16474 	}
16475 
16476       else if (htab->root.is_relocatable_executable && h->dynindx == -1
16477 	       && h->root.type == bfd_link_hash_new)
16478 	{
16479 	  /* Output absolute symbols so that we can create relocations
16480 	     against them.  For normal symbols we output a relocation
16481 	     against the section that contains them.  */
16482 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16483 	    return FALSE;
16484 	}
16485 
16486     }
16487   else
16488     {
16489       /* For the non-shared case, discard space for relocs against
16490 	 symbols which turn out to need copy relocs or are not
16491 	 dynamic.  */
16492 
16493       if (!h->non_got_ref
16494 	  && ((h->def_dynamic
16495 	       && !h->def_regular)
16496 	      || (htab->root.dynamic_sections_created
16497 		  && (h->root.type == bfd_link_hash_undefweak
16498 		      || h->root.type == bfd_link_hash_undefined))))
16499 	{
16500 	  /* Make sure this symbol is output as a dynamic symbol.
16501 	     Undefined weak syms won't yet be marked as dynamic.  */
16502 	  if (h->dynindx == -1 && !h->forced_local
16503 	      && h->root.type == bfd_link_hash_undefweak)
16504 	    {
16505 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
16506 		return FALSE;
16507 	    }
16508 
16509 	  /* If that succeeded, we know we'll be keeping all the
16510 	     relocs.  */
16511 	  if (h->dynindx != -1)
16512 	    goto keep;
16513 	}
16514 
16515       eh->dyn_relocs = NULL;
16516 
16517     keep: ;
16518     }
16519 
16520   /* Finally, allocate space.  */
16521   for (p = eh->dyn_relocs; p != NULL; p = p->next)
16522     {
16523       asection *sreloc = elf_section_data (p->sec)->sreloc;
16524 
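      /* Locally-resolved IFUNC references use R_ARM_IRELATIVE; in FDPIC
	 executables, relocations against non-dynamic symbols become
	 rofixups instead of dynamic relocations.  */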
16525       if (h->type == STT_GNU_IFUNC
16526 	  && eh->plt.noncall_refcount == 0
16527 	  && SYMBOL_REFERENCES_LOCAL (info, h))
16528 	elf32_arm_allocate_irelocs (info, sreloc, p->count);
16529       else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16530 	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16531       else if (htab->fdpic_p && !bfd_link_pic(info))
16532 	htab->srofixup->size += 4 * p->count;
16533       else
16534 	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16535     }
16536 
16537   return TRUE;
16538 }
16539 
16540 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16541    read-only sections.  */
16542 
16543 static bfd_boolean
16544 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16545 {
16546   asection *sec;
16547 
16548   if (h->root.type == bfd_link_hash_indirect)
16549     return TRUE;
16550 
16551   sec = readonly_dynrelocs (h);
16552   if (sec != NULL)
16553     {
16554       struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16555 
16556       info->flags |= DF_TEXTREL;
16557       info->callbacks->minfo
16558 	(_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16559 	 sec->owner, h->root.root.string, sec);
16560 
16561       /* Not an error, just cut short the traversal.  */
16562       return FALSE;
16563     }
16564 
16565   return TRUE;
16566 }
16567 
16568 void
16569 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16570 				 int byteswap_code)
16571 {
16572   struct elf32_arm_link_hash_table *globals;
16573 
16574   globals = elf32_arm_hash_table (info);
16575   if (globals == NULL)
16576     return;
16577 
16578   globals->byteswap_code = byteswap_code;
16579 }
16580 
16581 /* Set the sizes of the dynamic sections.  */
16582 
16583 static bfd_boolean
16584 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16585 				 struct bfd_link_info * info)
16586 {
16587   bfd * dynobj;
16588   asection * s;
16589   bfd_boolean plt;
16590   bfd_boolean relocs;
16591   bfd *ibfd;
16592   struct elf32_arm_link_hash_table *htab;
16593 
16594   htab = elf32_arm_hash_table (info);
16595   if (htab == NULL)
16596     return FALSE;
16597 
16598   dynobj = elf_hash_table (info)->dynobj;
16599   BFD_ASSERT (dynobj != NULL);
16600   check_use_blx (htab);
16601 
16602   if (elf_hash_table (info)->dynamic_sections_created)
16603     {
16604       /* Set the contents of the .interp section to the interpreter.  */
16605       if (bfd_link_executable (info) && !info->nointerp)
16606 	{
16607 	  s = bfd_get_linker_section (dynobj, ".interp");
16608 	  BFD_ASSERT (s != NULL);
16609 	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16610 	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16611 	}
16612     }
16613 
16614   /* Set up .got offsets for local syms, and space for local dynamic
16615      relocs.  */
16616   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16617     {
16618       bfd_signed_vma *local_got;
16619       bfd_signed_vma *end_local_got;
16620       struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16621       char *local_tls_type;
16622       bfd_vma *local_tlsdesc_gotent;
16623       bfd_size_type locsymcount;
16624       Elf_Internal_Shdr *symtab_hdr;
16625       asection *srel;
16626       bfd_boolean is_vxworks = htab->vxworks_p;
16627       unsigned int symndx;
16628       struct fdpic_local *local_fdpic_cnts;
16629 
16630       if (! is_arm_elf (ibfd))
16631 	continue;
16632 
16633       for (s = ibfd->sections; s != NULL; s = s->next)
16634 	{
16635 	  struct elf_dyn_relocs *p;
16636 
16637 	  for (p = (struct elf_dyn_relocs *)
16638 		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16639 	    {
16640 	      if (!bfd_is_abs_section (p->sec)
16641 		  && bfd_is_abs_section (p->sec->output_section))
16642 		{
16643 		  /* Input section has been discarded, either because
16644 		     it is a copy of a linkonce section or due to
16645 		     linker script /DISCARD/, so we'll be discarding
16646 		     the relocs too.  */
16647 		}
16648 	      else if (is_vxworks
16649 		       && strcmp (p->sec->output_section->name,
16650 				  ".tls_vars") == 0)
16651 		{
16652 		  /* Relocations in vxworks .tls_vars sections are
16653 		     handled specially by the loader.  */
16654 		}
16655 	      else if (p->count != 0)
16656 		{
16657 		  srel = elf_section_data (p->sec)->sreloc;
16658 		  if (htab->fdpic_p && !bfd_link_pic(info))
16659 		    htab->srofixup->size += 4 * p->count;
16660 		  else
16661 		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
16662 		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16663 		    info->flags |= DF_TEXTREL;
16664 		}
16665 	    }
16666 	}
16667 
16668       local_got = elf_local_got_refcounts (ibfd);
16669       if (!local_got)
16670 	continue;
16671 
16672       symtab_hdr = & elf_symtab_hdr (ibfd);
16673       locsymcount = symtab_hdr->sh_info;
16674       end_local_got = local_got + locsymcount;
16675       local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16676       local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16677       local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16678       local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16679       symndx = 0;
16680       s = htab->root.sgot;
16681       srel = htab->root.srelgot;
16682       for (; local_got < end_local_got;
16683 	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
16684 	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16685 	{
16686 	  *local_tlsdesc_gotent = (bfd_vma) -1;
16687 	  local_iplt = *local_iplt_ptr;
16688 
16689 	  /* FDPIC support.  */
16690 	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16691 	    {
16692 	      if (local_fdpic_cnts->funcdesc_offset == -1)
16693 		{
16694 		  local_fdpic_cnts->funcdesc_offset = s->size;
16695 		  s->size += 8;
16696 
16697 		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16698 		  if (bfd_link_pic(info))
16699 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16700 		  else
16701 		    htab->srofixup->size += 8;
16702 		}
16703 	    }
16704 
16705 	  if (local_fdpic_cnts->funcdesc_cnt > 0)
16706 	    {
16707 	      if (local_fdpic_cnts->funcdesc_offset == -1)
16708 		{
16709 		  local_fdpic_cnts->funcdesc_offset = s->size;
16710 		  s->size += 8;
16711 
16712 		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16713 		  if (bfd_link_pic(info))
16714 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16715 		  else
16716 		    htab->srofixup->size += 8;
16717 		}
16718 
16719 	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
16720 	      if (bfd_link_pic(info))
16721 		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16722 	      else
16723 		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16724 	    }
16725 
16726 	  if (local_iplt != NULL)
16727 	    {
16728 	      struct elf_dyn_relocs *p;
16729 
16730 	      if (local_iplt->root.refcount > 0)
16731 		{
16732 		  elf32_arm_allocate_plt_entry (info, TRUE,
16733 						&local_iplt->root,
16734 						&local_iplt->arm);
16735 		  if (local_iplt->arm.noncall_refcount == 0)
16736 		    /* All references to the PLT are calls, so all
16737 		       non-call references can resolve directly to the
16738 		       run-time target.  This means that the .got entry
16739 		       would be the same as the .igot.plt entry, so there's
16740 		       no point creating both.  */
16741 		    *local_got = 0;
16742 		}
16743 	      else
16744 		{
16745 		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16746 		  local_iplt->root.offset = (bfd_vma) -1;
16747 		}
16748 
16749 	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16750 		{
16751 		  asection *psrel;
16752 
16753 		  psrel = elf_section_data (p->sec)->sreloc;
16754 		  if (local_iplt->arm.noncall_refcount == 0)
16755 		    elf32_arm_allocate_irelocs (info, psrel, p->count);
16756 		  else
16757 		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16758 		}
16759 	    }
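	  /* Now allocate GOT space for this local symbol, mirroring the
	     handling of global symbols in allocate_dynrelocs_for_symbol.  */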
16760 	  if (*local_got > 0)
16761 	    {
16762 	      Elf_Internal_Sym *isym;
16763 
16764 	      *local_got = s->size;
16765 	      if (*local_tls_type & GOT_TLS_GD)
16766 		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
16767 		s->size += 8;
16768 	      if (*local_tls_type & GOT_TLS_GDESC)
16769 		{
16770 		  *local_tlsdesc_gotent = htab->root.sgotplt->size
16771 		    - elf32_arm_compute_jump_table_size (htab);
16772 		  htab->root.sgotplt->size += 8;
16773 		  *local_got = (bfd_vma) -2;
16774 		  /* plt.got_offset needs to know there's a TLS_DESC
16775 		     reloc in the middle of .got.plt.  */
16776 		  htab->num_tls_desc++;
16777 		}
16778 	      if (*local_tls_type & GOT_TLS_IE)
16779 		s->size += 4;
16780 
16781 	      if (*local_tls_type & GOT_NORMAL)
16782 		{
16783 		  /* If the symbol is both GD and GDESC, *local_got
16784 		     may have been overwritten.  */
16785 		  *local_got = s->size;
16786 		  s->size += 4;
16787 		}
16788 
16789 	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
16790 	      if (isym == NULL)
16791 		return FALSE;
16792 
16793 	      /* If all references to an STT_GNU_IFUNC PLT are calls,
16794 		 then all non-call references, including this GOT entry,
16795 		 resolve directly to the run-time target.  */
16796 	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16797 		  && (local_iplt == NULL
16798 		      || local_iplt->arm.noncall_refcount == 0))
16799 		elf32_arm_allocate_irelocs (info, srel, 1);
16800 	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16801 		{
16802 		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
16803 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16804 		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16805 		    htab->srofixup->size += 4;
16806 
16807 		  if ((bfd_link_pic (info) || htab->fdpic_p)
16808 		      && *local_tls_type & GOT_TLS_GDESC)
16809 		    {
16810 		      elf32_arm_allocate_dynrelocs (info,
16811 						    htab->root.srelplt, 1);
16812 		      htab->tls_trampoline = -1;
16813 		    }
16814 		}
16815 	    }
16816 	  else
16817 	    *local_got = (bfd_vma) -1;
16818 	}
16819     }
16820 
16821   if (htab->tls_ldm_got.refcount > 0)
16822     {
16823       /* Allocate two GOT entries and one dynamic relocation (if necessary)
16824 	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
16825       htab->tls_ldm_got.offset = htab->root.sgot->size;
16826       htab->root.sgot->size += 8;
16827       if (bfd_link_pic (info))
16828 	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16829     }
16830   else
16831     htab->tls_ldm_got.offset = -1;
16832 
16833   /* At the very end of the .rofixup section is a pointer to the GOT;
16834      reserve space for it.  */
16835   if (htab->fdpic_p && htab->srofixup != NULL)
16836     htab->srofixup->size += 4;
16837 
16838   /* Allocate global sym .plt and .got entries, and space for global
16839      sym dynamic relocs.  */
16840   elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
16841 
16842   /* Here we rummage through the input BFDs to collect glue information.  */
16843   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16844     {
16845       if (! is_arm_elf (ibfd))
16846 	continue;
16847 
16848       /* Initialise mapping tables for code/data.  */
16849       bfd_elf32_arm_init_maps (ibfd);
16850 
16851       if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
16852 	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
16853 	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
16854 	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
16855     }
16856 
16857   /* Allocate space for the glue sections now that we've sized them.  */
16858   bfd_elf32_arm_allocate_interworking_sections (info);
16859 
16860   /* For every jump slot reserved in the sgotplt, reloc_count is
16861      incremented.  However, when we reserve space for TLS descriptors,
16862      it's not incremented, so in order to compute the space reserved
16863      for them, it suffices to multiply the reloc count by the jump
16864      slot size.  */
16865   if (htab->root.srelplt)
16866     htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
16867 
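  /* Reserve room in .plt for the trampoline used by TLS descriptor
     relocations.  */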
16868   if (htab->tls_trampoline)
16869     {
16870       if (htab->root.splt->size == 0)
16871 	htab->root.splt->size += htab->plt_header_size;
16872 
16873       htab->tls_trampoline = htab->root.splt->size;
16874       htab->root.splt->size += htab->plt_entry_size;
16875 
16876       /* If we're not using lazy TLS relocations, don't generate the
16877 	 PLT and GOT entries they require.  */
16878       if (!(info->flags & DF_BIND_NOW))
16879 	{
16880 	  htab->dt_tlsdesc_got = htab->root.sgot->size;
16881 	  htab->root.sgot->size += 4;
16882 
16883 	  htab->dt_tlsdesc_plt = htab->root.splt->size;
16884 	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
16885 	}
16886     }
16887 
16888   /* The check_relocs and adjust_dynamic_symbol entry points have
16889      determined the sizes of the various dynamic sections.  Allocate
16890      memory for them.  */
16891   plt = FALSE;
16892   relocs = FALSE;
16893   for (s = dynobj->sections; s != NULL; s = s->next)
16894     {
16895       const char * name;
16896 
16897       if ((s->flags & SEC_LINKER_CREATED) == 0)
16898 	continue;
16899 
16900       /* It's OK to base decisions on the section name, because none
16901 	 of the dynobj section names depend upon the input files.  */
16902       name = bfd_get_section_name (dynobj, s);
16903 
16904       if (s == htab->root.splt)
16905 	{
16906 	  /* Remember whether there is a PLT.  */
16907 	  plt = s->size != 0;
16908 	}
16909       else if (CONST_STRNEQ (name, ".rel"))
16910 	{
16911 	  if (s->size != 0)
16912 	    {
16913 	      /* Remember whether there are any reloc sections other
16914 		 than .rel(a).plt and .rela.plt.unloaded.  */
16915 	      if (s != htab->root.srelplt && s != htab->srelplt2)
16916 		relocs = TRUE;
16917 
16918 	      /* We use the reloc_count field as a counter if we need
16919 		 to copy relocs into the output file.  */
16920 	      s->reloc_count = 0;
16921 	    }
16922 	}
16923       else if (s != htab->root.sgot
16924 	       && s != htab->root.sgotplt
16925 	       && s != htab->root.iplt
16926 	       && s != htab->root.igotplt
16927 	       && s != htab->root.sdynbss
16928 	       && s != htab->root.sdynrelro
16929 	       && s != htab->srofixup)
16930 	{
16931 	  /* It's not one of our sections, so don't allocate space.  */
16932 	  continue;
16933 	}
16934 
16935       if (s->size == 0)
16936 	{
16937 	  /* If we don't need this section, strip it from the
16938 	     output file.  This is mostly to handle .rel(a).bss and
16939 	     .rel(a).plt.  We must create both sections in
16940 	     create_dynamic_sections, because they must be created
16941 	     before the linker maps input sections to output
16942 	     sections.  The linker does that before
16943 	     adjust_dynamic_symbol is called, and it is that
16944 	     function which decides whether anything needs to go
16945 	     into these sections.  */
16946 	  s->flags |= SEC_EXCLUDE;
16947 	  continue;
16948 	}
16949 
16950       if ((s->flags & SEC_HAS_CONTENTS) == 0)
16951 	continue;
16952 
16953       /* Allocate memory for the section contents.  */
16954       s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
16955       if (s->contents == NULL)
16956 	return FALSE;
16957     }
16958 
16959   if (elf_hash_table (info)->dynamic_sections_created)
16960     {
16961       /* Add some entries to the .dynamic section.  We fill in the
16962 	 values later, in elf32_arm_finish_dynamic_sections, but we
16963 	 must add the entries now so that we get the correct size for
16964 	 the .dynamic section.  The DT_DEBUG entry is filled in by the
16965 	 dynamic linker and used by the debugger.  */
16966 #define add_dynamic_entry(TAG, VAL) \
16967   _bfd_elf_add_dynamic_entry (info, TAG, VAL)
16968 
16969       if (bfd_link_executable (info))
16970 	{
16971 	  if (!add_dynamic_entry (DT_DEBUG, 0))
16972 	    return FALSE;
16973 	}
16974 
16975       if (plt)
16976 	{
16977 	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
16978 	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
16979 	      || !add_dynamic_entry (DT_PLTREL,
16980 				     htab->use_rel ? DT_REL : DT_RELA)
16981 	      || !add_dynamic_entry (DT_JMPREL, 0))
16982 	    return FALSE;
16983 
16984 	  if (htab->dt_tlsdesc_plt
16985 	      && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
16986 		  || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
16987 	    return FALSE;
16988 	}
16989 
16990       if (relocs)
16991 	{
16992 	  if (htab->use_rel)
16993 	    {
16994 	      if (!add_dynamic_entry (DT_REL, 0)
16995 		  || !add_dynamic_entry (DT_RELSZ, 0)
16996 		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
16997 		return FALSE;
16998 	    }
16999 	  else
17000 	    {
17001 	      if (!add_dynamic_entry (DT_RELA, 0)
17002 		  || !add_dynamic_entry (DT_RELASZ, 0)
17003 		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
17004 		return FALSE;
17005 	    }
17006 	}
17007 
17008       /* If any dynamic relocs apply to a read-only section,
17009 	 then we need a DT_TEXTREL entry.  */
17010       if ((info->flags & DF_TEXTREL) == 0)
17011 	elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);
17012 
17013       if ((info->flags & DF_TEXTREL) != 0)
17014 	{
17015 	  if (!add_dynamic_entry (DT_TEXTREL, 0))
17016 	    return FALSE;
17017 	}
17018       if (htab->vxworks_p
17019 	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
17020 	return FALSE;
17021     }
17022 #undef add_dynamic_entry
17023 
17024   return TRUE;
17025 }
17026 
17027 /* Size sections even though they're not dynamic.  We use this hook to
17028    set up _TLS_MODULE_BASE_, if needed.  */
17029 
17030 static bfd_boolean
17031 elf32_arm_always_size_sections (bfd *output_bfd,
17032 				struct bfd_link_info *info)
17033 {
17034   asection *tls_sec;
17035   struct elf32_arm_link_hash_table *htab;
17036 
17037   htab = elf32_arm_hash_table (info);
17038 
17039   if (bfd_link_relocatable (info))
17040     return TRUE;
17041 
17042   tls_sec = elf_hash_table (info)->tls_sec;
17043 
17044   if (tls_sec)
17045     {
17046       struct elf_link_hash_entry *tlsbase;
17047 
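      /* Define the hidden, module-local symbol _TLS_MODULE_BASE_ at the
	 start of the TLS segment.  */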
17048       tlsbase = elf_link_hash_lookup
17049 	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
17050 
17051       if (tlsbase)
17052 	{
17053 	  struct bfd_link_hash_entry *bh = NULL;
17054 	  const struct elf_backend_data *bed
17055 	    = get_elf_backend_data (output_bfd);
17056 
17057 	  if (!(_bfd_generic_link_add_one_symbol
17058 		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17059 		 tls_sec, 0, NULL, FALSE,
17060 		 bed->collect, &bh)))
17061 	    return FALSE;
17062 
17063 	  tlsbase->type = STT_TLS;
17064 	  tlsbase = (struct elf_link_hash_entry *)bh;
17065 	  tlsbase->def_regular = 1;
17066 	  tlsbase->other = STV_HIDDEN;
17067 	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
17068 	}
17069     }
17070 
17071   if (htab->fdpic_p && !bfd_link_relocatable (info)
17072       && !bfd_elf_stack_segment_size (output_bfd, info,
17073 				      "__stacksize", DEFAULT_STACK_SIZE))
17074     return FALSE;
17075 
17076   return TRUE;
17077 }
17078 
17079 /* Finish up dynamic symbol handling.  We set the contents of various
17080    dynamic sections here.  */
17081 
17082 static bfd_boolean
17083 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17084 				 struct bfd_link_info * info,
17085 				 struct elf_link_hash_entry * h,
17086 				 Elf_Internal_Sym * sym)
17087 {
17088   struct elf32_arm_link_hash_table *htab;
17089   struct elf32_arm_link_hash_entry *eh;
17090 
17091   htab = elf32_arm_hash_table (info);
17092   if (htab == NULL)
17093     return FALSE;
17094 
17095   eh = (struct elf32_arm_link_hash_entry *) h;
17096 
17097   if (h->plt.offset != (bfd_vma) -1)
17098     {
17099       if (!eh->is_iplt)
17100 	{
17101 	  BFD_ASSERT (h->dynindx != -1);
17102 	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17103 					      h->dynindx, 0))
17104 	    return FALSE;
17105 	}
17106 
17107       if (!h->def_regular)
17108 	{
17109 	  /* Mark the symbol as undefined, rather than as defined in
17110 	     the .plt section.  */
17111 	  sym->st_shndx = SHN_UNDEF;
17112 	  /* If the symbol is weak we need to clear the value.
17113 	     Otherwise, the PLT entry would provide a definition for
17114 	     the symbol even if the symbol wasn't defined anywhere,
17115 	     and so the symbol would never be NULL.  Leave the value if
17116 	     there were any relocations where pointer equality matters
17117 	     (this is a clue for the dynamic linker, to make function
17118 	     pointer comparisons work between an application and shared
17119 	     library).  */
17120 	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17121 	    sym->st_value = 0;
17122 	}
17123       else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17124 	{
17125 	  /* At least one non-call relocation references this .iplt entry,
17126 	     so the .iplt entry is the function's canonical address.  */
17127 	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17128 	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17129 	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
17130 			   (output_bfd, htab->root.iplt->output_section));
17131 	  sym->st_value = (h->plt.offset
17132 			   + htab->root.iplt->output_section->vma
17133 			   + htab->root.iplt->output_offset);
17134 	}
17135     }
17136 
17137   if (h->needs_copy)
17138     {
17139       asection * s;
17140       Elf_Internal_Rela rel;
17141 
17142       /* This symbol needs a copy reloc.  Set it up.  */
17143       BFD_ASSERT (h->dynindx != -1
17144 		  && (h->root.type == bfd_link_hash_defined
17145 		      || h->root.type == bfd_link_hash_defweak));
17146 
17147       rel.r_addend = 0;
17148       rel.r_offset = (h->root.u.def.value
17149 		      + h->root.u.def.section->output_section->vma
17150 		      + h->root.u.def.section->output_offset);
17151       rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17152       if (h->root.u.def.section == htab->root.sdynrelro)
17153 	s = htab->root.sreldynrelro;
17154       else
17155 	s = htab->root.srelbss;
17156       elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17157     }
17158 
17159   /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
17160      and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17161      it is relative to the ".got" section.  */
17162   if (h == htab->root.hdynamic
17163       || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
17164     sym->st_shndx = SHN_ABS;
17165 
17166   return TRUE;
17167 }
17168 
17169 static void
17170 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17171 		    void *contents,
17172 		    const unsigned long *template, unsigned count)
17173 {
17174   unsigned ix;
17175 
17176   for (ix = 0; ix != count; ix++)
17177     {
17178       unsigned long insn = template[ix];
17179 
17180       /* Emit mov pc,rx if bx is not permitted.  */
17181       if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17182 	insn = (insn & 0xf000000f) | 0x01a0f000;
17183       put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17184     }
17185 }
17186 
17187 /* Install the special first PLT entry for elf32-arm-nacl.  Unlike
17188    other variants, NaCl needs this entry in a static executable's
17189    .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
17190    zero.  For .iplt really only the last bundle is useful, and .iplt
17191    could have a shorter first entry, with each individual PLT entry's
17192    relative branch calculated differently so it targets the last
17193    bundle instead of the instruction before it (labelled .Lplt_tail
17194    above).  But it's simpler to keep the size and layout of PLT0
17195    consistent with the dynamic case, at the cost of some dead code at
17196    the start of .iplt and the one dead store to the stack at the start
17197    of .Lplt_tail.  */
17198 static void
17199 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17200 		   asection *plt, bfd_vma got_displacement)
17201 {
17202   unsigned int i;
17203 
17204   put_arm_insn (htab, output_bfd,
17205 		elf32_arm_nacl_plt0_entry[0]
17206 		| arm_movw_immediate (got_displacement),
17207 		plt->contents + 0);
17208   put_arm_insn (htab, output_bfd,
17209 		elf32_arm_nacl_plt0_entry[1]
17210 		| arm_movt_immediate (got_displacement),
17211 		plt->contents + 4);
17212 
17213   for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17214     put_arm_insn (htab, output_bfd,
17215 		  elf32_arm_nacl_plt0_entry[i],
17216 		  plt->contents + (i * 4));
17217 }
17218 
17219 /* Finish up the dynamic sections.  */
17220 
17221 static bfd_boolean
17222 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17223 {
17224   bfd * dynobj;
17225   asection * sgot;
17226   asection * sdyn;
17227   struct elf32_arm_link_hash_table *htab;
17228 
17229   htab = elf32_arm_hash_table (info);
17230   if (htab == NULL)
17231     return FALSE;
17232 
17233   dynobj = elf_hash_table (info)->dynobj;
17234 
17235   sgot = htab->root.sgotplt;
17236   /* A broken linker script might have discarded the dynamic sections.
17237      Catch this here so that we do not seg-fault later on.  */
17238   if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17239     return FALSE;
17240   sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17241 
17242   if (elf_hash_table (info)->dynamic_sections_created)
17243     {
17244       asection *splt;
17245       Elf32_External_Dyn *dyncon, *dynconend;
17246 
17247       splt = htab->root.splt;
17248       BFD_ASSERT (splt != NULL && sdyn != NULL);
17249       BFD_ASSERT (htab->symbian_p || sgot != NULL);
17250 
17251       dyncon = (Elf32_External_Dyn *) sdyn->contents;
17252       dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17253 
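      /* Walk the .dynamic entries added in elf32_arm_size_dynamic_sections
	 and fill in their final values now that section addresses are
	 known.  */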
17254       for (; dyncon < dynconend; dyncon++)
17255 	{
17256 	  Elf_Internal_Dyn dyn;
17257 	  const char * name;
17258 	  asection * s;
17259 
17260 	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17261 
17262 	  switch (dyn.d_tag)
17263 	    {
17264 	      unsigned int type;
17265 
17266 	    default:
17267 	      if (htab->vxworks_p
17268 		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17269 		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17270 	      break;
17271 
17272 	    case DT_HASH:
17273 	      name = ".hash";
17274 	      goto get_vma_if_bpabi;
17275 	    case DT_STRTAB:
17276 	      name = ".dynstr";
17277 	      goto get_vma_if_bpabi;
17278 	    case DT_SYMTAB:
17279 	      name = ".dynsym";
17280 	      goto get_vma_if_bpabi;
17281 	    case DT_VERSYM:
17282 	      name = ".gnu.version";
17283 	      goto get_vma_if_bpabi;
17284 	    case DT_VERDEF:
17285 	      name = ".gnu.version_d";
17286 	      goto get_vma_if_bpabi;
17287 	    case DT_VERNEED:
17288 	      name = ".gnu.version_r";
17289 	      goto get_vma_if_bpabi;
17290 
17291 	    case DT_PLTGOT:
17292 	      name = htab->symbian_p ? ".got" : ".got.plt";
17293 	      goto get_vma;
17294 	    case DT_JMPREL:
17295 	      name = RELOC_SECTION (htab, ".plt");
17296 	    get_vma:
17297 	      s = bfd_get_linker_section (dynobj, name);
17298 	      if (s == NULL)
17299 		{
17300 		  _bfd_error_handler
17301 		    (_("could not find section %s"), name);
17302 		  bfd_set_error (bfd_error_invalid_operation);
17303 		  return FALSE;
17304 		}
17305 	      if (!htab->symbian_p)
17306 		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17307 	      else
17308 		/* In the BPABI, tags in the PT_DYNAMIC section point
17309 		   at the file offset, not the memory address, for the
17310 		   convenience of the post linker.  */
17311 		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
17312 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17313 	      break;
17314 
17315 	    get_vma_if_bpabi:
17316 	      if (htab->symbian_p)
17317 		goto get_vma;
17318 	      break;
17319 
17320 	    case DT_PLTRELSZ:
17321 	      s = htab->root.srelplt;
17322 	      BFD_ASSERT (s != NULL);
17323 	      dyn.d_un.d_val = s->size;
17324 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17325 	      break;
17326 
17327 	    case DT_RELSZ:
17328 	    case DT_RELASZ:
17329 	    case DT_REL:
17330 	    case DT_RELA:
17331 	      /* In the BPABI, the DT_REL tag must point at the file
17332 		 offset, not the VMA, of the first relocation
17333 		 section.  So, we use code similar to that in
17334 		 elflink.c, but do not check for SHF_ALLOC on the
17335 		 relocation section, since relocation sections are
17336 		 never allocated under the BPABI.  PLT relocs are also
17337 		 included.  */
17338 	      if (htab->symbian_p)
17339 		{
17340 		  unsigned int i;
17341 		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
17342 			  ? SHT_REL : SHT_RELA);
17343 		  dyn.d_un.d_val = 0;
17344 		  for (i = 1; i < elf_numsections (output_bfd); i++)
17345 		    {
17346 		      Elf_Internal_Shdr *hdr
17347 			= elf_elfsections (output_bfd)[i];
17348 		      if (hdr->sh_type == type)
17349 			{
17350 			  if (dyn.d_tag == DT_RELSZ
17351 			      || dyn.d_tag == DT_RELASZ)
17352 			    dyn.d_un.d_val += hdr->sh_size;
17353 			  else if ((ufile_ptr) hdr->sh_offset
17354 				   <= dyn.d_un.d_val - 1)
17355 			    dyn.d_un.d_val = hdr->sh_offset;
17356 			}
17357 		    }
17358 		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17359 		}
17360 	      break;
17361 
17362 	    case DT_TLSDESC_PLT:
17363 	      s = htab->root.splt;
17364 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17365 				+ htab->dt_tlsdesc_plt);
17366 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17367 	      break;
17368 
17369 	    case DT_TLSDESC_GOT:
17370 	      s = htab->root.sgot;
17371 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17372 				+ htab->dt_tlsdesc_got);
17373 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17374 	      break;
17375 
17376 	      /* Set the bottom bit of DT_INIT/FINI if the
17377 		 corresponding function is Thumb.  */
17378 	    case DT_INIT:
17379 	      name = info->init_function;
17380 	      goto get_sym;
17381 	    case DT_FINI:
17382 	      name = info->fini_function;
17383 	    get_sym:
17384 	      /* If it wasn't set by elf_bfd_final_link
17385 		 then there is nothing to adjust.  */
17386 	      if (dyn.d_un.d_val != 0)
17387 		{
17388 		  struct elf_link_hash_entry * eh;
17389 
17390 		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
17391 					     FALSE, FALSE, TRUE);
17392 		  if (eh != NULL
17393 		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17394 			 == ST_BRANCH_TO_THUMB)
17395 		    {
17396 		      dyn.d_un.d_val |= 1;
17397 		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17398 		    }
17399 		}
17400 	      break;
17401 	    }
17402 	}
17403 
17404       /* Fill in the first entry in the procedure linkage table.  */
17405       if (splt->size > 0 && htab->plt_header_size)
17406 	{
17407 	  const bfd_vma *plt0_entry;
17408 	  bfd_vma got_address, plt_address, got_displacement;
17409 
17410 	  /* Calculate the addresses of the GOT and PLT.  */
17411 	  got_address = sgot->output_section->vma + sgot->output_offset;
17412 	  plt_address = splt->output_section->vma + splt->output_offset;
17413 
17414 	  if (htab->vxworks_p)
17415 	    {
17416 	      /* The VxWorks GOT is relocated by the dynamic linker.
17417 		 Therefore, we must emit relocations rather than simply
17418 		 computing the values now.  */
17419 	      Elf_Internal_Rela rel;
17420 
17421 	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17422 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17423 			    splt->contents + 0);
17424 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17425 			    splt->contents + 4);
17426 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17427 			    splt->contents + 8);
17428 	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17429 
17430 	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
17431 	      rel.r_offset = plt_address + 12;
17432 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17433 	      rel.r_addend = 0;
17434 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17435 				     htab->srelplt2->contents);
17436 	    }
17437 	  else if (htab->nacl_p)
17438 	    arm_nacl_put_plt0 (htab, output_bfd, splt,
17439 			       got_address + 8 - (plt_address + 16));
17440 	  else if (using_thumb_only (htab))
17441 	    {
17442 	      got_displacement = got_address - (plt_address + 12);
17443 
17444 	      plt0_entry = elf32_thumb2_plt0_entry;
17445 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17446 			    splt->contents + 0);
17447 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17448 			    splt->contents + 4);
17449 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17450 			    splt->contents + 8);
17451 
17452 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17453 	    }
17454 	  else
17455 	    {
17456 	      got_displacement = got_address - (plt_address + 16);
17457 
17458 	      plt0_entry = elf32_arm_plt0_entry;
17459 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17460 			    splt->contents + 0);
17461 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17462 			    splt->contents + 4);
17463 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17464 			    splt->contents + 8);
17465 	      put_arm_insn (htab, output_bfd, plt0_entry[3],
17466 			    splt->contents + 12);
17467 
17468 #ifdef FOUR_WORD_PLT
17469 	      /* The displacement value goes in the otherwise-unused
17470 		 last word of the second entry.  */
17471 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17472 #else
17473 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17474 #endif
17475 	    }
17476 	}
17477 
17478       /* UnixWare sets the entsize of .plt to 4, although that doesn't
17479 	 really seem like the right value.  */
17480       if (splt->output_section->owner == output_bfd)
17481 	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17482 
17483       if (htab->dt_tlsdesc_plt)
17484 	{
17485 	  bfd_vma got_address
17486 	    = sgot->output_section->vma + sgot->output_offset;
17487 	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17488 				    + htab->root.sgot->output_offset);
17489 	  bfd_vma plt_address
17490 	    = splt->output_section->vma + splt->output_offset;
17491 
17492 	  arm_put_trampoline (htab, output_bfd,
17493 			      splt->contents + htab->dt_tlsdesc_plt,
17494 			      dl_tlsdesc_lazy_trampoline, 6);
17495 
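	  /* Patch the trampoline's literal pool with the PC-relative
	     offsets of the reserved TLS descriptor GOT slot and of the
	     start of .got.plt.  */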
17496 	  bfd_put_32 (output_bfd,
17497 		      gotplt_address + htab->dt_tlsdesc_got
17498 		      - (plt_address + htab->dt_tlsdesc_plt)
17499 		      - dl_tlsdesc_lazy_trampoline[6],
17500 		      splt->contents + htab->dt_tlsdesc_plt + 24);
17501 	  bfd_put_32 (output_bfd,
17502 		      got_address - (plt_address + htab->dt_tlsdesc_plt)
17503 		      - dl_tlsdesc_lazy_trampoline[7],
17504 		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
17505 	}
17506 
17507       if (htab->tls_trampoline)
17508 	{
17509 	  arm_put_trampoline (htab, output_bfd,
17510 			      splt->contents + htab->tls_trampoline,
17511 			      tls_trampoline, 3);
17512 #ifdef FOUR_WORD_PLT
17513 	  bfd_put_32 (output_bfd, 0x00000000,
17514 		      splt->contents + htab->tls_trampoline + 12);
17515 #endif
17516 	}
17517 
17518       if (htab->vxworks_p
17519 	  && !bfd_link_pic (info)
17520 	  && htab->root.splt->size > 0)
17521 	{
17522 	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
17523 	     incorrect symbol indexes.  */
17524 	  int num_plts;
17525 	  unsigned char *p;
17526 
17527 	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
17528 		      / htab->plt_entry_size);
17529 	  p = htab->srelplt2->contents + RELOC_SIZE (htab);
17530 
17531 	  for (; num_plts; num_plts--)
17532 	    {
17533 	      Elf_Internal_Rela rel;
17534 
17535 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17536 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17537 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17538 	      p += RELOC_SIZE (htab);
17539 
17540 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17541 	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17542 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17543 	      p += RELOC_SIZE (htab);
17544 	    }
17545 	}
17546     }
17547 
17548   if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
17549     /* NaCl uses a special first entry in .iplt too.  */
17550     arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17551 
17552   /* Fill in the first three entries in the global offset table.  */
17553   if (sgot)
17554     {
17555       if (sgot->size > 0)
17556 	{
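	  /* The first GOT entry holds the address of the .dynamic section;
	     the next two are reserved for the dynamic linker.  */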
17557 	  if (sdyn == NULL)
17558 	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17559 	  else
17560 	    bfd_put_32 (output_bfd,
17561 			sdyn->output_section->vma + sdyn->output_offset,
17562 			sgot->contents);
17563 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17564 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17565 	}
17566 
17567       elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17568     }
17569 
17570   /* At the very end of the .rofixup section is a pointer to the GOT.  */
17571   if (htab->fdpic_p && htab->srofixup != NULL)
17572     {
17573       struct elf_link_hash_entry *hgot = htab->root.hgot;
17574 
17575       bfd_vma got_value = hgot->root.u.def.value
17576 	+ hgot->root.u.def.section->output_section->vma
17577 	+ hgot->root.u.def.section->output_offset;
17578 
17579       arm_elf_add_rofixup(output_bfd, htab->srofixup, got_value);
17580 
17581       /* Make sure we allocated and generated the same number of fixups.  */
17582       BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17583     }
17584 
17585   return TRUE;
17586 }
17587 
17588 static void
17589 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
17590 {
17591   Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
17592   struct elf32_arm_link_hash_table *globals;
17593   struct elf_segment_map *m;
17594 
17595   i_ehdrp = elf_elfheader (abfd);
17596 
17597   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17598     i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17599   else
17600     _bfd_elf_post_process_headers (abfd, link_info);
17601   i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17602 
17603   if (link_info)
17604     {
17605       globals = elf32_arm_hash_table (link_info);
17606       if (globals != NULL && globals->byteswap_code)
17607 	i_ehdrp->e_flags |= EF_ARM_BE8;
17608 
17609       if (globals != NULL && globals->fdpic_p)
17610 	i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17611     }
17612 
17613   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17614       && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17615     {
17616       int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17617       if (abi == AEABI_VFP_args_vfp)
17618 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17619       else
17620 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17621     }
17622 
17623   /* Scan segment to set p_flags attribute if it contains only sections with
17624      SHF_ARM_PURECODE flag.  */
17625   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17626     {
17627       unsigned int j;
17628 
17629       if (m->count == 0)
17630 	continue;
17631       for (j = 0; j < m->count; j++)
17632 	{
17633 	  if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17634 	    break;
17635 	}
17636       if (j == m->count)
17637 	{
17638 	  m->p_flags = PF_X;
17639 	  m->p_flags_valid = 1;
17640 	}
17641     }
17642 }
17643 
17644 static enum elf_reloc_type_class
17645 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17646 			    const asection *rel_sec ATTRIBUTE_UNUSED,
17647 			    const Elf_Internal_Rela *rela)
17648 {
17649   switch ((int) ELF32_R_TYPE (rela->r_info))
17650     {
17651     case R_ARM_RELATIVE:
17652       return reloc_class_relative;
17653     case R_ARM_JUMP_SLOT:
17654       return reloc_class_plt;
17655     case R_ARM_COPY:
17656       return reloc_class_copy;
17657     case R_ARM_IRELATIVE:
17658       return reloc_class_ifunc;
17659     default:
17660       return reloc_class_normal;
17661     }
17662 }
17663 
17664 static void
17665 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
17666 {
17667   bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17668 }
17669 
17670 /* Return TRUE if this is an unwinding table entry.  */
17671 
17672 static bfd_boolean
17673 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17674 {
17675   return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17676 	  || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17677 }
17678 
17679 
17680 /* Set the type and flags for an ARM section.  We do this by
17681    the section name, which is a hack, but ought to work.  */
17682 
17683 static bfd_boolean
17684 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17685 {
17686   const char * name;
17687 
17688   name = bfd_get_section_name (abfd, sec);
17689 
17690   if (is_arm_elf_unwind_section_name (abfd, name))
17691     {
17692       hdr->sh_type = SHT_ARM_EXIDX;
17693       hdr->sh_flags |= SHF_LINK_ORDER;
17694     }
17695 
17696   if (sec->flags & SEC_ELF_PURECODE)
17697     hdr->sh_flags |= SHF_ARM_PURECODE;
17698 
17699   return TRUE;
17700 }
17701 
17702 /* Handle an ARM specific section when reading an object file.  This is
17703    called when bfd_section_from_shdr finds a section with an unknown
17704    type.  */
17705 
17706 static bfd_boolean
17707 elf32_arm_section_from_shdr (bfd *abfd,
17708 			     Elf_Internal_Shdr * hdr,
17709 			     const char *name,
17710 			     int shindex)
17711 {
17712   /* There ought to be a place to keep ELF backend specific flags, but
17713      at the moment there isn't one.  We just keep track of the
17714      sections by their name, instead.  Fortunately, the ABI gives
17715      names for all the ARM specific sections, so we will probably get
17716      away with this.  */
17717   switch (hdr->sh_type)
17718     {
17719     case SHT_ARM_EXIDX:
17720     case SHT_ARM_PREEMPTMAP:
17721     case SHT_ARM_ATTRIBUTES:
17722       break;
17723 
17724     default:
17725       return FALSE;
17726     }
17727 
17728   if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17729     return FALSE;
17730 
17731   return TRUE;
17732 }
17733 
17734 static _arm_elf_section_data *
17735 get_arm_elf_section_data (asection * sec)
17736 {
17737   if (sec && sec->owner && is_arm_elf (sec->owner))
17738     return elf32_arm_section_data (sec);
17739   else
17740     return NULL;
17741 }
17742 
17743 typedef struct
17744 {
17745   void *flaginfo;
17746   struct bfd_link_info *info;
17747   asection *sec;
17748   int sec_shndx;
17749   int (*func) (void *, const char *, Elf_Internal_Sym *,
17750 	       asection *, struct elf_link_hash_entry *);
17751 } output_arch_syminfo;
17752 
17753 enum map_symbol_type
17754 {
17755   ARM_MAP_ARM,
17756   ARM_MAP_THUMB,
17757   ARM_MAP_DATA
17758 };
17759 
17760 
17761 /* Output a single mapping symbol.  */
17762 
17763 static bfd_boolean
17764 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17765 			  enum map_symbol_type type,
17766 			  bfd_vma offset)
17767 {
17768   static const char *names[3] = {"$a", "$t", "$d"};
17769   Elf_Internal_Sym sym;
17770 
17771   sym.st_value = osi->sec->output_section->vma
17772 		 + osi->sec->output_offset
17773 		 + offset;
17774   sym.st_size = 0;
17775   sym.st_other = 0;
17776   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17777   sym.st_shndx = osi->sec_shndx;
17778   sym.st_target_internal = 0;
17779   elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17780   return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17781 }
17782 
17783 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17784    IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */
17785 
17786 static bfd_boolean
17787 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17788 			    bfd_boolean is_iplt_entry_p,
17789 			    union gotplt_union *root_plt,
17790 			    struct arm_plt_info *arm_plt)
17791 {
17792   struct elf32_arm_link_hash_table *htab;
17793   bfd_vma addr, plt_header_size;
17794 
17795   if (root_plt->offset == (bfd_vma) -1)
17796     return TRUE;
17797 
17798   htab = elf32_arm_hash_table (osi->info);
17799   if (htab == NULL)
17800     return FALSE;
17801 
17802   if (is_iplt_entry_p)
17803     {
17804       osi->sec = htab->root.iplt;
17805       plt_header_size = 0;
17806     }
17807   else
17808     {
17809       osi->sec = htab->root.splt;
17810       plt_header_size = htab->plt_header_size;
17811     }
17812   osi->sec_shndx = (_bfd_elf_section_from_bfd_section
17813 		    (osi->info->output_bfd, osi->sec->output_section));
17814 
17815   addr = root_plt->offset & -2;
17816   if (htab->symbian_p)
17817     {
17818       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17819 	return FALSE;
17820       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
17821 	return FALSE;
17822     }
17823   else if (htab->vxworks_p)
17824     {
17825       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17826 	return FALSE;
17827       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
17828 	return FALSE;
17829       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
17830 	return FALSE;
17831       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
17832 	return FALSE;
17833     }
17834   else if (htab->nacl_p)
17835     {
17836       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17837 	return FALSE;
17838     }
17839   else if (htab->fdpic_p)
17840     {
17841       enum map_symbol_type type = using_thumb_only(htab)
17842 	? ARM_MAP_THUMB
17843 	: ARM_MAP_ARM;
17844 
17845       if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
17846         if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17847           return FALSE;
17848       if (!elf32_arm_output_map_sym (osi, type, addr))
17849         return FALSE;
17850       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
17851         return FALSE;
17852       if (htab->plt_entry_size == 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry))
17853         if (!elf32_arm_output_map_sym (osi, type, addr + 24))
17854           return FALSE;
17855     }
17856   else if (using_thumb_only (htab))
17857     {
17858       if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
17859 	return FALSE;
17860     }
17861   else
17862     {
17863       bfd_boolean thumb_stub_p;
17864 
17865       thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
17866       if (thumb_stub_p)
17867 	{
17868 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
17869 	    return FALSE;
17870 	}
17871 #ifdef FOUR_WORD_PLT
17872       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17873 	return FALSE;
17874       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
17875 	return FALSE;
17876 #else
17877       /* A three-word PLT with no Thumb thunk contains only Arm code, so
17878 	 we only need to output a mapping symbol for the first PLT entry
17879 	 and for entries with Thumb thunks.  */
17880       if (thumb_stub_p || addr == plt_header_size)
17881 	{
17882 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
17883 	    return FALSE;
17884 	}
17885 #endif
17886     }
17887 
17888   return TRUE;
17889 }
17890 
17891 /* Output mapping symbols for PLT entries associated with H.  */
17892 
17893 static bfd_boolean
17894 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
17895 {
17896   output_arch_syminfo *osi = (output_arch_syminfo *) inf;
17897   struct elf32_arm_link_hash_entry *eh;
17898 
17899   if (h->root.type == bfd_link_hash_indirect)
17900     return TRUE;
17901 
17902   if (h->root.type == bfd_link_hash_warning)
17903     /* When warning symbols are created, they **replace** the "real"
17904        entry in the hash table, thus we never get to see the real
17905        symbol in a hash traversal.  So look at it now.  */
17906     h = (struct elf_link_hash_entry *) h->root.u.i.link;
17907 
17908   eh = (struct elf32_arm_link_hash_entry *) h;
17909   return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
17910 				     &h->plt, &eh->plt);
17911 }
17912 
17913 /* Bind a veneered symbol to its veneer identified by its hash entry
17914    STUB_ENTRY.  The veneered location thus loses its symbol.  */
17915 
17916 static void
17917 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
17918 {
17919   struct elf32_arm_link_hash_entry *hash = stub_entry->h;
17920 
17921   BFD_ASSERT (hash);
17922   hash->root.root.u.def.section = stub_entry->stub_sec;
17923   hash->root.root.u.def.value = stub_entry->stub_offset;
17924   hash->root.size = stub_entry->stub_size;
17925 }
17926 
17927 /* Output a single local symbol for a generated stub.  */
17928 
17929 static bfd_boolean
17930 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
17931 			   bfd_vma offset, bfd_vma size)
17932 {
17933   Elf_Internal_Sym sym;
17934 
17935   sym.st_value = osi->sec->output_section->vma
17936 		 + osi->sec->output_offset
17937 		 + offset;
17938   sym.st_size = size;
17939   sym.st_other = 0;
17940   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
17941   sym.st_shndx = osi->sec_shndx;
17942   sym.st_target_internal = 0;
17943   return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
17944 }
17945 
17946 static bfd_boolean
17947 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
17948 		  void * in_arg)
17949 {
17950   struct elf32_arm_stub_hash_entry *stub_entry;
17951   asection *stub_sec;
17952   bfd_vma addr;
17953   char *stub_name;
17954   output_arch_syminfo *osi;
17955   const insn_sequence *template_sequence;
17956   enum stub_insn_type prev_type;
17957   int size;
17958   int i;
17959   enum map_symbol_type sym_type;
17960 
17961   /* Massage our args to the form they really have.  */
17962   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
17963   osi = (output_arch_syminfo *) in_arg;
17964 
17965   stub_sec = stub_entry->stub_sec;
17966 
17967   /* Ensure this stub is attached to the current section being
17968      processed.  */
17969   if (stub_sec != osi->sec)
17970     return TRUE;
17971 
17972   addr = (bfd_vma) stub_entry->stub_offset;
17973   template_sequence = stub_entry->stub_template;
17974 
17975   if (arm_stub_sym_claimed (stub_entry->stub_type))
17976     arm_stub_claim_sym (stub_entry);
17977   else
17978     {
17979       stub_name = stub_entry->output_name;
17980       switch (template_sequence[0].type)
17981 	{
17982 	case ARM_TYPE:
17983 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
17984 					  stub_entry->stub_size))
17985 	    return FALSE;
17986 	  break;
17987 	case THUMB16_TYPE:
17988 	case THUMB32_TYPE:
17989 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
17990 					  stub_entry->stub_size))
17991 	    return FALSE;
17992 	  break;
17993 	default:
17994 	  BFD_FAIL ();
17995 	  return 0;
17996 	}
17997     }
17998 
17999   prev_type = DATA_TYPE;
18000   size = 0;
18001   for (i = 0; i < stub_entry->stub_template_size; i++)
18002     {
18003       switch (template_sequence[i].type)
18004 	{
18005 	case ARM_TYPE:
18006 	  sym_type = ARM_MAP_ARM;
18007 	  break;
18008 
18009 	case THUMB16_TYPE:
18010 	case THUMB32_TYPE:
18011 	  sym_type = ARM_MAP_THUMB;
18012 	  break;
18013 
18014 	case DATA_TYPE:
18015 	  sym_type = ARM_MAP_DATA;
18016 	  break;
18017 
18018 	default:
18019 	  BFD_FAIL ();
18020 	  return FALSE;
18021 	}
18022 
18023       if (template_sequence[i].type != prev_type)
18024 	{
18025 	  prev_type = template_sequence[i].type;
18026 	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
18027 	    return FALSE;
18028 	}
18029 
18030       switch (template_sequence[i].type)
18031 	{
18032 	case ARM_TYPE:
18033 	case THUMB32_TYPE:
18034 	  size += 4;
18035 	  break;
18036 
18037 	case THUMB16_TYPE:
18038 	  size += 2;
18039 	  break;
18040 
18041 	case DATA_TYPE:
18042 	  size += 4;
18043 	  break;
18044 
18045 	default:
18046 	  BFD_FAIL ();
18047 	  return FALSE;
18048 	}
18049     }
18050 
18051   return TRUE;
18052 }
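
/* Illustrative sketch (arbitrary offsets): for a stub whose template is
   { THUMB32, THUMB32, DATA } placed at stub_offset 0x20, the code above
   emits

     <stub-name>  at 0x21   -- stub symbol, bit 0 set because the first
			       template entry is Thumb
     $t           at 0x20   -- Thumb code starting the stub
     $d           at 0x28   -- the trailing literal word

   Consecutive template entries of the same kind share one mapping symbol,
   since only transitions between instruction and data types are recorded.  */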
18053 
18054 /* Output mapping symbols for linker generated sections,
18055    and for those data-only sections that do not have a
18056    $d.  */
18057 
18058 static bfd_boolean
18059 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18060 				  struct bfd_link_info *info,
18061 				  void *flaginfo,
18062 				  int (*func) (void *, const char *,
18063 					       Elf_Internal_Sym *,
18064 					       asection *,
18065 					       struct elf_link_hash_entry *))
18066 {
18067   output_arch_syminfo osi;
18068   struct elf32_arm_link_hash_table *htab;
18069   bfd_vma offset;
18070   bfd_size_type size;
18071   bfd *input_bfd;
18072 
18073   htab = elf32_arm_hash_table (info);
18074   if (htab == NULL)
18075     return FALSE;
18076 
18077   check_use_blx (htab);
18078 
18079   osi.flaginfo = flaginfo;
18080   osi.info = info;
18081   osi.func = func;
18082 
18083   /* Add a $d mapping symbol to data-only sections that
18084      don't have any mapping symbol.  This may result in (harmless) redundant
18085      mapping symbols.  */
18086   for (input_bfd = info->input_bfds;
18087        input_bfd != NULL;
18088        input_bfd = input_bfd->link.next)
18089     {
18090       if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18091 	for (osi.sec = input_bfd->sections;
18092 	     osi.sec != NULL;
18093 	     osi.sec = osi.sec->next)
18094 	  {
18095 	    if (osi.sec->output_section != NULL
18096 		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18097 		    != 0)
18098 		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18099 		   == SEC_HAS_CONTENTS
18100 		&& get_arm_elf_section_data (osi.sec) != NULL
18101 		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
18102 		&& osi.sec->size > 0
18103 		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
18104 	      {
18105 		osi.sec_shndx = _bfd_elf_section_from_bfd_section
18106 		  (output_bfd, osi.sec->output_section);
18107 		if (osi.sec_shndx != (int)SHN_BAD)
18108 		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18109 	      }
18110 	  }
18111     }
18112 
18113   /* ARM->Thumb glue.  */
18114   if (htab->arm_glue_size > 0)
18115     {
18116       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18117 					ARM2THUMB_GLUE_SECTION_NAME);
18118 
18119       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18120 	  (output_bfd, osi.sec->output_section);
18121       if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18122 	  || htab->pic_veneer)
18123 	size = ARM2THUMB_PIC_GLUE_SIZE;
18124       else if (htab->use_blx)
18125 	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18126       else
18127 	size = ARM2THUMB_STATIC_GLUE_SIZE;
18128 
18129       for (offset = 0; offset < htab->arm_glue_size; offset += size)
18130 	{
18131 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18132 	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18133 	}
18134     }
18135 
18136   /* Thumb->ARM glue.  */
18137   if (htab->thumb_glue_size > 0)
18138     {
18139       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18140 					THUMB2ARM_GLUE_SECTION_NAME);
18141 
18142       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18143 	  (output_bfd, osi.sec->output_section);
18144       size = THUMB2ARM_GLUE_SIZE;
18145 
18146       for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18147 	{
18148 	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18149 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18150 	}
18151     }
18152 
18153   /* ARMv4 BX veneers.  */
18154   if (htab->bx_glue_size > 0)
18155     {
18156       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18157 					ARM_BX_GLUE_SECTION_NAME);
18158 
18159       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18160 	  (output_bfd, osi.sec->output_section);
18161 
18162       elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18163     }
18164 
18165   /* Long-call stubs.  */
18166   if (htab->stub_bfd && htab->stub_bfd->sections)
18167     {
18168       asection* stub_sec;
18169 
18170       for (stub_sec = htab->stub_bfd->sections;
18171 	   stub_sec != NULL;
18172 	   stub_sec = stub_sec->next)
18173 	{
18174 	  /* Ignore non-stub sections.  */
18175 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
18176 	    continue;
18177 
18178 	  osi.sec = stub_sec;
18179 
18180 	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
18181 	    (output_bfd, osi.sec->output_section);
18182 
18183 	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18184 	}
18185     }
18186 
18187   /* Finally, output mapping symbols for the PLT.  */
18188   if (htab->root.splt && htab->root.splt->size > 0)
18189     {
18190       osi.sec = htab->root.splt;
18191       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18192 		       (output_bfd, osi.sec->output_section));
18193 
18194       /* Output mapping symbols for the plt header.  SymbianOS does not have a
18195 	 plt header.  */
18196       if (htab->vxworks_p)
18197 	{
18198 	  /* VxWorks shared libraries have no PLT header.  */
18199 	  if (!bfd_link_pic (info))
18200 	    {
18201 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18202 		return FALSE;
18203 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18204 		return FALSE;
18205 	    }
18206 	}
18207       else if (htab->nacl_p)
18208 	{
18209 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18210 	    return FALSE;
18211 	}
18212       else if (using_thumb_only (htab) && !htab->fdpic_p)
18213 	{
18214 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18215 	    return FALSE;
18216 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18217 	    return FALSE;
18218 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18219 	    return FALSE;
18220 	}
18221       else if (!htab->symbian_p && !htab->fdpic_p)
18222 	{
18223 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18224 	    return FALSE;
18225 #ifndef FOUR_WORD_PLT
18226 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18227 	    return FALSE;
18228 #endif
18229 	}
18230     }
18231   if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
18232     {
18233       /* NaCl uses a special first entry in .iplt too.  */
18234       osi.sec = htab->root.iplt;
18235       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18236 		       (output_bfd, osi.sec->output_section));
18237       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18238 	return FALSE;
18239     }
18240   if ((htab->root.splt && htab->root.splt->size > 0)
18241       || (htab->root.iplt && htab->root.iplt->size > 0))
18242     {
18243       elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18244       for (input_bfd = info->input_bfds;
18245 	   input_bfd != NULL;
18246 	   input_bfd = input_bfd->link.next)
18247 	{
18248 	  struct arm_local_iplt_info **local_iplt;
18249 	  unsigned int i, num_syms;
18250 
18251 	  local_iplt = elf32_arm_local_iplt (input_bfd);
18252 	  if (local_iplt != NULL)
18253 	    {
18254 	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
18255 	      for (i = 0; i < num_syms; i++)
18256 		if (local_iplt[i] != NULL
18257 		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
18258 						    &local_iplt[i]->root,
18259 						    &local_iplt[i]->arm))
18260 		  return FALSE;
18261 	    }
18262 	}
18263     }
18264   if (htab->dt_tlsdesc_plt != 0)
18265     {
18266       /* Mapping symbols for the lazy tls trampoline.  */
18267       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
18268 	return FALSE;
18269 
18270       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18271 				     htab->dt_tlsdesc_plt + 24))
18272 	return FALSE;
18273     }
18274   if (htab->tls_trampoline != 0)
18275     {
18276       /* Mapping symbols for the tls trampoline.  */
18277       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18278 	return FALSE;
18279 #ifdef FOUR_WORD_PLT
18280       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18281 				     htab->tls_trampoline + 12))
18282 	return FALSE;
18283 #endif
18284     }
18285 
18286   return TRUE;
18287 }
18288 
18289 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18290    the import library.  All SYMCOUNT symbols of ABFD can be examined
18291    from their pointers in SYMS.  Pointers of symbols to keep should be
18292    stored contiguously at the beginning of that array.
18293 
18294    Returns the number of symbols to keep.  */
18295 
18296 static unsigned int
18297 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18298 			       struct bfd_link_info *info,
18299 			       asymbol **syms, long symcount)
18300 {
18301   size_t maxnamelen;
18302   char *cmse_name;
18303   long src_count, dst_count = 0;
18304   struct elf32_arm_link_hash_table *htab;
18305 
18306   htab = elf32_arm_hash_table (info);
18307   if (!htab->stub_bfd || !htab->stub_bfd->sections)
18308     symcount = 0;
18309 
18310   maxnamelen = 128;
18311   cmse_name = (char *) bfd_malloc (maxnamelen);
18312   for (src_count = 0; src_count < symcount; src_count++)
18313     {
18314       struct elf32_arm_link_hash_entry *cmse_hash;
18315       asymbol *sym;
18316       flagword flags;
18317       char *name;
18318       size_t namelen;
18319 
18320       sym = syms[src_count];
18321       flags = sym->flags;
18322       name = (char *) bfd_asymbol_name (sym);
18323 
18324       if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18325 	continue;
18326       if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18327 	continue;
18328 
18329       namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18330       if (namelen > maxnamelen)
18331 	{
18332 	  cmse_name = (char *)
18333 	    bfd_realloc (cmse_name, namelen);
18334 	  maxnamelen = namelen;
18335 	}
18336       snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18337       cmse_hash = (struct elf32_arm_link_hash_entry *)
18338 	elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18339 
18340       if (!cmse_hash
18341 	  || (cmse_hash->root.root.type != bfd_link_hash_defined
18342 	      && cmse_hash->root.root.type != bfd_link_hash_defweak)
18343 	  || cmse_hash->root.type != STT_FUNC)
18344 	continue;
18345 
18346       if (!ARM_GET_SYM_CMSE_SPCL (cmse_hash->root.target_internal))
18347 	continue;
18348 
18349       syms[dst_count++] = sym;
18350     }
18351   free (cmse_name);
18352 
18353   syms[dst_count] = NULL;
18354 
18355   return dst_count;
18356 }
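
/* As an illustrative example (assuming CMSE_PREFIX is the usual
   "__acle_se_" prefix defined earlier in this file): for a secure entry
   function "foo", the loop above looks up "__acle_se_foo" and keeps the
   normal symbol "foo" only if that special symbol is a defined function
   flagged with ARM_GET_SYM_CMSE_SPCL.  */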
18357 
18358 /* Filter symbols of ABFD to include in the import library.  All
18359    SYMCOUNT symbols of ABFD can be examined from their pointers in
18360    SYMS.  Pointers of symbols to keep should be stored contiguously at
18361    the beginning of that array.
18362 
18363    Returns the number of symbols to keep.  */
18364 
18365 static unsigned int
18366 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18367 				 struct bfd_link_info *info,
18368 				 asymbol **syms, long symcount)
18369 {
18370   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18371 
18372   /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18373      Development Tools" (ARM-ECM-0359818) mandates that the Secure Gateway
18374      import library be a relocatable object file.  */
18375   BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18376   if (globals->cmse_implib)
18377     return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18378   else
18379     return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18380 }
18381 
18382 /* Allocate target specific section data.  */
18383 
18384 static bfd_boolean
18385 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18386 {
18387   if (!sec->used_by_bfd)
18388     {
18389       _arm_elf_section_data *sdata;
18390       bfd_size_type amt = sizeof (*sdata);
18391 
18392       sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18393       if (sdata == NULL)
18394 	return FALSE;
18395       sec->used_by_bfd = sdata;
18396     }
18397 
18398   return _bfd_elf_new_section_hook (abfd, sec);
18399 }
18400 
18401 
18402 /* Used to order a list of mapping symbols by address.  */
18403 
18404 static int
18405 elf32_arm_compare_mapping (const void * a, const void * b)
18406 {
18407   const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18408   const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18409 
18410   if (amap->vma > bmap->vma)
18411     return 1;
18412   else if (amap->vma < bmap->vma)
18413     return -1;
18414   else if (amap->type > bmap->type)
18415     /* Ensure results do not depend on the host qsort for objects with
18416        multiple mapping symbols at the same address by sorting on type
18417        after vma.  */
18418     return 1;
18419   else if (amap->type < bmap->type)
18420     return -1;
18421   else
18422     return 0;
18423 }
18424 
18425 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */
18426 
18427 static unsigned long
18428 offset_prel31 (unsigned long addr, bfd_vma offset)
18429 {
18430   return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18431 }
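
/* For example (illustrative values):

     offset_prel31 (0x80000004, 0x8) == 0x8000000c
     offset_prel31 (0x7ffffffc, 0x8) == 0x00000004

   i.e. the addition wraps within the low 31 bits while bit 31 of ADDR is
   preserved.  */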
18432 
18433 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18434    relocations.  */
18435 
18436 static void
18437 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18438 {
18439   unsigned long first_word = bfd_get_32 (output_bfd, from);
18440   unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18441 
18442   /* High bit of first word is supposed to be zero.  */
18443   if ((first_word & 0x80000000ul) == 0)
18444     first_word = offset_prel31 (first_word, offset);
18445 
18446   /* If the high bit of the second word is clear, and the bit pattern is not 0x1
18447      (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
18448   if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18449     second_word = offset_prel31 (second_word, offset);
18450 
18451   bfd_put_32 (output_bfd, first_word, to);
18452   bfd_put_32 (output_bfd, second_word, to + 4);
18453 }
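
/* Sketch of the table layout being adjusted (values illustrative): each
   .ARM.exidx entry is two words,

     word 0: prel31 offset to the function start (high bit clear)
     word 1: either EXIDX_CANTUNWIND (0x1), an inline unwind entry
	     (high bit set, left untouched), or a prel31 offset to an
	     .ARM.extab entry (high bit clear, adjusted here).  */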
18454 
18455 /* Data for make_branch_to_a8_stub().  */
18456 
18457 struct a8_branch_to_stub_data
18458 {
18459   asection *writing_section;
18460   bfd_byte *contents;
18461 };
18462 
18463 
18464 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18465    places for a particular section.  */
18466 
18467 static bfd_boolean
18468 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18469 		       void *in_arg)
18470 {
18471   struct elf32_arm_stub_hash_entry *stub_entry;
18472   struct a8_branch_to_stub_data *data;
18473   bfd_byte *contents;
18474   unsigned long branch_insn;
18475   bfd_vma veneered_insn_loc, veneer_entry_loc;
18476   bfd_signed_vma branch_offset;
18477   bfd *abfd;
18478   unsigned int loc;
18479 
18480   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18481   data = (struct a8_branch_to_stub_data *) in_arg;
18482 
18483   if (stub_entry->target_section != data->writing_section
18484       || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18485     return TRUE;
18486 
18487   contents = data->contents;
18488 
18489   /* We use target_section as Cortex-A8 erratum workaround stubs are only
18490      generated when both source and target are in the same section.  */
18491   veneered_insn_loc = stub_entry->target_section->output_section->vma
18492 		      + stub_entry->target_section->output_offset
18493 		      + stub_entry->source_value;
18494 
18495   veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18496 		     + stub_entry->stub_sec->output_offset
18497 		     + stub_entry->stub_offset;
18498 
18499   if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18500     veneered_insn_loc &= ~3u;
18501 
18502   branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
18503 
18504   abfd = stub_entry->target_section->owner;
18505   loc = stub_entry->source_value;
18506 
18507   /* We attempt to avoid this condition by setting stubs_always_after_branch
18508      in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18509      This check is just to be on the safe side...  */
18510   if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18511     {
18512       _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18513 			    "allocated in unsafe location"), abfd);
18514       return FALSE;
18515     }
18516 
18517   switch (stub_entry->stub_type)
18518     {
18519     case arm_stub_a8_veneer_b:
18520     case arm_stub_a8_veneer_b_cond:
18521       branch_insn = 0xf0009000;
18522       goto jump24;
18523 
18524     case arm_stub_a8_veneer_blx:
18525       branch_insn = 0xf000e800;
18526       goto jump24;
18527 
18528     case arm_stub_a8_veneer_bl:
18529       {
18530 	unsigned int i1, j1, i2, j2, s;
18531 
18532 	branch_insn = 0xf000d000;
18533 
18534       jump24:
18535 	if (branch_offset < -16777216 || branch_offset > 16777214)
18536 	  {
18537 	    /* There's not much we can do apart from complain if this
18538 	       happens.  */
18539 	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18540 				  "of range (input file too large)"), abfd);
18541 	    return FALSE;
18542 	  }
18543 
18544 	/* i1 = not(j1 eor s), so:
18545 	   not i1 = j1 eor s
18546 	   j1 = (not i1) eor s.  */
18547 
18548 	branch_insn |= (branch_offset >> 1) & 0x7ff;
18549 	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18550 	i2 = (branch_offset >> 22) & 1;
18551 	i1 = (branch_offset >> 23) & 1;
18552 	s = (branch_offset >> 24) & 1;
18553 	j1 = (!i1) ^ s;
18554 	j2 = (!i2) ^ s;
18555 	branch_insn |= j2 << 11;
18556 	branch_insn |= j1 << 13;
18557 	branch_insn |= s << 26;
18558       }
18559       break;
18560 
18561     default:
18562       BFD_FAIL ();
18563       return FALSE;
18564     }
18565 
18566   bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18567   bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18568 
18569   return TRUE;
18570 }
18571 
18572 /* Beginning of stm32l4xx work-around.  */
18573 
18574 /* Functions encoding instructions necessary for the emission of the
18575    fix-stm32l4xx-629360.
18576    Encoding is extracted from the
18577    ARM (C) Architecture Reference Manual
18578    ARMv7-A and ARMv7-R edition
18579    ARM DDI 0406C.b (ID072512).  */
18580 
18581 static inline bfd_vma
18582 create_instruction_branch_absolute (int branch_offset)
18583 {
18584   /* A8.8.18 B (A8-334)
18585      B target_address (Encoding T4).  */
18586   /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
18587   /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
18588   /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */
18589 
18590   int s = ((branch_offset & 0x1000000) >> 24);
18591   int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18592   int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18593 
18594   if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18595     BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");
18596 
18597   bfd_vma patched_inst = 0xf0009000
18598     | s << 26 /* S.  */
18599     | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
18600     | j1 << 13 /* J1.  */
18601     | j2 << 11 /* J2.  */
18602     | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */
18603 
18604   return patched_inst;
18605 }
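
/* Worked example (illustrative): a branch with offset 0 gives s = 0 and
   j1 = j2 = 1, so the returned encoding is

     0xf0009000 | (1 << 13) | (1 << 11) == 0xf000b800

   which is B.W with a zero displacement.  */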
18606 
18607 static inline bfd_vma
18608 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18609 {
18610   /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18611      LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
18612   bfd_vma patched_inst = 0xe8900000
18613     | (/*W=*/wback << 21)
18614     | (base_reg << 16)
18615     | (reg_mask & 0x0000ffff);
18616 
18617   return patched_inst;
18618 }
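
/* Worked example (illustrative): create_instruction_ldmia (0, 1, 0x00fe)
   returns 0xe8b000fe, i.e. LDMIA r0!, {r1-r7}.  */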
18619 
18620 static inline bfd_vma
18621 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18622 {
18623   /* A8.8.60 LDMDB/LDMEA (A8-402)
18624      LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
18625   bfd_vma patched_inst = 0xe9100000
18626     | (/*W=*/wback << 21)
18627     | (base_reg << 16)
18628     | (reg_mask & 0x0000ffff);
18629 
18630   return patched_inst;
18631 }
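
/* Worked example (illustrative): create_instruction_ldmdb (0, 1, 0x5f00)
   returns 0xe9305f00, i.e. LDMDB r0!, {r8-r12, lr}.  */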
18632 
18633 static inline bfd_vma
18634 create_instruction_mov (int target_reg, int source_reg)
18635 {
18636   /* A8.8.103 MOV (register) (A8-486)
18637      MOV Rd, Rm (Encoding T1).  */
18638   bfd_vma patched_inst = 0x4600
18639     | (target_reg & 0x7)
18640     | ((target_reg & 0x8) >> 3) << 7
18641     | (source_reg << 3);
18642 
18643   return patched_inst;
18644 }
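
/* Worked example (illustrative): create_instruction_mov (8, 0) returns
   0x4680, i.e. the 16-bit MOV r8, r0.  */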
18645 
18646 static inline bfd_vma
18647 create_instruction_sub (int target_reg, int source_reg, int value)
18648 {
18649   /* A8.8.221 SUB (immediate) (A8-708)
18650      SUB Rd, Rn, #value (Encoding T3).  */
18651   bfd_vma patched_inst = 0xf1a00000
18652     | (target_reg << 8)
18653     | (source_reg << 16)
18654     | (/*S=*/0 << 20)
18655     | ((value & 0x800) >> 11) << 26
18656     | ((value & 0x700) >>  8) << 12
18657     | (value & 0x0ff);
18658 
18659   return patched_inst;
18660 }
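
/* Worked example (illustrative): create_instruction_sub (1, 0, 40) returns
   0xf1a00128, i.e. SUB.W r1, r0, #40.  */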
18661 
18662 static inline bfd_vma
18663 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18664 			   int first_reg)
18665 {
18666   /* A8.8.332 VLDM (A8-922)
18667      VLDMIA Rn{!}, {list} (Encoding T1 or T2).  */
18668   bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18669     | (/*W=*/wback << 21)
18670     | (base_reg << 16)
18671     | (num_words & 0x000000ff)
18672     | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18673     | (first_reg & 0x00000001) << 22;
18674 
18675   return patched_inst;
18676 }
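
/* Worked example (illustrative): create_instruction_vldmia (0, 1, 1, 8, 0)
   returns 0xecb00b08, i.e. VLDMIA r0!, {d0-d3} (8 words, with FIRST_REG in
   the packed Vd:D form used by the stm32l4xx code below).  */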
18677 
18678 static inline bfd_vma
18679 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18680 			   int first_reg)
18681 {
18682   /* A8.8.332 VLDM (A8-922)
18683      VLDMDB Rn!, {list} (Encoding T1 or T2).  */
18684   bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18685     | (base_reg << 16)
18686     | (num_words & 0x000000ff)
18687     | (((unsigned) first_reg >> 1) & 0x0000000f) << 12
18688     | (first_reg & 0x00000001) << 22;
18689 
18690   return patched_inst;
18691 }
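
/* Worked example (illustrative): create_instruction_vldmdb (0, 0, 8, 8)
   returns 0xed304a08, i.e. VLDMDB r0!, {s8-s15}.  */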
18692 
18693 static inline bfd_vma
18694 create_instruction_udf_w (int value)
18695 {
18696   /* A8.8.247 UDF (A8-758)
18697      Undefined (Encoding T2).  */
18698   bfd_vma patched_inst = 0xf7f0a000
18699     | (value & 0x00000fff)
18700     | (value & 0x000f0000) << 16;
18701 
18702   return patched_inst;
18703 }
18704 
18705 static inline bfd_vma
18706 create_instruction_udf (int value)
18707 {
18708   /* A8.8.247 UDF (A8-758)
18709      Undefined (Encoding T1).  */
18710   bfd_vma patched_inst = 0xde00
18711     | (value & 0xff);
18712 
18713   return patched_inst;
18714 }
18715 
18716 /* Functions writing an instruction in memory, returning the next
18717    memory position to write to.  */
18718 
18719 static inline bfd_byte *
18720 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18721 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
18722 {
18723   put_thumb2_insn (htab, output_bfd, insn, pt);
18724   return pt + 4;
18725 }
18726 
18727 static inline bfd_byte *
18728 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18729 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
18730 {
18731   put_thumb_insn (htab, output_bfd, insn, pt);
18732   return pt + 2;
18733 }
18734 
18735 /* Fill a region of memory with T1 and T2 UDF instructions, taking
18736    care of alignment.  */
18737 
18738 static bfd_byte *
18739 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18740 			 bfd *			 output_bfd,
18741 			 const bfd_byte * const	 base_stub_contents,
18742 			 bfd_byte * const	 from_stub_contents,
18743 			 const bfd_byte * const	 end_stub_contents)
18744 {
18745   bfd_byte *current_stub_contents = from_stub_contents;
18746 
18747   /* Fill the remainder of the stub with deterministic contents: UDF
18748      instructions.
18749      If the current offset is 2 modulo 4, emit one 16-bit T1 UDF first
18750      so that the following 32-bit T2 UDFs stay 4-byte aligned.  */
18751   if ((current_stub_contents < end_stub_contents)
18752       && !((current_stub_contents - base_stub_contents) % 2)
18753       && ((current_stub_contents - base_stub_contents) % 4))
18754     current_stub_contents =
18755       push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18756 			  create_instruction_udf (0));
18757 
18758   for (; current_stub_contents < end_stub_contents;)
18759     current_stub_contents =
18760       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18761 			  create_instruction_udf_w (0));
18762 
18763   return current_stub_contents;
18764 }
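
/* For example (illustrative): if the write pointer is 2 bytes into a
   4-byte-aligned stub, one 16-bit UDF (0xde00) is emitted first, and the
   rest of the stub is then filled with 32-bit UDF.W (0xf7f0a000)
   instructions.  */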
18765 
18766 /* Functions writing the stream of instructions equivalent to the
18767    derived sequence for ldmia, ldmdb, vldm respectively.  */
18768 
18769 static void
18770 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18771 				       bfd * output_bfd,
18772 				       const insn32 initial_insn,
18773 				       const bfd_byte *const initial_insn_addr,
18774 				       bfd_byte *const base_stub_contents)
18775 {
18776   int wback = (initial_insn & 0x00200000) >> 21;
18777   int ri, rn = (initial_insn & 0x000F0000) >> 16;
18778   int insn_all_registers = initial_insn & 0x0000ffff;
18779   int insn_low_registers, insn_high_registers;
18780   int usable_register_mask;
18781   int nb_registers = elf32_arm_popcount (insn_all_registers);
18782   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18783   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18784   bfd_byte *current_stub_contents = base_stub_contents;
18785 
18786   BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18787 
18788   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
18789      sequences of eight registers or fewer, which do not trigger the
18790      hardware issue.  */
18791   if (nb_registers <= 8)
18792     {
18793       /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
18794       current_stub_contents =
18795 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18796 			    initial_insn);
18797 
18798       /* B initial_insn_addr+4.  */
18799       if (!restore_pc)
18800 	current_stub_contents =
18801 	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18802 			      create_instruction_branch_absolute
18803 			      (initial_insn_addr - current_stub_contents));
18804 
18805       /* Fill the remainder of the stub with deterministic contents.  */
18806       current_stub_contents =
18807 	stm32l4xx_fill_stub_udf (htab, output_bfd,
18808 				 base_stub_contents, current_stub_contents,
18809 				 base_stub_contents +
18810 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18811 
18812       return;
18813     }
18814 
18815   /* - reg_list[13] == 0.  */
18816   BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
18817 
18818   /* - reg_list[14] & reg_list[15] != 1.  */
18819   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18820 
18821   /* - if (wback==1) reg_list[rn] == 0.  */
18822   BFD_ASSERT (!wback || !restore_rn);
18823 
18824   /* - nb_registers > 8.  */
18825   BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18826 
18827   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
18828 
18829   /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18830     - One with the 7 lowest registers (register mask 0x007F)
18831       This LDM will finally contain between 2 and 7 registers
18832     - One with the 7 highest registers (register mask 0xDF80)
18833       This ldm will finally contain between 2 and 7 registers.  */
18834   insn_low_registers = insn_all_registers & 0x007F;
18835   insn_high_registers = insn_all_registers & 0xDF80;
18836 
18837   /* A spare register may be needed during this veneer to temporarily
18838      handle the base register.  This register will be restored with the
18839      last LDM operation.
18840      The usable register may be any general purpose register (that
18841      excludes PC, SP, LR : register mask is 0x1FFF).  */
18842   usable_register_mask = 0x1FFF;
18843 
18844   /* Generate the stub function.  */
18845   if (wback)
18846     {
18847       /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
18848       current_stub_contents =
18849 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18850 			    create_instruction_ldmia
18851 			    (rn, /*wback=*/1, insn_low_registers));
18852 
18853       /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
18854       current_stub_contents =
18855 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18856 			    create_instruction_ldmia
18857 			    (rn, /*wback=*/1, insn_high_registers));
18858       if (!restore_pc)
18859 	{
18860 	  /* B initial_insn_addr+4.  */
18861 	  current_stub_contents =
18862 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18863 				create_instruction_branch_absolute
18864 				(initial_insn_addr - current_stub_contents));
18865        }
18866     }
18867   else /* if (!wback).  */
18868     {
18869       ri = rn;
18870 
18871       /* If Rn is not in the high-register-list, copy it to a spare Ri.  */
18872       if (!(insn_high_registers & (1 << rn)))
18873 	{
18874 	  /* Choose a Ri in the high-register-list that will be restored.  */
18875 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
18876 
18877 	  /* MOV Ri, Rn.  */
18878 	  current_stub_contents =
18879 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18880 				create_instruction_mov (ri, rn));
18881 	}
18882 
18883       /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
18884       current_stub_contents =
18885 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18886 			    create_instruction_ldmia
18887 			    (ri, /*wback=*/1, insn_low_registers));
18888 
18889       /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
18890       current_stub_contents =
18891 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18892 			    create_instruction_ldmia
18893 			    (ri, /*wback=*/0, insn_high_registers));
18894 
18895       if (!restore_pc)
18896 	{
18897 	  /* B initial_insn_addr+4.  */
18898 	  current_stub_contents =
18899 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18900 				create_instruction_branch_absolute
18901 				(initial_insn_addr - current_stub_contents));
18902 	}
18903     }
18904 
18905   /* Fill the remainder of the stub with deterministic contents.  */
18906   current_stub_contents =
18907     stm32l4xx_fill_stub_udf (htab, output_bfd,
18908 			     base_stub_contents, current_stub_contents,
18909 			     base_stub_contents +
18910 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18911 }
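
/* Illustrative sketch of the non-writeback path above: for the original
   instruction LDMIA r0, {r1-r12, lr} (13 registers, no writeback, PC not
   loaded), the low list is {r1-r6}, the high list is {r7-r12, lr}, and the
   spare register r7 is picked from the high list, giving the veneer

     mov   r7, r0
     ldmia r7!, {r1-r6}
     ldmia r7,  {r7-r12, lr}
     b     <back to the instruction after the original LDM>

   followed by UDF padding up to STM32L4XX_ERRATUM_LDM_VENEER_SIZE.  */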
18912 
18913 static void
18914 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
18915 				       bfd * output_bfd,
18916 				       const insn32 initial_insn,
18917 				       const bfd_byte *const initial_insn_addr,
18918 				       bfd_byte *const base_stub_contents)
18919 {
18920   int wback = (initial_insn & 0x00200000) >> 21;
18921   int ri, rn = (initial_insn & 0x000f0000) >> 16;
18922   int insn_all_registers = initial_insn & 0x0000ffff;
18923   int insn_low_registers, insn_high_registers;
18924   int usable_register_mask;
18925   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18926   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18927   int nb_registers = elf32_arm_popcount (insn_all_registers);
18928   bfd_byte *current_stub_contents = base_stub_contents;
18929 
18930   BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
18931 
18932   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
18933      sequences of eight registers or fewer, which do not trigger the
18934      hardware issue.  */
18935   if (nb_registers <= 8)
18936     {
18937       /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
18938       current_stub_contents =
18939 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18940 			    initial_insn);
18941 
18942       /* B initial_insn_addr+4.  */
18943       current_stub_contents =
18944 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18945 			    create_instruction_branch_absolute
18946 			    (initial_insn_addr - current_stub_contents));
18947 
18948       /* Fill the remainder of the stub with deterministic contents.  */
18949       current_stub_contents =
18950 	stm32l4xx_fill_stub_udf (htab, output_bfd,
18951 				 base_stub_contents, current_stub_contents,
18952 				 base_stub_contents +
18953 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
18954 
18955       return;
18956     }
18957 
18958   /* - reg_list[13] == 0.  */
18959   BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
18960 
18961   /* - reg_list[14] & reg_list[15] != 1.  */
18962   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
18963 
18964   /* - if (wback==1) reg_list[rn] == 0.  */
18965   BFD_ASSERT (!wback || !restore_rn);
18966 
18967   /* - nb_registers > 8.  */
18968   BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
18969 
18970   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
18971 
18972   /* In the following algorithm, we split this wide LDM using 2 LDM insns:
18973     - One with the 7 lowest registers (register mask 0x007F)
18974       This LDM will finally contain between 2 and 7 registers
18975     - One with the 7 highest registers (register mask 0xDF80)
18976       This ldm will finally contain between 2 and 7 registers.  */
18977   insn_low_registers = insn_all_registers & 0x007F;
18978   insn_high_registers = insn_all_registers & 0xDF80;
18979 
18980   /* A spare register may be needed during this veneer to temporarily
18981      handle the base register.  This register will be restored with
18982      the last LDM operation.
18983      The usable register may be any general purpose register (that excludes
18984      PC, SP, LR : register mask is 0x1FFF).  */
18985   usable_register_mask = 0x1FFF;
18986 
18987   /* Generate the stub function.  */
18988   if (!wback && !restore_pc && !restore_rn)
18989     {
18990       /* Choose a Ri in the low-register-list that will be restored.  */
18991       ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
18992 
18993       /* MOV Ri, Rn.  */
18994       current_stub_contents =
18995 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18996 			    create_instruction_mov (ri, rn));
18997 
18998       /* LDMDB Ri!, {R-high-register-list}.  */
18999       current_stub_contents =
19000 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19001 			    create_instruction_ldmdb
19002 			    (ri, /*wback=*/1, insn_high_registers));
19003 
19004       /* LDMDB Ri, {R-low-register-list}.  */
19005       current_stub_contents =
19006 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19007 			    create_instruction_ldmdb
19008 			    (ri, /*wback=*/0, insn_low_registers));
19009 
19010       /* B initial_insn_addr+4.  */
19011       current_stub_contents =
19012 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19013 			    create_instruction_branch_absolute
19014 			    (initial_insn_addr - current_stub_contents));
19015     }
19016   else if (wback && !restore_pc && !restore_rn)
19017     {
19018       /* LDMDB Rn!, {R-high-register-list}.  */
19019       current_stub_contents =
19020 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19021 			    create_instruction_ldmdb
19022 			    (rn, /*wback=*/1, insn_high_registers));
19023 
19024       /* LDMDB Rn!, {R-low-register-list}.  */
19025       current_stub_contents =
19026 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19027 			    create_instruction_ldmdb
19028 			    (rn, /*wback=*/1, insn_low_registers));
19029 
19030       /* B initial_insn_addr+4.  */
19031       current_stub_contents =
19032 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19033 			    create_instruction_branch_absolute
19034 			    (initial_insn_addr - current_stub_contents));
19035     }
19036   else if (!wback && restore_pc && !restore_rn)
19037     {
19038       /* Choose a Ri in the high-register-list that will be restored.  */
19039       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19040 
19041       /* SUB Ri, Rn, #(4*nb_registers).  */
19042       current_stub_contents =
19043 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19044 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
19045 
19046       /* LDMIA Ri!, {R-low-register-list}.  */
19047       current_stub_contents =
19048 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19049 			    create_instruction_ldmia
19050 			    (ri, /*wback=*/1, insn_low_registers));
19051 
19052       /* LDMIA Ri, {R-high-register-list}.  */
19053       current_stub_contents =
19054 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19055 			    create_instruction_ldmia
19056 			    (ri, /*wback=*/0, insn_high_registers));
19057     }
19058   else if (wback && restore_pc && !restore_rn)
19059     {
19060       /* Choose a Ri in the high-register-list that will be restored.  */
19061       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19062 
19063       /* SUB Rn, Rn, #(4*nb_registers)  */
19064       current_stub_contents =
19065 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19066 			    create_instruction_sub (rn, rn, (4 * nb_registers)));
19067 
19068       /* MOV Ri, Rn.  */
19069       current_stub_contents =
19070 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19071 			    create_instruction_mov (ri, rn));
19072 
19073       /* LDMIA Ri!, {R-low-register-list}.  */
19074       current_stub_contents =
19075 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19076 			    create_instruction_ldmia
19077 			    (ri, /*wback=*/1, insn_low_registers));
19078 
19079       /* LDMIA Ri, {R-high-register-list}.  */
19080       current_stub_contents =
19081 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19082 			    create_instruction_ldmia
19083 			    (ri, /*wback=*/0, insn_high_registers));
19084     }
19085   else if (!wback && !restore_pc && restore_rn)
19086     {
19087       ri = rn;
19088       if (!(insn_low_registers & (1 << rn)))
19089 	{
19090 	  /* Choose a Ri in the low-register-list that will be restored.  */
19091 	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19092 
19093 	  /* MOV Ri, Rn.  */
19094 	  current_stub_contents =
19095 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19096 				create_instruction_mov (ri, rn));
19097 	}
19098 
19099       /* LDMDB Ri!, {R-high-register-list}.  */
19100       current_stub_contents =
19101 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19102 			    create_instruction_ldmdb
19103 			    (ri, /*wback=*/1, insn_high_registers));
19104 
19105       /* LDMDB Ri, {R-low-register-list}.  */
19106       current_stub_contents =
19107 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19108 			    create_instruction_ldmdb
19109 			    (ri, /*wback=*/0, insn_low_registers));
19110 
19111       /* B initial_insn_addr+4.  */
19112       current_stub_contents =
19113 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19114 			    create_instruction_branch_absolute
19115 			    (initial_insn_addr - current_stub_contents));
19116     }
19117   else if (!wback && restore_pc && restore_rn)
19118     {
19119       ri = rn;
19120       if (!(insn_high_registers & (1 << rn)))
19121 	{
19122 	  /* Choose a Ri in the high-register-list that will be restored.  */
19123 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19124 	}
19125 
19126       /* SUB Ri, Rn, #(4*nb_registers).  */
19127       current_stub_contents =
19128 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19129 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
19130 
19131       /* LDMIA Ri!, {R-low-register-list}.  */
19132       current_stub_contents =
19133 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19134 			    create_instruction_ldmia
19135 			    (ri, /*wback=*/1, insn_low_registers));
19136 
19137       /* LDMIA Ri, {R-high-register-list}.  */
19138       current_stub_contents =
19139 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19140 			    create_instruction_ldmia
19141 			    (ri, /*wback=*/0, insn_high_registers));
19142     }
19143   else if (wback && restore_rn)
19144     {
19145       /* The assembler should not have accepted this encoding.  */
19146       BFD_ASSERT (0 && "Cannot patch an instruction that has "
19147 	"undefined behavior.\n");
19148     }
19149 
19150   /* Fill the remainder of the stub with deterministic contents.  */
19151   current_stub_contents =
19152     stm32l4xx_fill_stub_udf (htab, output_bfd,
19153 			     base_stub_contents, current_stub_contents,
19154 			     base_stub_contents +
19155 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19156 
19157 }
19158 
19159 static void
19160 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19161 				      bfd * output_bfd,
19162 				      const insn32 initial_insn,
19163 				      const bfd_byte *const initial_insn_addr,
19164 				      bfd_byte *const base_stub_contents)
19165 {
19166   int num_words = ((unsigned int) initial_insn << 24) >> 24;
19167   bfd_byte *current_stub_contents = base_stub_contents;
19168 
19169   BFD_ASSERT (is_thumb2_vldm (initial_insn));
19170 
19171   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
19172      sequences of eight words or fewer, which do not trigger the
19173      hardware issue.  */
19174   if (num_words <= 8)
19175     {
19176       /* Untouched instruction.  */
19177       current_stub_contents =
19178 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19179 			    initial_insn);
19180 
19181       /* B initial_insn_addr+4.  */
19182       current_stub_contents =
19183 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19184 			    create_instruction_branch_absolute
19185 			    (initial_insn_addr - current_stub_contents));
19186     }
19187   else
19188     {
19189       bfd_boolean is_dp = /* DP encoding.  */
19190 	(initial_insn & 0xfe100f00) == 0xec100b00;
19191       bfd_boolean is_ia_nobang = /* (IA without !).  */
19192 	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
19193       bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
19194 	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
19195       bfd_boolean is_db_bang = /* (DB with !).  */
19196 	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
19197       int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19198       /* d = UInt (Vd:D);.  */
19199       int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19200 	| (((unsigned int)initial_insn << 9) >> 31);
19201 
19202       /* Compute the number of 8-words chunks needed to split.  */
19203       int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19204       int chunk;
19205 
19206       /* Test coverage was done under the hypothesis that exactly one
19207 	 of the is_* predicates above is true; the assertion below
19208 	 checks this.  */
19209       BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19210 		  && !(is_ia_nobang & is_ia_bang & is_db_bang));
19211 
19212       /* We split the word list in one pass for all cases, then we
19213 	 emit the adjustments:
19214 
19215 	 vldm rx, {...}
19216 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
19217 	 -> sub rx, rx, #size (list)
19218 
19219 	 vldm rx!, {...}
19220 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
19221 	 This also handles the vpop instruction (when rx is sp)
19222 
19223 	 vldmdb rx!, {...}
19224 	 -> vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
19225       for (chunk = 0; chunk < chunks; ++chunk)
19226 	{
19227 	  bfd_vma new_insn = 0;
19228 
19229 	  if (is_ia_nobang || is_ia_bang)
19230 	    {
19231 	      new_insn = create_instruction_vldmia
19232 		(base_reg,
19233 		 is_dp,
19234 		 /*wback=*/1,
19235 		 chunks - (chunk + 1) ?
19236 		 8 : num_words - chunk * 8,
19237 		 first_reg + chunk * 8);
19238 	    }
19239 	  else if (is_db_bang)
19240 	    {
19241 	      new_insn = create_instruction_vldmdb
19242 		(base_reg,
19243 		 is_dp,
19244 		 chunks - (chunk + 1) ?
19245 		 8 : num_words - chunk * 8,
19246 		 first_reg + chunk * 8);
19247 	    }
19248 
19249 	  if (new_insn)
19250 	    current_stub_contents =
19251 	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19252 				  new_insn);
19253 	}
19254 
19255       /* Only this case requires the base register compensation
19256 	 subtract.  */
19257       if (is_ia_nobang)
19258 	{
19259 	  current_stub_contents =
19260 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19261 				create_instruction_sub
19262 				(base_reg, base_reg, 4*num_words));
19263 	}
19264 
19265       /* B initial_insn_addr+4.  */
19266       current_stub_contents =
19267 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19268 			    create_instruction_branch_absolute
19269 			    (initial_insn_addr - current_stub_contents));
19270     }
19271 
19272   /* Fill the remainder of the stub with deterministic contents.  */
19273   current_stub_contents =
19274     stm32l4xx_fill_stub_udf (htab, output_bfd,
19275 			     base_stub_contents, current_stub_contents,
19276 			     base_stub_contents +
19277 			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19278 }
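
/* Illustrative sketch: for the original instruction VLDM r0, {d0-d11}
   (24 words, IA form without writeback), the code above emits

     vldmia r0!, {d0-d3}
     vldmia r0!, {d4-d7}
     vldmia r0!, {d8-d11}
     sub    r0, r0, #96
     b      <back to the instruction after the original VLDM>

   followed by UDF padding up to STM32L4XX_ERRATUM_VLDM_VENEER_SIZE.  */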
19279 
19280 static void
19281 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19282 				 bfd * output_bfd,
19283 				 const insn32 wrong_insn,
19284 				 const bfd_byte *const wrong_insn_addr,
19285 				 bfd_byte *const stub_contents)
19286 {
19287   if (is_thumb2_ldmia (wrong_insn))
19288     stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19289 					   wrong_insn, wrong_insn_addr,
19290 					   stub_contents);
19291   else if (is_thumb2_ldmdb (wrong_insn))
19292     stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19293 					   wrong_insn, wrong_insn_addr,
19294 					   stub_contents);
19295   else if (is_thumb2_vldm (wrong_insn))
19296     stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19297 					  wrong_insn, wrong_insn_addr,
19298 					  stub_contents);
19299 }
19300 
19301 /* End of stm32l4xx work-around.  */
19302 
19303 
19304 /* Do code byteswapping.  Return FALSE afterwards so that the section is
19305    written out as normal.  */
19306 
19307 static bfd_boolean
19308 elf32_arm_write_section (bfd *output_bfd,
19309 			 struct bfd_link_info *link_info,
19310 			 asection *sec,
19311 			 bfd_byte *contents)
19312 {
19313   unsigned int mapcount, errcount;
19314   _arm_elf_section_data *arm_data;
19315   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19316   elf32_arm_section_map *map;
19317   elf32_vfp11_erratum_list *errnode;
19318   elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19319   bfd_vma ptr;
19320   bfd_vma end;
19321   bfd_vma offset = sec->output_section->vma + sec->output_offset;
19322   bfd_byte tmp;
19323   unsigned int i;
19324 
19325   if (globals == NULL)
19326     return FALSE;
19327 
19328   /* If this section has not been allocated an _arm_elf_section_data
19329      structure then we cannot record anything.  */
19330   arm_data = get_arm_elf_section_data (sec);
19331   if (arm_data == NULL)
19332     return FALSE;
19333 
19334   mapcount = arm_data->mapcount;
19335   map = arm_data->map;
19336   errcount = arm_data->erratumcount;
19337 
19338   if (errcount != 0)
19339     {
19340       unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19341 
19342       for (errnode = arm_data->erratumlist; errnode != 0;
19343 	   errnode = errnode->next)
19344 	{
19345 	  bfd_vma target = errnode->vma - offset;
19346 
19347 	  switch (errnode->type)
19348 	    {
19349 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19350 	      {
19351 		bfd_vma branch_to_veneer;
19352 		/* Original condition code of instruction, plus bit mask for
19353 		   ARM B instruction.  */
19354 		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19355 				  | 0x0a000000;
19356 
19357 		/* The instruction is before the label.  */
19358 		target -= 4;
19359 
19360 		/* Above offset included in -4 below.  */
19361 		branch_to_veneer = errnode->u.b.veneer->vma
19362 				   - errnode->vma - 4;
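		/* The ARM B instruction encodes a signed 24-bit word offset,
		   giving a branch range of +/- (1 << 25) bytes, hence the
		   limits checked below.  */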
19363 
19364 		if ((signed) branch_to_veneer < -(1 << 25)
19365 		    || (signed) branch_to_veneer >= (1 << 25))
19366 		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19367 					"range"), output_bfd);
19368 
19369 		insn |= (branch_to_veneer >> 2) & 0xffffff;
19370 		contents[endianflip ^ target] = insn & 0xff;
19371 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19372 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19373 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19374 	      }
19375 	      break;
19376 
19377 	    case VFP11_ERRATUM_ARM_VENEER:
19378 	      {
19379 		bfd_vma branch_from_veneer;
19380 		unsigned int insn;
19381 
19382 		/* Take size of veneer into account.  */
19383 		branch_from_veneer = errnode->u.v.branch->vma
19384 				     - errnode->vma - 12;
19385 
19386 		if ((signed) branch_from_veneer < -(1 << 25)
19387 		    || (signed) branch_from_veneer >= (1 << 25))
19388 		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19389 					"range"), output_bfd);
19390 
19391 		/* Original instruction.  */
19392 		insn = errnode->u.v.branch->u.b.vfp_insn;
19393 		contents[endianflip ^ target] = insn & 0xff;
19394 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19395 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19396 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19397 
19398 		/* Branch back to insn after original insn.  */
19399 		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19400 		contents[endianflip ^ (target + 4)] = insn & 0xff;
19401 		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19402 		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19403 		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19404 	      }
19405 	      break;
19406 
19407 	    default:
19408 	      abort ();
19409 	    }
19410 	}
19411     }
19412 
19413   if (arm_data->stm32l4xx_erratumcount != 0)
19414     {
19415       for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19416 	   stm32l4xx_errnode != 0;
19417 	   stm32l4xx_errnode = stm32l4xx_errnode->next)
19418 	{
19419 	  bfd_vma target = stm32l4xx_errnode->vma - offset;
19420 
19421 	  switch (stm32l4xx_errnode->type)
19422 	    {
19423 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19424 	      {
19425 		unsigned int insn;
19426 		bfd_vma branch_to_veneer =
19427 		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
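		/* The veneer is reached with a 32-bit Thumb-2 branch, whose
		   range is +/- (1 << 24) bytes, hence the check below.  */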
19428 
19429 		if ((signed) branch_to_veneer < -(1 << 24)
19430 		    || (signed) branch_to_veneer >= (1 << 24))
19431 		  {
19432 		    bfd_vma out_of_range =
19433 		      ((signed) branch_to_veneer < -(1 << 24)) ?
19434 		      - branch_to_veneer - (1 << 24) :
19435 		      ((signed) branch_to_veneer >= (1 << 24)) ?
19436 		      branch_to_veneer - (1 << 24) : 0;
19437 
19438 		    _bfd_error_handler
19439 		      (_("%pB(%#" PRIx64 "): error: "
19440 			 "cannot create STM32L4XX veneer; "
19441 			 "jump out of range by %" PRId64 " bytes; "
19442 			 "cannot encode branch instruction"),
19443 		       output_bfd,
19444 		       (uint64_t) (stm32l4xx_errnode->vma - 4),
19445 		       (int64_t) out_of_range);
19446 		    continue;
19447 		  }
19448 
19449 		insn = create_instruction_branch_absolute
19450 		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19451 
19452 		/* The instruction is before the label.  */
19453 		target -= 4;
19454 
19455 		put_thumb2_insn (globals, output_bfd,
19456 				 (bfd_vma) insn, contents + target);
19457 	      }
19458 	      break;
19459 
19460 	    case STM32L4XX_ERRATUM_VENEER:
19461 	      {
19462 		bfd_byte * veneer;
19463 		bfd_byte * veneer_r;
19464 		unsigned int insn;
19465 
19466 		veneer = contents + target;
19467 		veneer_r = veneer
19468 		  + stm32l4xx_errnode->u.b.veneer->vma
19469 		  - stm32l4xx_errnode->vma - 4;
19470 
19471 		if ((signed) (veneer_r - veneer -
19472 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19473 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19474 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19475 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19476 		    || (signed) (veneer_r - veneer) >= (1 << 24))
19477 		  {
19478 		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19479 					  "veneer"), output_bfd);
19480 		     continue;
19481 		  }
19482 
19483 		/* Original instruction.  */
19484 		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19485 
19486 		stm32l4xx_create_replacing_stub
19487 		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19488 	      }
19489 	      break;
19490 
19491 	    default:
19492 	      abort ();
19493 	    }
19494 	}
19495     }
19496 
19497   if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19498     {
19499       arm_unwind_table_edit *edit_node
19500 	= arm_data->u.exidx.unwind_edit_list;
      /* Now, sec->size is the size of the section we will write.  The original
	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
	 markers) was sec->rawsize.  (If we performed no edits then rawsize
	 will be zero and we should use size instead.)  */
19505       bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19506       unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19507       unsigned int in_index, out_index;
19508       bfd_vma add_to_offsets = 0;
19509 
19510       for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19511 	{
19512 	  if (edit_node)
19513 	    {
19514 	      unsigned int edit_index = edit_node->index;
19515 
19516 	      if (in_index < edit_index && in_index * 8 < input_size)
19517 		{
19518 		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19519 				    contents + in_index * 8, add_to_offsets);
19520 		  out_index++;
19521 		  in_index++;
19522 		}
19523 	      else if (in_index == edit_index
19524 		       || (in_index * 8 >= input_size
19525 			   && edit_index == UINT_MAX))
19526 		{
19527 		  switch (edit_node->type)
19528 		    {
19529 		    case DELETE_EXIDX_ENTRY:
19530 		      in_index++;
19531 		      add_to_offsets += 8;
19532 		      break;
19533 
19534 		    case INSERT_EXIDX_CANTUNWIND_AT_END:
19535 		      {
19536 			asection *text_sec = edit_node->linked_section;
19537 			bfd_vma text_offset = text_sec->output_section->vma
19538 					      + text_sec->output_offset
19539 					      + text_sec->size;
19540 			bfd_vma exidx_offset = offset + out_index * 8;
19541 			unsigned long prel31_offset;
19542 
19543 			/* Note: this is meant to be equivalent to an
19544 			   R_ARM_PREL31 relocation.  These synthetic
19545 			   EXIDX_CANTUNWIND markers are not relocated by the
19546 			   usual BFD method.  */
19547 			prel31_offset = (text_offset - exidx_offset)
19548 					& 0x7ffffffful;
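			/* Only the low 31 bits are kept: the first word of an
			   EXIDX entry is a PREL31 offset whose top bit must
			   remain clear, as per the ARM EHABI.  */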
19549 			if (bfd_link_relocatable (link_info))
19550 			  {
			    /* A relocation for the new EXIDX_CANTUNWIND
			       entry is created here, so there is no need
			       to adjust the offset by hand.  */
19554 			    prel31_offset = text_sec->output_offset
19555 					    + text_sec->size;
19556 			  }
19557 
19558 			/* First address we can't unwind.  */
19559 			bfd_put_32 (output_bfd, prel31_offset,
19560 				    &edited_contents[out_index * 8]);
19561 
19562 			/* Code for EXIDX_CANTUNWIND.  */
19563 			bfd_put_32 (output_bfd, 0x1,
19564 				    &edited_contents[out_index * 8 + 4]);
19565 
19566 			out_index++;
19567 			add_to_offsets -= 8;
19568 		      }
19569 		      break;
19570 		    }
19571 
19572 		  edit_node = edit_node->next;
19573 		}
19574 	    }
19575 	  else
19576 	    {
19577 	      /* No more edits, copy remaining entries verbatim.  */
19578 	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19579 				contents + in_index * 8, add_to_offsets);
19580 	      out_index++;
19581 	      in_index++;
19582 	    }
19583 	}
19584 
19585       if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19586 	bfd_set_section_contents (output_bfd, sec->output_section,
19587 				  edited_contents,
19588 				  (file_ptr) sec->output_offset, sec->size);
19589 
19590       return TRUE;
19591     }
19592 
19593   /* Fix code to point to Cortex-A8 erratum stubs.  */
19594   if (globals->fix_cortex_a8)
19595     {
19596       struct a8_branch_to_stub_data data;
19597 
19598       data.writing_section = sec;
19599       data.contents = contents;
19600 
19601       bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19602 			 & data);
19603     }
19604 
19605   if (mapcount == 0)
19606     return FALSE;
19607 
19608   if (globals->byteswap_code)
19609     {
19610       qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19611 
19612       ptr = map[0].vma;
19613       for (i = 0; i < mapcount; i++)
19614 	{
19615 	  if (i == mapcount - 1)
19616 	    end = sec->size;
19617 	  else
19618 	    end = map[i + 1].vma;
19619 
19620 	  switch (map[i].type)
19621 	    {
19622 	    case 'a':
19623 	      /* Byte swap code words.  */
19624 	      while (ptr + 3 < end)
19625 		{
19626 		  tmp = contents[ptr];
19627 		  contents[ptr] = contents[ptr + 3];
19628 		  contents[ptr + 3] = tmp;
19629 		  tmp = contents[ptr + 1];
19630 		  contents[ptr + 1] = contents[ptr + 2];
19631 		  contents[ptr + 2] = tmp;
19632 		  ptr += 4;
19633 		}
19634 	      break;
19635 
19636 	    case 't':
19637 	      /* Byte swap code halfwords.  */
19638 	      while (ptr + 1 < end)
19639 		{
19640 		  tmp = contents[ptr];
19641 		  contents[ptr] = contents[ptr + 1];
19642 		  contents[ptr + 1] = tmp;
19643 		  ptr += 2;
19644 		}
19645 	      break;
19646 
19647 	    case 'd':
19648 	      /* Leave data alone.  */
19649 	      break;
19650 	    }
19651 	  ptr = end;
19652 	}
19653     }
19654 
19655   free (map);
19656   arm_data->mapcount = -1;
19657   arm_data->mapsize = 0;
19658   arm_data->map = NULL;
19659 
19660   return FALSE;
19661 }
19662 
19663 /* Mangle thumb function symbols as we read them in.  */
19664 
19665 static bfd_boolean
19666 elf32_arm_swap_symbol_in (bfd * abfd,
19667 			  const void *psrc,
19668 			  const void *pshn,
19669 			  Elf_Internal_Sym *dst)
19670 {
19671   Elf_Internal_Shdr *symtab_hdr;
19672   const char *name = NULL;
19673 
19674   if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19675     return FALSE;
19676   dst->st_target_internal = 0;
19677 
19678   /* New EABI objects mark thumb function symbols by setting the low bit of
19679      the address.  */
19680   if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19681       || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19682     {
19683       if (dst->st_value & 1)
19684 	{
19685 	  dst->st_value &= ~(bfd_vma) 1;
19686 	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19687 				   ST_BRANCH_TO_THUMB);
19688 	}
19689       else
19690 	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19691     }
19692   else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19693     {
19694       dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19695       ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19696     }
19697   else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19698     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19699   else
19700     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19701 
19702   /* Mark CMSE special symbols.  */
19703   symtab_hdr = & elf_symtab_hdr (abfd);
19704   if (symtab_hdr->sh_size)
19705     name = bfd_elf_sym_name (abfd, symtab_hdr, dst, NULL);
19706   if (name && CONST_STRNEQ (name, CMSE_PREFIX))
19707     ARM_SET_SYM_CMSE_SPCL (dst->st_target_internal);
19708 
19709   return TRUE;
19710 }
19711 
19712 
19713 /* Mangle thumb function symbols as we write them out.  */
19714 
19715 static void
19716 elf32_arm_swap_symbol_out (bfd *abfd,
19717 			   const Elf_Internal_Sym *src,
19718 			   void *cdst,
19719 			   void *shndx)
19720 {
19721   Elf_Internal_Sym newsym;
19722 
19723   /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19724      of the address set, as per the new EABI.  We do this unconditionally
19725      because objcopy does not set the elf header flags until after
19726      it writes out the symbol table.  */
19727   if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19728     {
19729       newsym = *src;
19730       if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19731 	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19732       if (newsym.st_shndx != SHN_UNDEF)
19733 	{
	  /* Do this only for defined symbols.  At link time, the static
	     linker simulates the work of the dynamic linker by resolving
	     symbols and carries over the thumbness of the symbols it finds
	     to the output symbol table.  It is not clear exactly how this
	     happens, but the thumbness of undefined symbols may well be
	     different at runtime, and writing '1' for them would be
	     confusing for users and possibly for the dynamic linker
	     itself.  */
19742 	  newsym.st_value |= 1;
19743 	}
19744 
19745       src = &newsym;
19746     }
19747   bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19748 }
19749 
19750 /* Add the PT_ARM_EXIDX program header.  */
19751 
19752 static bfd_boolean
19753 elf32_arm_modify_segment_map (bfd *abfd,
19754 			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
19755 {
19756   struct elf_segment_map *m;
19757   asection *sec;
19758 
19759   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19760   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19761     {
19762       /* If there is already a PT_ARM_EXIDX header, then we do not
19763 	 want to add another one.  This situation arises when running
19764 	 "strip"; the input binary already has the header.  */
19765       m = elf_seg_map (abfd);
19766       while (m && m->p_type != PT_ARM_EXIDX)
19767 	m = m->next;
19768       if (!m)
19769 	{
19770 	  m = (struct elf_segment_map *)
19771 	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19772 	  if (m == NULL)
19773 	    return FALSE;
19774 	  m->p_type = PT_ARM_EXIDX;
19775 	  m->count = 1;
19776 	  m->sections[0] = sec;
19777 
19778 	  m->next = elf_seg_map (abfd);
19779 	  elf_seg_map (abfd) = m;
19780 	}
19781     }
19782 
19783   return TRUE;
19784 }
19785 
19786 /* We may add a PT_ARM_EXIDX program header.  */
19787 
19788 static int
19789 elf32_arm_additional_program_headers (bfd *abfd,
19790 				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
19791 {
19792   asection *sec;
19793 
19794   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19795   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19796     return 1;
19797   else
19798     return 0;
19799 }
19800 
19801 /* Hook called by the linker routine which adds symbols from an object
19802    file.  */
19803 
19804 static bfd_boolean
19805 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19806 			   Elf_Internal_Sym *sym, const char **namep,
19807 			   flagword *flagsp, asection **secp, bfd_vma *valp)
19808 {
19809   if (elf32_arm_hash_table (info) == NULL)
19810     return FALSE;
19811 
19812   if (elf32_arm_hash_table (info)->vxworks_p
19813       && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19814 				       flagsp, secp, valp))
19815     return FALSE;
19816 
19817   return TRUE;
19818 }
19819 
19820 /* We use this to override swap_symbol_in and swap_symbol_out.  */
19821 const struct elf_size_info elf32_arm_size_info =
19822 {
19823   sizeof (Elf32_External_Ehdr),
19824   sizeof (Elf32_External_Phdr),
19825   sizeof (Elf32_External_Shdr),
19826   sizeof (Elf32_External_Rel),
19827   sizeof (Elf32_External_Rela),
19828   sizeof (Elf32_External_Sym),
19829   sizeof (Elf32_External_Dyn),
19830   sizeof (Elf_External_Note),
19831   4,
19832   1,
19833   32, 2,
19834   ELFCLASS32, EV_CURRENT,
19835   bfd_elf32_write_out_phdrs,
19836   bfd_elf32_write_shdrs_and_ehdr,
19837   bfd_elf32_checksum_contents,
19838   bfd_elf32_write_relocs,
19839   elf32_arm_swap_symbol_in,
19840   elf32_arm_swap_symbol_out,
19841   bfd_elf32_slurp_reloc_table,
19842   bfd_elf32_slurp_symbol_table,
19843   bfd_elf32_swap_dyn_in,
19844   bfd_elf32_swap_dyn_out,
19845   bfd_elf32_swap_reloc_in,
19846   bfd_elf32_swap_reloc_out,
19847   bfd_elf32_swap_reloca_in,
19848   bfd_elf32_swap_reloca_out
19849 };
19850 
19851 static bfd_vma
19852 read_code32 (const bfd *abfd, const bfd_byte *addr)
19853 {
19854   /* V7 BE8 code is always little endian.  */
19855   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19856     return bfd_getl32 (addr);
19857 
19858   return bfd_get_32 (abfd, addr);
19859 }
19860 
19861 static bfd_vma
19862 read_code16 (const bfd *abfd, const bfd_byte *addr)
19863 {
19864   /* V7 BE8 code is always little endian.  */
19865   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
19866     return bfd_getl16 (addr);
19867 
19868   return bfd_get_16 (abfd, addr);
19869 }
19870 
/* Return the size of the plt0 entry starting at ADDR,
   or (bfd_vma) -1 if the size cannot be determined.  */
19873 
19874 static bfd_vma
19875 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
19876 {
19877   bfd_vma first_word;
19878   bfd_vma plt0_size;
19879 
19880   first_word = read_code32 (abfd, addr);
19881 
19882   if (first_word == elf32_arm_plt0_entry[0])
19883     plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
19884   else if (first_word == elf32_thumb2_plt0_entry[0])
19885     plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
19886   else
19887     /* We don't yet handle this PLT format.  */
19888     return (bfd_vma) -1;
19889 
19890   return plt0_size;
19891 }
19892 
/* Return the size of the plt entry starting at offset OFFSET
   of the plt section located at address START,
   or (bfd_vma) -1 if the size cannot be determined.  */
19896 
19897 static bfd_vma
19898 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
19899 {
19900   bfd_vma first_insn;
19901   bfd_vma plt_size = 0;
19902   const bfd_byte *addr = start + offset;
19903 
  /* PLT entry size is fixed on Thumb-only platforms.  */
  if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
    return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
19907 
  /* Account for the Thumb stub that may precede the ARM entry.  */
19909   if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
19910     {
      plt_size += 2 * ARRAY_SIZE (elf32_arm_plt_thumb_stub);
19912     }
19913 
19914   /* Strip immediate from first add.  */
19915   first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
19916 
19917 #ifdef FOUR_WORD_PLT
19918   if (first_insn == elf32_arm_plt_entry[0])
19919     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
19920 #else
19921   if (first_insn == elf32_arm_plt_entry_long[0])
19922     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
19923   else if (first_insn == elf32_arm_plt_entry_short[0])
19924     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
19925 #endif
19926   else
19927     /* We don't yet handle this PLT format.  */
19928     return (bfd_vma) -1;
19929 
19930   return plt_size;
19931 }
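
/* The two helpers above are used by elf32_arm_get_synthetic_symtab below:
   the PLT header size is consumed once, after which each entry's size
   advances the offset from one synthetic "@plt" symbol to the next.  */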
19932 
19933 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
19934 
19935 static long
19936 elf32_arm_get_synthetic_symtab (bfd *abfd,
19937 			       long symcount ATTRIBUTE_UNUSED,
19938 			       asymbol **syms ATTRIBUTE_UNUSED,
19939 			       long dynsymcount,
19940 			       asymbol **dynsyms,
19941 			       asymbol **ret)
19942 {
19943   asection *relplt;
19944   asymbol *s;
19945   arelent *p;
19946   long count, i, n;
19947   size_t size;
19948   Elf_Internal_Shdr *hdr;
19949   char *names;
19950   asection *plt;
19951   bfd_vma offset;
19952   bfd_byte *data;
19953 
19954   *ret = NULL;
19955 
19956   if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
19957     return 0;
19958 
19959   if (dynsymcount <= 0)
19960     return 0;
19961 
19962   relplt = bfd_get_section_by_name (abfd, ".rel.plt");
19963   if (relplt == NULL)
19964     return 0;
19965 
19966   hdr = &elf_section_data (relplt)->this_hdr;
19967   if (hdr->sh_link != elf_dynsymtab (abfd)
19968       || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
19969     return 0;
19970 
19971   plt = bfd_get_section_by_name (abfd, ".plt");
19972   if (plt == NULL)
19973     return 0;
19974 
19975   if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
19976     return -1;
19977 
19978   data = plt->contents;
19979   if (data == NULL)
19980     {
      if (!bfd_get_full_section_contents (abfd, (asection *) plt, &data)
	  || data == NULL)
	return -1;
      bfd_cache_section_contents ((asection *) plt, data);
19984     }
19985 
19986   count = relplt->size / hdr->sh_entsize;
19987   size = count * sizeof (asymbol);
19988   p = relplt->relocation;
19989   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
19990     {
19991       size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
19992       if (p->addend != 0)
19993 	size += sizeof ("+0x") - 1 + 8;
19994     }
19995 
19996   s = *ret = (asymbol *) bfd_malloc (size);
19997   if (s == NULL)
19998     return -1;
19999 
20000   offset = elf32_arm_plt0_size (abfd, data);
20001   if (offset == (bfd_vma) -1)
20002     return -1;
20003 
20004   names = (char *) (s + count);
20005   p = relplt->relocation;
20006   n = 0;
20007   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20008     {
20009       size_t len;
20010 
20011       bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
20012       if (plt_size == (bfd_vma) -1)
20013 	break;
20014 
20015       *s = **p->sym_ptr_ptr;
20016       /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
20017 	 we are defining a symbol, ensure one of them is set.  */
20018       if ((s->flags & BSF_LOCAL) == 0)
20019 	s->flags |= BSF_GLOBAL;
20020       s->flags |= BSF_SYNTHETIC;
20021       s->section = plt;
20022       s->value = offset;
20023       s->name = names;
20024       s->udata.p = NULL;
20025       len = strlen ((*p->sym_ptr_ptr)->name);
20026       memcpy (names, (*p->sym_ptr_ptr)->name, len);
20027       names += len;
20028       if (p->addend != 0)
20029 	{
20030 	  char buf[30], *a;
20031 
20032 	  memcpy (names, "+0x", sizeof ("+0x") - 1);
20033 	  names += sizeof ("+0x") - 1;
20034 	  bfd_sprintf_vma (abfd, buf, p->addend);
20035 	  for (a = buf; *a == '0'; ++a)
20036 	    ;
20037 	  len = strlen (a);
20038 	  memcpy (names, a, len);
20039 	  names += len;
20040 	}
20041       memcpy (names, "@plt", sizeof ("@plt"));
20042       names += sizeof ("@plt");
20043       ++s, ++n;
20044       offset += plt_size;
20045     }
20046 
20047   return n;
20048 }
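
/* Note: the synthetic symbols created above are named "<symbol>@plt" (plus
   any addend), which lets tools such as objdump label individual PLT stubs
   when disassembling.  */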
20049 
20050 static bfd_boolean
20051 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
20052 {
20053   if (hdr->sh_flags & SHF_ARM_PURECODE)
20054     *flags |= SEC_ELF_PURECODE;
20055   return TRUE;
20056 }
20057 
20058 static flagword
20059 elf32_arm_lookup_section_flags (char *flag_name)
20060 {
20061   if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20062     return SHF_ARM_PURECODE;
20063 
20064   return SEC_NO_FLAGS;
20065 }
20066 
20067 static unsigned int
20068 elf32_arm_count_additional_relocs (asection *sec)
20069 {
20070   struct _arm_elf_section_data *arm_data;
20071   arm_data = get_arm_elf_section_data (sec);
20072 
20073   return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20074 }
20075 
20076 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
   has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised,
   FALSE otherwise.  ISECTION is the best-guess matching section from the
20079    input bfd IBFD, but it might be NULL.  */
20080 
20081 static bfd_boolean
20082 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20083 				       bfd *obfd ATTRIBUTE_UNUSED,
20084 				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20085 				       Elf_Internal_Shdr *osection)
20086 {
20087   switch (osection->sh_type)
20088     {
20089     case SHT_ARM_EXIDX:
20090       {
20091 	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20092 	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20093 	unsigned i = 0;
20094 
20095 	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20096 	osection->sh_info = 0;
20097 
20098 	/* The sh_link field must be set to the text section associated with
20099 	   this index section.  Unfortunately the ARM EHABI does not specify
	   exactly how to determine this association.  Our caller does,
	   however, try to match up OSECTION with its corresponding input
	   section, so that is a good first guess.  */
20103 	if (isection != NULL
20104 	    && osection->bfd_section != NULL
20105 	    && isection->bfd_section != NULL
20106 	    && isection->bfd_section->output_section != NULL
20107 	    && isection->bfd_section->output_section == osection->bfd_section
20108 	    && iheaders != NULL
20109 	    && isection->sh_link > 0
20110 	    && isection->sh_link < elf_numsections (ibfd)
20111 	    && iheaders[isection->sh_link]->bfd_section != NULL
20112 	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20113 	    )
20114 	  {
20115 	    for (i = elf_numsections (obfd); i-- > 0;)
20116 	      if (oheaders[i]->bfd_section
20117 		  == iheaders[isection->sh_link]->bfd_section->output_section)
20118 		break;
20119 	  }
20120 
20121 	if (i == 0)
20122 	  {
20123 	    /* Failing that we have to find a matching section ourselves.  If
20124 	       we had the output section name available we could compare that
20125 	       with input section names.  Unfortunately we don't.  So instead
20126 	       we use a simple heuristic and look for the nearest executable
20127 	       section before this one.  */
20128 	    for (i = elf_numsections (obfd); i-- > 0;)
20129 	      if (oheaders[i] == osection)
20130 		break;
20131 	    if (i == 0)
20132 	      break;
20133 
20134 	    while (i-- > 0)
20135 	      if (oheaders[i]->sh_type == SHT_PROGBITS
20136 		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20137 		  == (SHF_ALLOC | SHF_EXECINSTR))
20138 		break;
20139 	  }
20140 
20141 	if (i)
20142 	  {
20143 	    osection->sh_link = i;
20144 	    /* If the text section was part of a group
20145 	       then the index section should be too.  */
20146 	    if (oheaders[i]->sh_flags & SHF_GROUP)
20147 	      osection->sh_flags |= SHF_GROUP;
20148 	    return TRUE;
20149 	  }
20150       }
20151       break;
20152 
20153     case SHT_ARM_PREEMPTMAP:
20154       osection->sh_flags = SHF_ALLOC;
20155       break;
20156 
20157     case SHT_ARM_ATTRIBUTES:
20158     case SHT_ARM_DEBUGOVERLAY:
20159     case SHT_ARM_OVERLAYSECTION:
20160     default:
20161       break;
20162     }
20163 
20164   return FALSE;
20165 }
20166 
20167 /* Returns TRUE if NAME is an ARM mapping symbol.
20168    Traditionally the symbols $a, $d and $t have been used.
20169    The ARM ELF standard also defines $x (for A64 code).  It also allows a
   period-initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20171    Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20172    not support them here.  $t.x indicates the start of ThumbEE instructions.  */
20173 
20174 static bfd_boolean
20175 is_arm_mapping_symbol (const char * name)
20176 {
20177   return name != NULL /* Paranoia.  */
20178     && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20179 			 the mapping symbols could have acquired a prefix.
20180 			 We do not support this here, since such symbols no
20181 			 longer conform to the ARM ELF ABI.  */
20182     && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20183     && (name[2] == 0 || name[2] == '.');
  /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
     the characters that follow the period are legal characters for the body
     of a symbol's name.  For now we just assume that this is the case.  */
20187 }
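
/* For example, "$a", "$d" and "$t.x" satisfy the test above, whereas "$b"
   and prefixed names such as "foo$a" do not.  */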
20188 
20189 /* Make sure that mapping symbols in object files are not removed via the
20190    "strip --strip-unneeded" tool.  These symbols are needed in order to
20191    correctly generate interworking veneers, and for byte swapping code
20192    regions.  Once an object file has been linked, it is safe to remove the
20193    symbols as they will no longer be needed.  */
20194 
20195 static void
20196 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20197 {
20198   if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20199       && sym->section != bfd_abs_section_ptr
20200       && is_arm_mapping_symbol (sym->name))
20201     sym->flags |= BSF_KEEP;
20202 }
20203 
20204 #undef  elf_backend_copy_special_section_fields
20205 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20206 
20207 #define ELF_ARCH			bfd_arch_arm
20208 #define ELF_TARGET_ID			ARM_ELF_DATA
20209 #define ELF_MACHINE_CODE		EM_ARM
20210 #ifdef __QNXTARGET__
20211 #define ELF_MAXPAGESIZE			0x1000
20212 #else
20213 #define ELF_MAXPAGESIZE			0x10000
20214 #endif
20215 #define ELF_MINPAGESIZE			0x1000
20216 #define ELF_COMMONPAGESIZE		0x1000
20217 
20218 #define bfd_elf32_mkobject			elf32_arm_mkobject
20219 
20220 #define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
20221 #define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
20222 #define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
20223 #define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
20224 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
20225 #define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
20226 #define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
20227 #define bfd_elf32_find_nearest_line		elf32_arm_find_nearest_line
20228 #define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
20229 #define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
20230 #define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
20231 #define bfd_elf32_bfd_final_link		elf32_arm_final_link
20232 #define bfd_elf32_get_synthetic_symtab	elf32_arm_get_synthetic_symtab
20233 
20234 #define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
20235 #define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
20236 #define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
20237 #define elf_backend_check_relocs		elf32_arm_check_relocs
20238 #define elf_backend_update_relocs		elf32_arm_update_relocs
20239 #define elf_backend_relocate_section		elf32_arm_relocate_section
20240 #define elf_backend_write_section		elf32_arm_write_section
20241 #define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
20242 #define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
20243 #define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
20244 #define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
20245 #define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
20246 #define elf_backend_always_size_sections	elf32_arm_always_size_sections
20247 #define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
20248 #define elf_backend_post_process_headers	elf32_arm_post_process_headers
20249 #define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
20250 #define elf_backend_object_p			elf32_arm_object_p
20251 #define elf_backend_fake_sections		elf32_arm_fake_sections
20252 #define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
20253 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
20254 #define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
20255 #define elf_backend_size_info			elf32_arm_size_info
20256 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
20257 #define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
20258 #define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
20259 #define elf_backend_filter_implib_symbols	elf32_arm_filter_implib_symbols
20260 #define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
20261 #define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
20262 #define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
20263 #define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing
20264 
20265 #define elf_backend_can_refcount       1
20266 #define elf_backend_can_gc_sections    1
20267 #define elf_backend_plt_readonly       1
20268 #define elf_backend_want_got_plt       1
20269 #define elf_backend_want_plt_sym       0
20270 #define elf_backend_want_dynrelro      1
20271 #define elf_backend_may_use_rel_p      1
20272 #define elf_backend_may_use_rela_p     0
20273 #define elf_backend_default_use_rela_p 0
20274 #define elf_backend_dtrel_excludes_plt 1
20275 
20276 #define elf_backend_got_header_size	12
20277 #define elf_backend_extern_protected_data 1
20278 
20279 #undef	elf_backend_obj_attrs_vendor
20280 #define elf_backend_obj_attrs_vendor		"aeabi"
20281 #undef	elf_backend_obj_attrs_section
20282 #define elf_backend_obj_attrs_section		".ARM.attributes"
20283 #undef	elf_backend_obj_attrs_arg_type
20284 #define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
20285 #undef	elf_backend_obj_attrs_section_type
20286 #define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
20287 #define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
20288 #define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown
20289 
20290 #undef	elf_backend_section_flags
20291 #define elf_backend_section_flags		elf32_arm_section_flags
20292 #undef	elf_backend_lookup_section_flags_hook
20293 #define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags
20294 
20295 #define elf_backend_linux_prpsinfo32_ugid16	TRUE
20296 
20297 #include "elf32-target.h"
20298 
20299 /* Native Client targets.  */
20300 
20301 #undef	TARGET_LITTLE_SYM
20302 #define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
20303 #undef	TARGET_LITTLE_NAME
20304 #define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
20305 #undef	TARGET_BIG_SYM
20306 #define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
20307 #undef	TARGET_BIG_NAME
20308 #define TARGET_BIG_NAME			"elf32-bigarm-nacl"
20309 
20310 /* Like elf32_arm_link_hash_table_create -- but overrides
20311    appropriately for NaCl.  */
20312 
20313 static struct bfd_link_hash_table *
20314 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20315 {
20316   struct bfd_link_hash_table *ret;
20317 
20318   ret = elf32_arm_link_hash_table_create (abfd);
20319   if (ret)
20320     {
20321       struct elf32_arm_link_hash_table *htab
20322 	= (struct elf32_arm_link_hash_table *) ret;
20323 
20324       htab->nacl_p = 1;
20325 
20326       htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20327       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20328     }
20329   return ret;
20330 }
20331 
20332 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20333    really need to use elf32_arm_modify_segment_map.  But we do it
20334    anyway just to reduce gratuitous differences with the stock ARM backend.  */
20335 
20336 static bfd_boolean
20337 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20338 {
20339   return (elf32_arm_modify_segment_map (abfd, info)
20340 	  && nacl_modify_segment_map (abfd, info));
20341 }
20342 
20343 static void
20344 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
20345 {
20346   elf32_arm_final_write_processing (abfd, linker);
20347   nacl_final_write_processing (abfd, linker);
20348 }
20349 
20350 static bfd_vma
20351 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20352 			    const arelent *rel ATTRIBUTE_UNUSED)
20353 {
20354   return plt->vma
20355     + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20356 	   i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20357 }
20358 
20359 #undef	elf32_bed
20360 #define elf32_bed				elf32_arm_nacl_bed
20361 #undef  bfd_elf32_bfd_link_hash_table_create
20362 #define bfd_elf32_bfd_link_hash_table_create	\
20363   elf32_arm_nacl_link_hash_table_create
20364 #undef	elf_backend_plt_alignment
20365 #define elf_backend_plt_alignment		4
20366 #undef	elf_backend_modify_segment_map
20367 #define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
20368 #undef	elf_backend_modify_program_headers
20369 #define	elf_backend_modify_program_headers	nacl_modify_program_headers
20370 #undef  elf_backend_final_write_processing
20371 #define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
20372 #undef bfd_elf32_get_synthetic_symtab
20373 #undef  elf_backend_plt_sym_val
20374 #define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
20375 #undef  elf_backend_copy_special_section_fields
20376 
20377 #undef	ELF_MINPAGESIZE
20378 #undef	ELF_COMMONPAGESIZE
20379 
20380 
20381 #include "elf32-target.h"
20382 
20383 /* Reset to defaults.  */
20384 #undef	elf_backend_plt_alignment
20385 #undef	elf_backend_modify_segment_map
20386 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
20387 #undef	elf_backend_modify_program_headers
20388 #undef  elf_backend_final_write_processing
20389 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
20390 #undef	ELF_MINPAGESIZE
20391 #define ELF_MINPAGESIZE			0x1000
20392 #undef	ELF_COMMONPAGESIZE
20393 #define ELF_COMMONPAGESIZE		0x1000
20394 
20395 
20396 /* FDPIC Targets.  */
20397 
20398 #undef  TARGET_LITTLE_SYM
20399 #define TARGET_LITTLE_SYM		arm_elf32_fdpic_le_vec
20400 #undef  TARGET_LITTLE_NAME
20401 #define TARGET_LITTLE_NAME		"elf32-littlearm-fdpic"
20402 #undef  TARGET_BIG_SYM
20403 #define TARGET_BIG_SYM			arm_elf32_fdpic_be_vec
20404 #undef  TARGET_BIG_NAME
20405 #define TARGET_BIG_NAME			"elf32-bigarm-fdpic"
20406 #undef elf_match_priority
20407 #define elf_match_priority		128
20408 #undef ELF_OSABI
20409 #define ELF_OSABI		ELFOSABI_ARM_FDPIC
20410 
20411 /* Like elf32_arm_link_hash_table_create -- but overrides
20412    appropriately for FDPIC.  */
20413 
20414 static struct bfd_link_hash_table *
20415 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20416 {
20417   struct bfd_link_hash_table *ret;
20418 
20419   ret = elf32_arm_link_hash_table_create (abfd);
20420   if (ret)
20421     {
20422       struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20423 
20424       htab->fdpic_p = 1;
20425     }
20426   return ret;
20427 }
20428 
20429 /* We need dynamic symbols for every section, since segments can
20430    relocate independently.  */
20431 static bfd_boolean
20432 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20433 				    struct bfd_link_info *info
20434 				    ATTRIBUTE_UNUSED,
20435 				    asection *p ATTRIBUTE_UNUSED)
20436 {
20437   switch (elf_section_data (p)->this_hdr.sh_type)
20438     {
20439     case SHT_PROGBITS:
20440     case SHT_NOBITS:
      /* If sh_type has not yet been decided, assume it could be
	 SHT_PROGBITS/SHT_NOBITS.  */
20443     case SHT_NULL:
20444       return FALSE;
20445 
20446       /* There shouldn't be section relative relocations
20447 	 against any other section.  */
20448     default:
20449       return TRUE;
20450     }
20451 }
20452 
20453 #undef  elf32_bed
20454 #define elf32_bed				elf32_arm_fdpic_bed
20455 
20456 #undef  bfd_elf32_bfd_link_hash_table_create
20457 #define bfd_elf32_bfd_link_hash_table_create 	elf32_arm_fdpic_link_hash_table_create
20458 
20459 #undef elf_backend_omit_section_dynsym
20460 #define elf_backend_omit_section_dynsym		elf32_arm_fdpic_omit_section_dynsym
20461 
20462 #include "elf32-target.h"
20463 
20464 #undef elf_match_priority
20465 #undef ELF_OSABI
20466 #undef elf_backend_omit_section_dynsym
20467 
20468 /* VxWorks Targets.  */
20469 
20470 #undef	TARGET_LITTLE_SYM
20471 #define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
20472 #undef	TARGET_LITTLE_NAME
20473 #define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
20474 #undef	TARGET_BIG_SYM
20475 #define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
20476 #undef	TARGET_BIG_NAME
20477 #define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
20478 
20479 /* Like elf32_arm_link_hash_table_create -- but overrides
20480    appropriately for VxWorks.  */
20481 
20482 static struct bfd_link_hash_table *
20483 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20484 {
20485   struct bfd_link_hash_table *ret;
20486 
20487   ret = elf32_arm_link_hash_table_create (abfd);
20488   if (ret)
20489     {
20490       struct elf32_arm_link_hash_table *htab
20491 	= (struct elf32_arm_link_hash_table *) ret;
20492       htab->use_rel = 0;
20493       htab->vxworks_p = 1;
20494     }
20495   return ret;
20496 }
20497 
20498 static void
20499 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
20500 {
20501   elf32_arm_final_write_processing (abfd, linker);
20502   elf_vxworks_final_write_processing (abfd, linker);
20503 }
20504 
20505 #undef  elf32_bed
20506 #define elf32_bed elf32_arm_vxworks_bed
20507 
20508 #undef  bfd_elf32_bfd_link_hash_table_create
20509 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
20510 #undef  elf_backend_final_write_processing
20511 #define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
20512 #undef  elf_backend_emit_relocs
20513 #define elf_backend_emit_relocs			elf_vxworks_emit_relocs
20514 
20515 #undef  elf_backend_may_use_rel_p
20516 #define elf_backend_may_use_rel_p	0
20517 #undef  elf_backend_may_use_rela_p
20518 #define elf_backend_may_use_rela_p	1
20519 #undef  elf_backend_default_use_rela_p
20520 #define elf_backend_default_use_rela_p	1
20521 #undef  elf_backend_want_plt_sym
20522 #define elf_backend_want_plt_sym	1
20523 #undef  ELF_MAXPAGESIZE
20524 #define ELF_MAXPAGESIZE			0x1000
20525 
20526 #include "elf32-target.h"
20527 
20528 
20529 /* Merge backend specific data from an object file to the output
20530    object file when linking.  */
20531 
20532 static bfd_boolean
20533 elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20534 {
20535   bfd *obfd = info->output_bfd;
20536   flagword out_flags;
20537   flagword in_flags;
20538   bfd_boolean flags_compatible = TRUE;
20539   asection *sec;
20540 
20541   /* Check if we have the same endianness.  */
20542   if (! _bfd_generic_verify_endian_match (ibfd, info))
20543     return FALSE;
20544 
20545   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
20546     return TRUE;
20547 
20548   if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20549     return FALSE;
20550 
20551   /* The input BFD must have had its flags initialised.  */
20552   /* The following seems bogus to me -- The flags are initialized in
20553      the assembler but I don't think an elf_flags_init field is
20554      written into the object.  */
20555   /* BFD_ASSERT (elf_flags_init (ibfd)); */
20556 
20557   in_flags  = elf_elfheader (ibfd)->e_flags;
20558   out_flags = elf_elfheader (obfd)->e_flags;
20559 
20560   /* In theory there is no reason why we couldn't handle this.  However
20561      in practice it isn't even close to working and there is no real
20562      reason to want it.  */
20563   if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20564       && !(ibfd->flags & DYNAMIC)
20565       && (in_flags & EF_ARM_BE8))
20566     {
20567       _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20568 			  ibfd);
20569       return FALSE;
20570     }
20571 
20572   if (!elf_flags_init (obfd))
20573     {
20574       /* If the input is the default architecture and had the default
20575 	 flags then do not bother setting the flags for the output
20576 	 architecture, instead allow future merges to do this.  If no
20577 	 future merges ever set these flags then they will retain their
	 uninitialised values, which, surprise surprise, correspond
20579 	 to the default values.  */
20580       if (bfd_get_arch_info (ibfd)->the_default
20581 	  && elf_elfheader (ibfd)->e_flags == 0)
20582 	return TRUE;
20583 
20584       elf_flags_init (obfd) = TRUE;
20585       elf_elfheader (obfd)->e_flags = in_flags;
20586 
20587       if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20588 	  && bfd_get_arch_info (obfd)->the_default)
20589 	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20590 
20591       return TRUE;
20592     }
20593 
20594   /* Determine what should happen if the input ARM architecture
20595      does not match the output ARM architecture.  */
20596   if (! bfd_arm_merge_machines (ibfd, obfd))
20597     return FALSE;
20598 
20599   /* Identical flags must be compatible.  */
20600   if (in_flags == out_flags)
20601     return TRUE;
20602 
  /* Check to see if the input BFD actually contains any sections.  If
     not, its flags may not have been initialised either, but it
     cannot actually cause any incompatibility.  Do not short-circuit
     dynamic objects; their section list may be emptied by
     elf_link_add_object_symbols.

     Also check to see if there are no code sections in the input.
     In this case there is no need to check for code specific flags.
     XXX - do we need to worry about floating-point format compatibility
     in data sections?  */
20613   if (!(ibfd->flags & DYNAMIC))
20614     {
20615       bfd_boolean null_input_bfd = TRUE;
20616       bfd_boolean only_data_sections = TRUE;
20617 
20618       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20619 	{
20620 	  /* Ignore synthetic glue sections.  */
20621 	  if (strcmp (sec->name, ".glue_7")
20622 	      && strcmp (sec->name, ".glue_7t"))
20623 	    {
20624 	      if ((bfd_get_section_flags (ibfd, sec)
20625 		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20626 		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20627 		only_data_sections = FALSE;
20628 
20629 	      null_input_bfd = FALSE;
20630 	      break;
20631 	    }
20632 	}
20633 
20634       if (null_input_bfd || only_data_sections)
20635 	return TRUE;
20636     }
20637 
20638   /* Complain about various flag mismatches.  */
20639   if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20640 				      EF_ARM_EABI_VERSION (out_flags)))
20641     {
20642       _bfd_error_handler
20643 	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20644 	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20645 	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20646       return FALSE;
20647     }
20648 
20649   /* Not sure what needs to be checked for EABI versions >= 1.  */
20650   /* VxWorks libraries do not use these flags.  */
20651   if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20652       && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20653       && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
20654     {
20655       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20656 	{
20657 	  _bfd_error_handler
20658 	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20659 	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20660 	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20661 	  flags_compatible = FALSE;
20662 	}
20663 
20664       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20665 	{
20666 	  if (in_flags & EF_ARM_APCS_FLOAT)
20667 	    _bfd_error_handler
20668 	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20669 	       ibfd, obfd);
20670 	  else
20671 	    _bfd_error_handler
20672 	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20673 	       ibfd, obfd);
20674 
20675 	  flags_compatible = FALSE;
20676 	}
20677 
20678       if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20679 	{
20680 	  if (in_flags & EF_ARM_VFP_FLOAT)
20681 	    _bfd_error_handler
20682 	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20683 	       ibfd, "VFP", obfd);
20684 	  else
20685 	    _bfd_error_handler
20686 	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20687 	       ibfd, "FPA", obfd);
20688 
20689 	  flags_compatible = FALSE;
20690 	}
20691 
20692       if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
20693 	{
20694 	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
20695 	    _bfd_error_handler
20696 	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20697 	       ibfd, "Maverick", obfd);
20698 	  else
20699 	    _bfd_error_handler
20700 	      (_("error: %pB does not use %s instructions, whereas %pB does"),
20701 	       ibfd, "Maverick", obfd);
20702 
20703 	  flags_compatible = FALSE;
20704 	}
20705 
20706 #ifdef EF_ARM_SOFT_FLOAT
20707       if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20708 	{
20709 	  /* We can allow interworking between code that is VFP format
20710 	     layout, and uses either soft float or integer regs for
20711 	     passing floating point arguments and results.  We already
20712 	     know that the APCS_FLOAT flags match; similarly for VFP
20713 	     flags.  */
20714 	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20715 	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20716 	    {
20717 	      if (in_flags & EF_ARM_SOFT_FLOAT)
20718 		_bfd_error_handler
20719 		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20720 		   ibfd, obfd);
20721 	      else
20722 		_bfd_error_handler
20723 		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20724 		   ibfd, obfd);
20725 
20726 	      flags_compatible = FALSE;
20727 	    }
20728 	}
20729 #endif
20730 
20731       /* Interworking mismatch is only a warning.  */
20732       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20733 	{
20734 	  if (in_flags & EF_ARM_INTERWORK)
20735 	    {
20736 	      _bfd_error_handler
20737 		(_("warning: %pB supports interworking, whereas %pB does not"),
20738 		 ibfd, obfd);
20739 	    }
20740 	  else
20741 	    {
20742 	      _bfd_error_handler
20743 		(_("warning: %pB does not support interworking, whereas %pB does"),
20744 		 ibfd, obfd);
20745 	    }
20746 	}
20747     }
20748 
20749   return flags_compatible;
20750 }
20751 
20752 
20753 /* Symbian OS Targets.  */
20754 
20755 #undef	TARGET_LITTLE_SYM
20756 #define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
20757 #undef	TARGET_LITTLE_NAME
20758 #define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
20759 #undef	TARGET_BIG_SYM
20760 #define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
20761 #undef	TARGET_BIG_NAME
20762 #define TARGET_BIG_NAME			"elf32-bigarm-symbian"
20763 
20764 /* Like elf32_arm_link_hash_table_create -- but overrides
20765    appropriately for Symbian OS.  */
20766 
20767 static struct bfd_link_hash_table *
20768 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20769 {
20770   struct bfd_link_hash_table *ret;
20771 
20772   ret = elf32_arm_link_hash_table_create (abfd);
20773   if (ret)
20774     {
20775       struct elf32_arm_link_hash_table *htab
20776 	= (struct elf32_arm_link_hash_table *)ret;
20777       /* There is no PLT header for Symbian OS.  */
20778       htab->plt_header_size = 0;
20779       /* The PLT entries are each one instruction and one word.  */
20780       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20781       htab->symbian_p = 1;
20782       /* Symbian uses armv5t or above, so use_blx is always true.  */
20783       htab->use_blx = 1;
20784       htab->root.is_relocatable_executable = 1;
20785     }
20786   return ret;
20787 }
20788 
20789 static const struct bfd_elf_special_section
20790 elf32_arm_symbian_special_sections[] =
20791 {
20792   /* In a BPABI executable, the dynamic linking sections do not go in
20793      the loadable read-only segment.  The post-linker may wish to
20794      refer to these sections, but they are not part of the final
20795      program image.  */
20796   { STRING_COMMA_LEN (".dynamic"),	 0, SHT_DYNAMIC,  0 },
20797   { STRING_COMMA_LEN (".dynstr"),	 0, SHT_STRTAB,	  0 },
20798   { STRING_COMMA_LEN (".dynsym"),	 0, SHT_DYNSYM,	  0 },
20799   { STRING_COMMA_LEN (".got"),		 0, SHT_PROGBITS, 0 },
20800   { STRING_COMMA_LEN (".hash"),		 0, SHT_HASH,	  0 },
20801   /* These sections do not need to be writable as the SymbianOS
20802      postlinker will arrange things so that no dynamic relocation is
20803      required.  */
20804   { STRING_COMMA_LEN (".init_array"),	 0, SHT_INIT_ARRAY,    SHF_ALLOC },
20805   { STRING_COMMA_LEN (".fini_array"),	 0, SHT_FINI_ARRAY,    SHF_ALLOC },
20806   { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
20807   { NULL,			      0, 0, 0,		       0 }
20808 };
20809 
20810 static void
20811 elf32_arm_symbian_begin_write_processing (bfd *abfd,
20812 					  struct bfd_link_info *link_info)
20813 {
20814   /* BPABI objects are never loaded directly by an OS kernel; they are
20815      processed by a postlinker first, into an OS-specific format.  If
20816      the D_PAGED bit is set on the file, BFD will align segments on
20817      page boundaries, so that an OS can directly map the file.  With
20818      BPABI objects, that just results in wasted space.  In addition,
20819      because we clear the D_PAGED bit, map_sections_to_segments will
20820      recognize that the program headers should not be mapped into any
20821      loadable segment.  */
20822   abfd->flags &= ~D_PAGED;
20823   elf32_arm_begin_write_processing (abfd, link_info);
20824 }
20825 
20826 static bfd_boolean
20827 elf32_arm_symbian_modify_segment_map (bfd *abfd,
20828 				      struct bfd_link_info *info)
20829 {
20830   struct elf_segment_map *m;
20831   asection *dynsec;
20832 
20833   /* BPABI shared libraries and executables should have a PT_DYNAMIC
20834      segment.  However, because the .dynamic section is not marked
20835      with SEC_LOAD, the generic ELF code will not create such a
20836      segment.  */
20837   dynsec = bfd_get_section_by_name (abfd, ".dynamic");
20838   if (dynsec)
20839     {
20840       for (m = elf_seg_map (abfd); m != NULL; m = m->next)
20841 	if (m->p_type == PT_DYNAMIC)
20842 	  break;
20843 
20844       if (m == NULL)
20845 	{
20846 	  m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
20847 	  m->next = elf_seg_map (abfd);
20848 	  elf_seg_map (abfd) = m;
20849 	}
20850     }
20851 
20852   /* Also call the generic arm routine.  */
20853   return elf32_arm_modify_segment_map (abfd, info);
20854 }
20855 
20856 /* Return address for Ith PLT stub in section PLT, for relocation REL
20857    or (bfd_vma) -1 if it should not be included.  */
20858 
20859 static bfd_vma
20860 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
20861 			       const arelent *rel ATTRIBUTE_UNUSED)
20862 {
20863   return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
20864 }
20865 
20866 #undef  elf32_bed
20867 #define elf32_bed elf32_arm_symbian_bed
20868 
20869 /* The dynamic sections are not allocated on SymbianOS; the postlinker
20870    will process them and then discard them.  */
20871 #undef  ELF_DYNAMIC_SEC_FLAGS
20872 #define ELF_DYNAMIC_SEC_FLAGS \
20873   (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
20874 
20875 #undef elf_backend_emit_relocs
20876 
20877 #undef  bfd_elf32_bfd_link_hash_table_create
20878 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
20879 #undef  elf_backend_special_sections
20880 #define elf_backend_special_sections		elf32_arm_symbian_special_sections
20881 #undef  elf_backend_begin_write_processing
20882 #define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
20883 #undef  elf_backend_final_write_processing
20884 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
20885 
20886 #undef  elf_backend_modify_segment_map
20887 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
20888 
20889 /* There is no .got section for BPABI objects, and hence no header.  */
20890 #undef  elf_backend_got_header_size
20891 #define elf_backend_got_header_size 0
20892 
20893 /* Similarly, there is no .got.plt section.  */
20894 #undef  elf_backend_want_got_plt
20895 #define elf_backend_want_got_plt 0
20896 
20897 #undef  elf_backend_plt_sym_val
20898 #define elf_backend_plt_sym_val		elf32_arm_symbian_plt_sym_val
20899 
20900 #undef  elf_backend_may_use_rel_p
20901 #define elf_backend_may_use_rel_p	1
20902 #undef  elf_backend_may_use_rela_p
20903 #define elf_backend_may_use_rela_p	0
20904 #undef  elf_backend_default_use_rela_p
20905 #define elf_backend_default_use_rela_p	0
20906 #undef  elf_backend_want_plt_sym
20907 #define elf_backend_want_plt_sym	0
20908 #undef  elf_backend_dtrel_excludes_plt
20909 #define elf_backend_dtrel_excludes_plt	0
20910 #undef  ELF_MAXPAGESIZE
20911 #define ELF_MAXPAGESIZE			0x8000
20912 
20913 #include "elf32-target.h"
20914