1 /* 32-bit ELF support for ARM
2    Copyright (C) 1998-2020 Free Software Foundation, Inc.
3 
4    This file is part of BFD, the Binary File Descriptor library.
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3 of the License, or
9    (at your option) any later version.
10 
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with this program; if not, write to the Free Software
18    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19    MA 02110-1301, USA.  */
20 
21 #include "sysdep.h"
22 #include <limits.h>
23 
24 #include "bfd.h"
25 #include "libiberty.h"
26 #include "libbfd.h"
27 #include "elf-bfd.h"
28 #include "elf-nacl.h"
29 #include "elf-vxworks.h"
30 #include "elf/arm.h"
31 #include "elf32-arm.h"
32 #include "cpu-arm.h"
33 
34 /* Return the relocation section associated with NAME.  HTAB is the
35    bfd's elf32_arm_link_hash_table.  */
36 #define RELOC_SECTION(HTAB, NAME) \
37   ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
38 
39 /* Return size of a relocation entry.  HTAB is the bfd's
40    elf32_arm_link_hash_table.  */
41 #define RELOC_SIZE(HTAB) \
42   ((HTAB)->use_rel \
43    ? sizeof (Elf32_External_Rel) \
44    : sizeof (Elf32_External_Rela))
45 
46 /* Return function to swap relocations in.  HTAB is the bfd's
47    elf32_arm_link_hash_table.  */
48 #define SWAP_RELOC_IN(HTAB) \
49   ((HTAB)->use_rel \
50    ? bfd_elf32_swap_reloc_in \
51    : bfd_elf32_swap_reloca_in)
52 
53 /* Return function to swap relocations out.  HTAB is the bfd's
54    elf32_arm_link_hash_table.  */
55 #define SWAP_RELOC_OUT(HTAB) \
56   ((HTAB)->use_rel \
57    ? bfd_elf32_swap_reloc_out \
58    : bfd_elf32_swap_reloca_out)
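/* Editor's illustrative sketch (not part of the upstream source): given a
   struct elf32_arm_link_hash_table *htab, the four macros above let callers
   handle REL and RELA objects uniformly, along the lines of

     const char *relname = RELOC_SECTION (htab, ".text");
     bfd_size_type relsize = RELOC_SIZE (htab);
     SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);

   where output_bfd, rel (an Elf_Internal_Rela) and loc (a bfd_byte *) are
   hypothetical locals.  RELOC_SECTION yields ".rel.text" when htab->use_rel
   is non-zero and ".rela.text" otherwise.  */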
59 
60 #define elf_info_to_howto		NULL
61 #define elf_info_to_howto_rel		elf32_arm_info_to_howto
62 
63 #define ARM_ELF_ABI_VERSION		0
64 #define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
65 
66 /* The Adjusted Place, as defined by AAELF.  */
67 #define Pa(X) ((X) & 0xfffffffc)
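/* Editor's illustrative note: Pa simply clears the low two bits of the
   place, e.g. Pa (0x8006) == 0x8004, giving the 4-byte-aligned address that
   AAELF uses in the relocation calculations below.  */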
68 
69 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
70 					    struct bfd_link_info *link_info,
71 					    asection *sec,
72 					    bfd_byte *contents);
73 
74 /* Note: code such as elf32_arm_reloc_type_lookup expects to use e.g.
75    R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
76    in that slot.  */
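/* Editor's illustrative note: R_ARM_PC24 is relocation number 1, and the
   entry at index 1 below is indeed the R_ARM_PC24 HOWTO, so such a lookup
   can simply return &elf32_arm_howto_table_1[R_ARM_PC24].  */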
77 
78 static reloc_howto_type elf32_arm_howto_table_1[] =
79 {
80   /* No relocation.  */
81   HOWTO (R_ARM_NONE,		/* type */
82 	 0,			/* rightshift */
83 	 3,			/* size (0 = byte, 1 = short, 2 = long) */
84 	 0,			/* bitsize */
85 	 FALSE,			/* pc_relative */
86 	 0,			/* bitpos */
87 	 complain_overflow_dont,/* complain_on_overflow */
88 	 bfd_elf_generic_reloc,	/* special_function */
89 	 "R_ARM_NONE",		/* name */
90 	 FALSE,			/* partial_inplace */
91 	 0,			/* src_mask */
92 	 0,			/* dst_mask */
93 	 FALSE),		/* pcrel_offset */
94 
95   HOWTO (R_ARM_PC24,		/* type */
96 	 2,			/* rightshift */
97 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
98 	 24,			/* bitsize */
99 	 TRUE,			/* pc_relative */
100 	 0,			/* bitpos */
101 	 complain_overflow_signed,/* complain_on_overflow */
102 	 bfd_elf_generic_reloc,	/* special_function */
103 	 "R_ARM_PC24",		/* name */
104 	 FALSE,			/* partial_inplace */
105 	 0x00ffffff,		/* src_mask */
106 	 0x00ffffff,		/* dst_mask */
107 	 TRUE),			/* pcrel_offset */
108 
109   /* 32 bit absolute */
110   HOWTO (R_ARM_ABS32,		/* type */
111 	 0,			/* rightshift */
112 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
113 	 32,			/* bitsize */
114 	 FALSE,			/* pc_relative */
115 	 0,			/* bitpos */
116 	 complain_overflow_bitfield,/* complain_on_overflow */
117 	 bfd_elf_generic_reloc,	/* special_function */
118 	 "R_ARM_ABS32",		/* name */
119 	 FALSE,			/* partial_inplace */
120 	 0xffffffff,		/* src_mask */
121 	 0xffffffff,		/* dst_mask */
122 	 FALSE),		/* pcrel_offset */
123 
124   /* standard 32bit pc-relative reloc */
125   HOWTO (R_ARM_REL32,		/* type */
126 	 0,			/* rightshift */
127 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
128 	 32,			/* bitsize */
129 	 TRUE,			/* pc_relative */
130 	 0,			/* bitpos */
131 	 complain_overflow_bitfield,/* complain_on_overflow */
132 	 bfd_elf_generic_reloc,	/* special_function */
133 	 "R_ARM_REL32",		/* name */
134 	 FALSE,			/* partial_inplace */
135 	 0xffffffff,		/* src_mask */
136 	 0xffffffff,		/* dst_mask */
137 	 TRUE),			/* pcrel_offset */
138 
139   /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
140   HOWTO (R_ARM_LDR_PC_G0,	/* type */
141 	 0,			/* rightshift */
142 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
143 	 32,			/* bitsize */
144 	 TRUE,			/* pc_relative */
145 	 0,			/* bitpos */
146 	 complain_overflow_dont,/* complain_on_overflow */
147 	 bfd_elf_generic_reloc,	/* special_function */
148 	 "R_ARM_LDR_PC_G0",     /* name */
149 	 FALSE,			/* partial_inplace */
150 	 0xffffffff,		/* src_mask */
151 	 0xffffffff,		/* dst_mask */
152 	 TRUE),			/* pcrel_offset */
153 
154    /* 16 bit absolute */
155   HOWTO (R_ARM_ABS16,		/* type */
156 	 0,			/* rightshift */
157 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
158 	 16,			/* bitsize */
159 	 FALSE,			/* pc_relative */
160 	 0,			/* bitpos */
161 	 complain_overflow_bitfield,/* complain_on_overflow */
162 	 bfd_elf_generic_reloc,	/* special_function */
163 	 "R_ARM_ABS16",		/* name */
164 	 FALSE,			/* partial_inplace */
165 	 0x0000ffff,		/* src_mask */
166 	 0x0000ffff,		/* dst_mask */
167 	 FALSE),		/* pcrel_offset */
168 
169   /* 12 bit absolute */
170   HOWTO (R_ARM_ABS12,		/* type */
171 	 0,			/* rightshift */
172 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
173 	 12,			/* bitsize */
174 	 FALSE,			/* pc_relative */
175 	 0,			/* bitpos */
176 	 complain_overflow_bitfield,/* complain_on_overflow */
177 	 bfd_elf_generic_reloc,	/* special_function */
178 	 "R_ARM_ABS12",		/* name */
179 	 FALSE,			/* partial_inplace */
180 	 0x00000fff,		/* src_mask */
181 	 0x00000fff,		/* dst_mask */
182 	 FALSE),		/* pcrel_offset */
183 
184   HOWTO (R_ARM_THM_ABS5,	/* type */
185 	 6,			/* rightshift */
186 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
187 	 5,			/* bitsize */
188 	 FALSE,			/* pc_relative */
189 	 0,			/* bitpos */
190 	 complain_overflow_bitfield,/* complain_on_overflow */
191 	 bfd_elf_generic_reloc,	/* special_function */
192 	 "R_ARM_THM_ABS5",	/* name */
193 	 FALSE,			/* partial_inplace */
194 	 0x000007e0,		/* src_mask */
195 	 0x000007e0,		/* dst_mask */
196 	 FALSE),		/* pcrel_offset */
197 
198   /* 8 bit absolute */
199   HOWTO (R_ARM_ABS8,		/* type */
200 	 0,			/* rightshift */
201 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
202 	 8,			/* bitsize */
203 	 FALSE,			/* pc_relative */
204 	 0,			/* bitpos */
205 	 complain_overflow_bitfield,/* complain_on_overflow */
206 	 bfd_elf_generic_reloc,	/* special_function */
207 	 "R_ARM_ABS8",		/* name */
208 	 FALSE,			/* partial_inplace */
209 	 0x000000ff,		/* src_mask */
210 	 0x000000ff,		/* dst_mask */
211 	 FALSE),		/* pcrel_offset */
212 
213   HOWTO (R_ARM_SBREL32,		/* type */
214 	 0,			/* rightshift */
215 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
216 	 32,			/* bitsize */
217 	 FALSE,			/* pc_relative */
218 	 0,			/* bitpos */
219 	 complain_overflow_dont,/* complain_on_overflow */
220 	 bfd_elf_generic_reloc,	/* special_function */
221 	 "R_ARM_SBREL32",	/* name */
222 	 FALSE,			/* partial_inplace */
223 	 0xffffffff,		/* src_mask */
224 	 0xffffffff,		/* dst_mask */
225 	 FALSE),		/* pcrel_offset */
226 
227   HOWTO (R_ARM_THM_CALL,	/* type */
228 	 1,			/* rightshift */
229 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
230 	 24,			/* bitsize */
231 	 TRUE,			/* pc_relative */
232 	 0,			/* bitpos */
233 	 complain_overflow_signed,/* complain_on_overflow */
234 	 bfd_elf_generic_reloc,	/* special_function */
235 	 "R_ARM_THM_CALL",	/* name */
236 	 FALSE,			/* partial_inplace */
237 	 0x07ff2fff,		/* src_mask */
238 	 0x07ff2fff,		/* dst_mask */
239 	 TRUE),			/* pcrel_offset */
240 
241   HOWTO (R_ARM_THM_PC8,		/* type */
242 	 1,			/* rightshift */
243 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
244 	 8,			/* bitsize */
245 	 TRUE,			/* pc_relative */
246 	 0,			/* bitpos */
247 	 complain_overflow_signed,/* complain_on_overflow */
248 	 bfd_elf_generic_reloc,	/* special_function */
249 	 "R_ARM_THM_PC8",	/* name */
250 	 FALSE,			/* partial_inplace */
251 	 0x000000ff,		/* src_mask */
252 	 0x000000ff,		/* dst_mask */
253 	 TRUE),			/* pcrel_offset */
254 
255   HOWTO (R_ARM_BREL_ADJ,	/* type */
256 	 1,			/* rightshift */
257 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
258 	 32,			/* bitsize */
259 	 FALSE,			/* pc_relative */
260 	 0,			/* bitpos */
261 	 complain_overflow_signed,/* complain_on_overflow */
262 	 bfd_elf_generic_reloc,	/* special_function */
263 	 "R_ARM_BREL_ADJ",	/* name */
264 	 FALSE,			/* partial_inplace */
265 	 0xffffffff,		/* src_mask */
266 	 0xffffffff,		/* dst_mask */
267 	 FALSE),		/* pcrel_offset */
268 
269   HOWTO (R_ARM_TLS_DESC,	/* type */
270 	 0,			/* rightshift */
271 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
272 	 32,			/* bitsize */
273 	 FALSE,			/* pc_relative */
274 	 0,			/* bitpos */
275 	 complain_overflow_bitfield,/* complain_on_overflow */
276 	 bfd_elf_generic_reloc,	/* special_function */
277 	 "R_ARM_TLS_DESC",	/* name */
278 	 FALSE,			/* partial_inplace */
279 	 0xffffffff,		/* src_mask */
280 	 0xffffffff,		/* dst_mask */
281 	 FALSE),		/* pcrel_offset */
282 
283   HOWTO (R_ARM_THM_SWI8,	/* type */
284 	 0,			/* rightshift */
285 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
286 	 0,			/* bitsize */
287 	 FALSE,			/* pc_relative */
288 	 0,			/* bitpos */
289 	 complain_overflow_signed,/* complain_on_overflow */
290 	 bfd_elf_generic_reloc,	/* special_function */
291 	 "R_ARM_SWI8",		/* name */
292 	 FALSE,			/* partial_inplace */
293 	 0x00000000,		/* src_mask */
294 	 0x00000000,		/* dst_mask */
295 	 FALSE),		/* pcrel_offset */
296 
297   /* BLX instruction for the ARM.  */
298   HOWTO (R_ARM_XPC25,		/* type */
299 	 2,			/* rightshift */
300 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
301 	 24,			/* bitsize */
302 	 TRUE,			/* pc_relative */
303 	 0,			/* bitpos */
304 	 complain_overflow_signed,/* complain_on_overflow */
305 	 bfd_elf_generic_reloc,	/* special_function */
306 	 "R_ARM_XPC25",		/* name */
307 	 FALSE,			/* partial_inplace */
308 	 0x00ffffff,		/* src_mask */
309 	 0x00ffffff,		/* dst_mask */
310 	 TRUE),			/* pcrel_offset */
311 
312   /* BLX instruction for the Thumb.  */
313   HOWTO (R_ARM_THM_XPC22,	/* type */
314 	 2,			/* rightshift */
315 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
316 	 24,			/* bitsize */
317 	 TRUE,			/* pc_relative */
318 	 0,			/* bitpos */
319 	 complain_overflow_signed,/* complain_on_overflow */
320 	 bfd_elf_generic_reloc,	/* special_function */
321 	 "R_ARM_THM_XPC22",	/* name */
322 	 FALSE,			/* partial_inplace */
323 	 0x07ff2fff,		/* src_mask */
324 	 0x07ff2fff,		/* dst_mask */
325 	 TRUE),			/* pcrel_offset */
326 
327   /* Dynamic TLS relocations.  */
328 
329   HOWTO (R_ARM_TLS_DTPMOD32,	/* type */
330 	 0,			/* rightshift */
331 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
332 	 32,			/* bitsize */
333 	 FALSE,			/* pc_relative */
334 	 0,			/* bitpos */
335 	 complain_overflow_bitfield,/* complain_on_overflow */
336 	 bfd_elf_generic_reloc, /* special_function */
337 	 "R_ARM_TLS_DTPMOD32",	/* name */
338 	 TRUE,			/* partial_inplace */
339 	 0xffffffff,		/* src_mask */
340 	 0xffffffff,		/* dst_mask */
341 	 FALSE),		/* pcrel_offset */
342 
343   HOWTO (R_ARM_TLS_DTPOFF32,	/* type */
344 	 0,			/* rightshift */
345 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
346 	 32,			/* bitsize */
347 	 FALSE,			/* pc_relative */
348 	 0,			/* bitpos */
349 	 complain_overflow_bitfield,/* complain_on_overflow */
350 	 bfd_elf_generic_reloc, /* special_function */
351 	 "R_ARM_TLS_DTPOFF32",	/* name */
352 	 TRUE,			/* partial_inplace */
353 	 0xffffffff,		/* src_mask */
354 	 0xffffffff,		/* dst_mask */
355 	 FALSE),		/* pcrel_offset */
356 
357   HOWTO (R_ARM_TLS_TPOFF32,	/* type */
358 	 0,			/* rightshift */
359 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
360 	 32,			/* bitsize */
361 	 FALSE,			/* pc_relative */
362 	 0,			/* bitpos */
363 	 complain_overflow_bitfield,/* complain_on_overflow */
364 	 bfd_elf_generic_reloc, /* special_function */
365 	 "R_ARM_TLS_TPOFF32",	/* name */
366 	 TRUE,			/* partial_inplace */
367 	 0xffffffff,		/* src_mask */
368 	 0xffffffff,		/* dst_mask */
369 	 FALSE),		/* pcrel_offset */
370 
371   /* Relocs used in ARM Linux */
372 
373   HOWTO (R_ARM_COPY,		/* type */
374 	 0,			/* rightshift */
375 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
376 	 32,			/* bitsize */
377 	 FALSE,			/* pc_relative */
378 	 0,			/* bitpos */
379 	 complain_overflow_bitfield,/* complain_on_overflow */
380 	 bfd_elf_generic_reloc, /* special_function */
381 	 "R_ARM_COPY",		/* name */
382 	 TRUE,			/* partial_inplace */
383 	 0xffffffff,		/* src_mask */
384 	 0xffffffff,		/* dst_mask */
385 	 FALSE),		/* pcrel_offset */
386 
387   HOWTO (R_ARM_GLOB_DAT,	/* type */
388 	 0,			/* rightshift */
389 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
390 	 32,			/* bitsize */
391 	 FALSE,			/* pc_relative */
392 	 0,			/* bitpos */
393 	 complain_overflow_bitfield,/* complain_on_overflow */
394 	 bfd_elf_generic_reloc, /* special_function */
395 	 "R_ARM_GLOB_DAT",	/* name */
396 	 TRUE,			/* partial_inplace */
397 	 0xffffffff,		/* src_mask */
398 	 0xffffffff,		/* dst_mask */
399 	 FALSE),		/* pcrel_offset */
400 
401   HOWTO (R_ARM_JUMP_SLOT,	/* type */
402 	 0,			/* rightshift */
403 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
404 	 32,			/* bitsize */
405 	 FALSE,			/* pc_relative */
406 	 0,			/* bitpos */
407 	 complain_overflow_bitfield,/* complain_on_overflow */
408 	 bfd_elf_generic_reloc, /* special_function */
409 	 "R_ARM_JUMP_SLOT",	/* name */
410 	 TRUE,			/* partial_inplace */
411 	 0xffffffff,		/* src_mask */
412 	 0xffffffff,		/* dst_mask */
413 	 FALSE),		/* pcrel_offset */
414 
415   HOWTO (R_ARM_RELATIVE,	/* type */
416 	 0,			/* rightshift */
417 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
418 	 32,			/* bitsize */
419 	 FALSE,			/* pc_relative */
420 	 0,			/* bitpos */
421 	 complain_overflow_bitfield,/* complain_on_overflow */
422 	 bfd_elf_generic_reloc, /* special_function */
423 	 "R_ARM_RELATIVE",	/* name */
424 	 TRUE,			/* partial_inplace */
425 	 0xffffffff,		/* src_mask */
426 	 0xffffffff,		/* dst_mask */
427 	 FALSE),		/* pcrel_offset */
428 
429   HOWTO (R_ARM_GOTOFF32,	/* type */
430 	 0,			/* rightshift */
431 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
432 	 32,			/* bitsize */
433 	 FALSE,			/* pc_relative */
434 	 0,			/* bitpos */
435 	 complain_overflow_bitfield,/* complain_on_overflow */
436 	 bfd_elf_generic_reloc, /* special_function */
437 	 "R_ARM_GOTOFF32",	/* name */
438 	 TRUE,			/* partial_inplace */
439 	 0xffffffff,		/* src_mask */
440 	 0xffffffff,		/* dst_mask */
441 	 FALSE),		/* pcrel_offset */
442 
443   HOWTO (R_ARM_GOTPC,		/* type */
444 	 0,			/* rightshift */
445 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
446 	 32,			/* bitsize */
447 	 TRUE,			/* pc_relative */
448 	 0,			/* bitpos */
449 	 complain_overflow_bitfield,/* complain_on_overflow */
450 	 bfd_elf_generic_reloc, /* special_function */
451 	 "R_ARM_GOTPC",		/* name */
452 	 TRUE,			/* partial_inplace */
453 	 0xffffffff,		/* src_mask */
454 	 0xffffffff,		/* dst_mask */
455 	 TRUE),			/* pcrel_offset */
456 
457   HOWTO (R_ARM_GOT32,		/* type */
458 	 0,			/* rightshift */
459 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
460 	 32,			/* bitsize */
461 	 FALSE,			/* pc_relative */
462 	 0,			/* bitpos */
463 	 complain_overflow_bitfield,/* complain_on_overflow */
464 	 bfd_elf_generic_reloc, /* special_function */
465 	 "R_ARM_GOT32",		/* name */
466 	 TRUE,			/* partial_inplace */
467 	 0xffffffff,		/* src_mask */
468 	 0xffffffff,		/* dst_mask */
469 	 FALSE),		/* pcrel_offset */
470 
471   HOWTO (R_ARM_PLT32,		/* type */
472 	 2,			/* rightshift */
473 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
474 	 24,			/* bitsize */
475 	 TRUE,			/* pc_relative */
476 	 0,			/* bitpos */
477 	 complain_overflow_bitfield,/* complain_on_overflow */
478 	 bfd_elf_generic_reloc, /* special_function */
479 	 "R_ARM_PLT32",		/* name */
480 	 FALSE,			/* partial_inplace */
481 	 0x00ffffff,		/* src_mask */
482 	 0x00ffffff,		/* dst_mask */
483 	 TRUE),			/* pcrel_offset */
484 
485   HOWTO (R_ARM_CALL,		/* type */
486 	 2,			/* rightshift */
487 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
488 	 24,			/* bitsize */
489 	 TRUE,			/* pc_relative */
490 	 0,			/* bitpos */
491 	 complain_overflow_signed,/* complain_on_overflow */
492 	 bfd_elf_generic_reloc,	/* special_function */
493 	 "R_ARM_CALL",		/* name */
494 	 FALSE,			/* partial_inplace */
495 	 0x00ffffff,		/* src_mask */
496 	 0x00ffffff,		/* dst_mask */
497 	 TRUE),			/* pcrel_offset */
498 
499   HOWTO (R_ARM_JUMP24,		/* type */
500 	 2,			/* rightshift */
501 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
502 	 24,			/* bitsize */
503 	 TRUE,			/* pc_relative */
504 	 0,			/* bitpos */
505 	 complain_overflow_signed,/* complain_on_overflow */
506 	 bfd_elf_generic_reloc,	/* special_function */
507 	 "R_ARM_JUMP24",	/* name */
508 	 FALSE,			/* partial_inplace */
509 	 0x00ffffff,		/* src_mask */
510 	 0x00ffffff,		/* dst_mask */
511 	 TRUE),			/* pcrel_offset */
512 
513   HOWTO (R_ARM_THM_JUMP24,	/* type */
514 	 1,			/* rightshift */
515 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
516 	 24,			/* bitsize */
517 	 TRUE,			/* pc_relative */
518 	 0,			/* bitpos */
519 	 complain_overflow_signed,/* complain_on_overflow */
520 	 bfd_elf_generic_reloc,	/* special_function */
521 	 "R_ARM_THM_JUMP24",	/* name */
522 	 FALSE,			/* partial_inplace */
523 	 0x07ff2fff,		/* src_mask */
524 	 0x07ff2fff,		/* dst_mask */
525 	 TRUE),			/* pcrel_offset */
526 
527   HOWTO (R_ARM_BASE_ABS,	/* type */
528 	 0,			/* rightshift */
529 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
530 	 32,			/* bitsize */
531 	 FALSE,			/* pc_relative */
532 	 0,			/* bitpos */
533 	 complain_overflow_dont,/* complain_on_overflow */
534 	 bfd_elf_generic_reloc,	/* special_function */
535 	 "R_ARM_BASE_ABS",	/* name */
536 	 FALSE,			/* partial_inplace */
537 	 0xffffffff,		/* src_mask */
538 	 0xffffffff,		/* dst_mask */
539 	 FALSE),		/* pcrel_offset */
540 
541   HOWTO (R_ARM_ALU_PCREL7_0,	/* type */
542 	 0,			/* rightshift */
543 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
544 	 12,			/* bitsize */
545 	 TRUE,			/* pc_relative */
546 	 0,			/* bitpos */
547 	 complain_overflow_dont,/* complain_on_overflow */
548 	 bfd_elf_generic_reloc,	/* special_function */
549 	 "R_ARM_ALU_PCREL_7_0",	/* name */
550 	 FALSE,			/* partial_inplace */
551 	 0x00000fff,		/* src_mask */
552 	 0x00000fff,		/* dst_mask */
553 	 TRUE),			/* pcrel_offset */
554 
555   HOWTO (R_ARM_ALU_PCREL15_8,	/* type */
556 	 0,			/* rightshift */
557 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
558 	 12,			/* bitsize */
559 	 TRUE,			/* pc_relative */
560 	 8,			/* bitpos */
561 	 complain_overflow_dont,/* complain_on_overflow */
562 	 bfd_elf_generic_reloc,	/* special_function */
563 	 "R_ARM_ALU_PCREL_15_8",/* name */
564 	 FALSE,			/* partial_inplace */
565 	 0x00000fff,		/* src_mask */
566 	 0x00000fff,		/* dst_mask */
567 	 TRUE),			/* pcrel_offset */
568 
569   HOWTO (R_ARM_ALU_PCREL23_15,	/* type */
570 	 0,			/* rightshift */
571 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
572 	 12,			/* bitsize */
573 	 TRUE,			/* pc_relative */
574 	 16,			/* bitpos */
575 	 complain_overflow_dont,/* complain_on_overflow */
576 	 bfd_elf_generic_reloc,	/* special_function */
577 	 "R_ARM_ALU_PCREL_23_15",/* name */
578 	 FALSE,			/* partial_inplace */
579 	 0x00000fff,		/* src_mask */
580 	 0x00000fff,		/* dst_mask */
581 	 TRUE),			/* pcrel_offset */
582 
583   HOWTO (R_ARM_LDR_SBREL_11_0,	/* type */
584 	 0,			/* rightshift */
585 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
586 	 12,			/* bitsize */
587 	 FALSE,			/* pc_relative */
588 	 0,			/* bitpos */
589 	 complain_overflow_dont,/* complain_on_overflow */
590 	 bfd_elf_generic_reloc,	/* special_function */
591 	 "R_ARM_LDR_SBREL_11_0",/* name */
592 	 FALSE,			/* partial_inplace */
593 	 0x00000fff,		/* src_mask */
594 	 0x00000fff,		/* dst_mask */
595 	 FALSE),		/* pcrel_offset */
596 
597   HOWTO (R_ARM_ALU_SBREL_19_12,	/* type */
598 	 0,			/* rightshift */
599 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
600 	 8,			/* bitsize */
601 	 FALSE,			/* pc_relative */
602 	 12,			/* bitpos */
603 	 complain_overflow_dont,/* complain_on_overflow */
604 	 bfd_elf_generic_reloc,	/* special_function */
605 	 "R_ARM_ALU_SBREL_19_12",/* name */
606 	 FALSE,			/* partial_inplace */
607 	 0x000ff000,		/* src_mask */
608 	 0x000ff000,		/* dst_mask */
609 	 FALSE),		/* pcrel_offset */
610 
611   HOWTO (R_ARM_ALU_SBREL_27_20,	/* type */
612 	 0,			/* rightshift */
613 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
614 	 8,			/* bitsize */
615 	 FALSE,			/* pc_relative */
616 	 20,			/* bitpos */
617 	 complain_overflow_dont,/* complain_on_overflow */
618 	 bfd_elf_generic_reloc,	/* special_function */
619 	 "R_ARM_ALU_SBREL_27_20",/* name */
620 	 FALSE,			/* partial_inplace */
621 	 0x0ff00000,		/* src_mask */
622 	 0x0ff00000,		/* dst_mask */
623 	 FALSE),		/* pcrel_offset */
624 
625   HOWTO (R_ARM_TARGET1,		/* type */
626 	 0,			/* rightshift */
627 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
628 	 32,			/* bitsize */
629 	 FALSE,			/* pc_relative */
630 	 0,			/* bitpos */
631 	 complain_overflow_dont,/* complain_on_overflow */
632 	 bfd_elf_generic_reloc,	/* special_function */
633 	 "R_ARM_TARGET1",	/* name */
634 	 FALSE,			/* partial_inplace */
635 	 0xffffffff,		/* src_mask */
636 	 0xffffffff,		/* dst_mask */
637 	 FALSE),		/* pcrel_offset */
638 
639   HOWTO (R_ARM_ROSEGREL32,	/* type */
640 	 0,			/* rightshift */
641 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
642 	 32,			/* bitsize */
643 	 FALSE,			/* pc_relative */
644 	 0,			/* bitpos */
645 	 complain_overflow_dont,/* complain_on_overflow */
646 	 bfd_elf_generic_reloc,	/* special_function */
647 	 "R_ARM_ROSEGREL32",	/* name */
648 	 FALSE,			/* partial_inplace */
649 	 0xffffffff,		/* src_mask */
650 	 0xffffffff,		/* dst_mask */
651 	 FALSE),		/* pcrel_offset */
652 
653   HOWTO (R_ARM_V4BX,		/* type */
654 	 0,			/* rightshift */
655 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
656 	 32,			/* bitsize */
657 	 FALSE,			/* pc_relative */
658 	 0,			/* bitpos */
659 	 complain_overflow_dont,/* complain_on_overflow */
660 	 bfd_elf_generic_reloc,	/* special_function */
661 	 "R_ARM_V4BX",		/* name */
662 	 FALSE,			/* partial_inplace */
663 	 0xffffffff,		/* src_mask */
664 	 0xffffffff,		/* dst_mask */
665 	 FALSE),		/* pcrel_offset */
666 
667   HOWTO (R_ARM_TARGET2,		/* type */
668 	 0,			/* rightshift */
669 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
670 	 32,			/* bitsize */
671 	 FALSE,			/* pc_relative */
672 	 0,			/* bitpos */
673 	 complain_overflow_signed,/* complain_on_overflow */
674 	 bfd_elf_generic_reloc,	/* special_function */
675 	 "R_ARM_TARGET2",	/* name */
676 	 FALSE,			/* partial_inplace */
677 	 0xffffffff,		/* src_mask */
678 	 0xffffffff,		/* dst_mask */
679 	 TRUE),			/* pcrel_offset */
680 
681   HOWTO (R_ARM_PREL31,		/* type */
682 	 0,			/* rightshift */
683 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
684 	 31,			/* bitsize */
685 	 TRUE,			/* pc_relative */
686 	 0,			/* bitpos */
687 	 complain_overflow_signed,/* complain_on_overflow */
688 	 bfd_elf_generic_reloc,	/* special_function */
689 	 "R_ARM_PREL31",	/* name */
690 	 FALSE,			/* partial_inplace */
691 	 0x7fffffff,		/* src_mask */
692 	 0x7fffffff,		/* dst_mask */
693 	 TRUE),			/* pcrel_offset */
694 
695   HOWTO (R_ARM_MOVW_ABS_NC,	/* type */
696 	 0,			/* rightshift */
697 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
698 	 16,			/* bitsize */
699 	 FALSE,			/* pc_relative */
700 	 0,			/* bitpos */
701 	 complain_overflow_dont,/* complain_on_overflow */
702 	 bfd_elf_generic_reloc,	/* special_function */
703 	 "R_ARM_MOVW_ABS_NC",	/* name */
704 	 FALSE,			/* partial_inplace */
705 	 0x000f0fff,		/* src_mask */
706 	 0x000f0fff,		/* dst_mask */
707 	 FALSE),		/* pcrel_offset */
708 
709   HOWTO (R_ARM_MOVT_ABS,	/* type */
710 	 0,			/* rightshift */
711 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
712 	 16,			/* bitsize */
713 	 FALSE,			/* pc_relative */
714 	 0,			/* bitpos */
715 	 complain_overflow_bitfield,/* complain_on_overflow */
716 	 bfd_elf_generic_reloc,	/* special_function */
717 	 "R_ARM_MOVT_ABS",	/* name */
718 	 FALSE,			/* partial_inplace */
719 	 0x000f0fff,		/* src_mask */
720 	 0x000f0fff,		/* dst_mask */
721 	 FALSE),		/* pcrel_offset */
722 
723   HOWTO (R_ARM_MOVW_PREL_NC,	/* type */
724 	 0,			/* rightshift */
725 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
726 	 16,			/* bitsize */
727 	 TRUE,			/* pc_relative */
728 	 0,			/* bitpos */
729 	 complain_overflow_dont,/* complain_on_overflow */
730 	 bfd_elf_generic_reloc,	/* special_function */
731 	 "R_ARM_MOVW_PREL_NC",	/* name */
732 	 FALSE,			/* partial_inplace */
733 	 0x000f0fff,		/* src_mask */
734 	 0x000f0fff,		/* dst_mask */
735 	 TRUE),			/* pcrel_offset */
736 
737   HOWTO (R_ARM_MOVT_PREL,	/* type */
738 	 0,			/* rightshift */
739 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
740 	 16,			/* bitsize */
741 	 TRUE,			/* pc_relative */
742 	 0,			/* bitpos */
743 	 complain_overflow_bitfield,/* complain_on_overflow */
744 	 bfd_elf_generic_reloc,	/* special_function */
745 	 "R_ARM_MOVT_PREL",	/* name */
746 	 FALSE,			/* partial_inplace */
747 	 0x000f0fff,		/* src_mask */
748 	 0x000f0fff,		/* dst_mask */
749 	 TRUE),			/* pcrel_offset */
750 
751   HOWTO (R_ARM_THM_MOVW_ABS_NC,	/* type */
752 	 0,			/* rightshift */
753 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
754 	 16,			/* bitsize */
755 	 FALSE,			/* pc_relative */
756 	 0,			/* bitpos */
757 	 complain_overflow_dont,/* complain_on_overflow */
758 	 bfd_elf_generic_reloc,	/* special_function */
759 	 "R_ARM_THM_MOVW_ABS_NC",/* name */
760 	 FALSE,			/* partial_inplace */
761 	 0x040f70ff,		/* src_mask */
762 	 0x040f70ff,		/* dst_mask */
763 	 FALSE),		/* pcrel_offset */
764 
765   HOWTO (R_ARM_THM_MOVT_ABS,	/* type */
766 	 0,			/* rightshift */
767 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
768 	 16,			/* bitsize */
769 	 FALSE,			/* pc_relative */
770 	 0,			/* bitpos */
771 	 complain_overflow_bitfield,/* complain_on_overflow */
772 	 bfd_elf_generic_reloc,	/* special_function */
773 	 "R_ARM_THM_MOVT_ABS",	/* name */
774 	 FALSE,			/* partial_inplace */
775 	 0x040f70ff,		/* src_mask */
776 	 0x040f70ff,		/* dst_mask */
777 	 FALSE),		/* pcrel_offset */
778 
779   HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
780 	 0,			/* rightshift */
781 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
782 	 16,			/* bitsize */
783 	 TRUE,			/* pc_relative */
784 	 0,			/* bitpos */
785 	 complain_overflow_dont,/* complain_on_overflow */
786 	 bfd_elf_generic_reloc,	/* special_function */
787 	 "R_ARM_THM_MOVW_PREL_NC",/* name */
788 	 FALSE,			/* partial_inplace */
789 	 0x040f70ff,		/* src_mask */
790 	 0x040f70ff,		/* dst_mask */
791 	 TRUE),			/* pcrel_offset */
792 
793   HOWTO (R_ARM_THM_MOVT_PREL,	/* type */
794 	 0,			/* rightshift */
795 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
796 	 16,			/* bitsize */
797 	 TRUE,			/* pc_relative */
798 	 0,			/* bitpos */
799 	 complain_overflow_bitfield,/* complain_on_overflow */
800 	 bfd_elf_generic_reloc,	/* special_function */
801 	 "R_ARM_THM_MOVT_PREL",	/* name */
802 	 FALSE,			/* partial_inplace */
803 	 0x040f70ff,		/* src_mask */
804 	 0x040f70ff,		/* dst_mask */
805 	 TRUE),			/* pcrel_offset */
806 
807   HOWTO (R_ARM_THM_JUMP19,	/* type */
808 	 1,			/* rightshift */
809 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
810 	 19,			/* bitsize */
811 	 TRUE,			/* pc_relative */
812 	 0,			/* bitpos */
813 	 complain_overflow_signed,/* complain_on_overflow */
814 	 bfd_elf_generic_reloc, /* special_function */
815 	 "R_ARM_THM_JUMP19",	/* name */
816 	 FALSE,			/* partial_inplace */
817 	 0x043f2fff,		/* src_mask */
818 	 0x043f2fff,		/* dst_mask */
819 	 TRUE),			/* pcrel_offset */
820 
821   HOWTO (R_ARM_THM_JUMP6,	/* type */
822 	 1,			/* rightshift */
823 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
824 	 6,			/* bitsize */
825 	 TRUE,			/* pc_relative */
826 	 0,			/* bitpos */
827 	 complain_overflow_unsigned,/* complain_on_overflow */
828 	 bfd_elf_generic_reloc,	/* special_function */
829 	 "R_ARM_THM_JUMP6",	/* name */
830 	 FALSE,			/* partial_inplace */
831 	 0x02f8,		/* src_mask */
832 	 0x02f8,		/* dst_mask */
833 	 TRUE),			/* pcrel_offset */
834 
835   /* These are declared as 13-bit signed relocations because we can
836      address -4095 .. 4095(base) by altering ADDW to SUBW or vice
837      versa.  */
838   HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
839 	 0,			/* rightshift */
840 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
841 	 13,			/* bitsize */
842 	 TRUE,			/* pc_relative */
843 	 0,			/* bitpos */
844 	 complain_overflow_dont,/* complain_on_overflow */
845 	 bfd_elf_generic_reloc,	/* special_function */
846 	 "R_ARM_THM_ALU_PREL_11_0",/* name */
847 	 FALSE,			/* partial_inplace */
848 	 0xffffffff,		/* src_mask */
849 	 0xffffffff,		/* dst_mask */
850 	 TRUE),			/* pcrel_offset */
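  /* Editor's illustrative note on the entry above: an offset such as -3
     cannot be encoded in ADDW's unsigned 12-bit immediate, but the linker
     can rewrite the instruction as SUBW with immediate 3, which is why the
     field is described as 13-bit signed even though only 12 immediate bits
     are encoded.  */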
851 
852   HOWTO (R_ARM_THM_PC12,	/* type */
853 	 0,			/* rightshift */
854 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
855 	 13,			/* bitsize */
856 	 TRUE,			/* pc_relative */
857 	 0,			/* bitpos */
858 	 complain_overflow_dont,/* complain_on_overflow */
859 	 bfd_elf_generic_reloc,	/* special_function */
860 	 "R_ARM_THM_PC12",	/* name */
861 	 FALSE,			/* partial_inplace */
862 	 0xffffffff,		/* src_mask */
863 	 0xffffffff,		/* dst_mask */
864 	 TRUE),			/* pcrel_offset */
865 
866   HOWTO (R_ARM_ABS32_NOI,	/* type */
867 	 0,			/* rightshift */
868 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
869 	 32,			/* bitsize */
870 	 FALSE,			/* pc_relative */
871 	 0,			/* bitpos */
872 	 complain_overflow_dont,/* complain_on_overflow */
873 	 bfd_elf_generic_reloc,	/* special_function */
874 	 "R_ARM_ABS32_NOI",	/* name */
875 	 FALSE,			/* partial_inplace */
876 	 0xffffffff,		/* src_mask */
877 	 0xffffffff,		/* dst_mask */
878 	 FALSE),		/* pcrel_offset */
879 
880   HOWTO (R_ARM_REL32_NOI,	/* type */
881 	 0,			/* rightshift */
882 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
883 	 32,			/* bitsize */
884 	 TRUE,			/* pc_relative */
885 	 0,			/* bitpos */
886 	 complain_overflow_dont,/* complain_on_overflow */
887 	 bfd_elf_generic_reloc,	/* special_function */
888 	 "R_ARM_REL32_NOI",	/* name */
889 	 FALSE,			/* partial_inplace */
890 	 0xffffffff,		/* src_mask */
891 	 0xffffffff,		/* dst_mask */
892 	 FALSE),		/* pcrel_offset */
893 
894   /* Group relocations.  */
895 
896   HOWTO (R_ARM_ALU_PC_G0_NC,	/* type */
897 	 0,			/* rightshift */
898 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
899 	 32,			/* bitsize */
900 	 TRUE,			/* pc_relative */
901 	 0,			/* bitpos */
902 	 complain_overflow_dont,/* complain_on_overflow */
903 	 bfd_elf_generic_reloc,	/* special_function */
904 	 "R_ARM_ALU_PC_G0_NC",	/* name */
905 	 FALSE,			/* partial_inplace */
906 	 0xffffffff,		/* src_mask */
907 	 0xffffffff,		/* dst_mask */
908 	 TRUE),			/* pcrel_offset */
909 
910   HOWTO (R_ARM_ALU_PC_G0,	/* type */
911 	 0,			/* rightshift */
912 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
913 	 32,			/* bitsize */
914 	 TRUE,			/* pc_relative */
915 	 0,			/* bitpos */
916 	 complain_overflow_dont,/* complain_on_overflow */
917 	 bfd_elf_generic_reloc,	/* special_function */
918 	 "R_ARM_ALU_PC_G0",	/* name */
919 	 FALSE,			/* partial_inplace */
920 	 0xffffffff,		/* src_mask */
921 	 0xffffffff,		/* dst_mask */
922 	 TRUE),			/* pcrel_offset */
923 
924   HOWTO (R_ARM_ALU_PC_G1_NC,	/* type */
925 	 0,			/* rightshift */
926 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
927 	 32,			/* bitsize */
928 	 TRUE,			/* pc_relative */
929 	 0,			/* bitpos */
930 	 complain_overflow_dont,/* complain_on_overflow */
931 	 bfd_elf_generic_reloc,	/* special_function */
932 	 "R_ARM_ALU_PC_G1_NC",	/* name */
933 	 FALSE,			/* partial_inplace */
934 	 0xffffffff,		/* src_mask */
935 	 0xffffffff,		/* dst_mask */
936 	 TRUE),			/* pcrel_offset */
937 
938   HOWTO (R_ARM_ALU_PC_G1,	/* type */
939 	 0,			/* rightshift */
940 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
941 	 32,			/* bitsize */
942 	 TRUE,			/* pc_relative */
943 	 0,			/* bitpos */
944 	 complain_overflow_dont,/* complain_on_overflow */
945 	 bfd_elf_generic_reloc,	/* special_function */
946 	 "R_ARM_ALU_PC_G1",	/* name */
947 	 FALSE,			/* partial_inplace */
948 	 0xffffffff,		/* src_mask */
949 	 0xffffffff,		/* dst_mask */
950 	 TRUE),			/* pcrel_offset */
951 
952   HOWTO (R_ARM_ALU_PC_G2,	/* type */
953 	 0,			/* rightshift */
954 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
955 	 32,			/* bitsize */
956 	 TRUE,			/* pc_relative */
957 	 0,			/* bitpos */
958 	 complain_overflow_dont,/* complain_on_overflow */
959 	 bfd_elf_generic_reloc,	/* special_function */
960 	 "R_ARM_ALU_PC_G2",	/* name */
961 	 FALSE,			/* partial_inplace */
962 	 0xffffffff,		/* src_mask */
963 	 0xffffffff,		/* dst_mask */
964 	 TRUE),			/* pcrel_offset */
965 
966   HOWTO (R_ARM_LDR_PC_G1,	/* type */
967 	 0,			/* rightshift */
968 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
969 	 32,			/* bitsize */
970 	 TRUE,			/* pc_relative */
971 	 0,			/* bitpos */
972 	 complain_overflow_dont,/* complain_on_overflow */
973 	 bfd_elf_generic_reloc,	/* special_function */
974 	 "R_ARM_LDR_PC_G1",	/* name */
975 	 FALSE,			/* partial_inplace */
976 	 0xffffffff,		/* src_mask */
977 	 0xffffffff,		/* dst_mask */
978 	 TRUE),			/* pcrel_offset */
979 
980   HOWTO (R_ARM_LDR_PC_G2,	/* type */
981 	 0,			/* rightshift */
982 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
983 	 32,			/* bitsize */
984 	 TRUE,			/* pc_relative */
985 	 0,			/* bitpos */
986 	 complain_overflow_dont,/* complain_on_overflow */
987 	 bfd_elf_generic_reloc,	/* special_function */
988 	 "R_ARM_LDR_PC_G2",	/* name */
989 	 FALSE,			/* partial_inplace */
990 	 0xffffffff,		/* src_mask */
991 	 0xffffffff,		/* dst_mask */
992 	 TRUE),			/* pcrel_offset */
993 
994   HOWTO (R_ARM_LDRS_PC_G0,	/* type */
995 	 0,			/* rightshift */
996 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
997 	 32,			/* bitsize */
998 	 TRUE,			/* pc_relative */
999 	 0,			/* bitpos */
1000 	 complain_overflow_dont,/* complain_on_overflow */
1001 	 bfd_elf_generic_reloc,	/* special_function */
1002 	 "R_ARM_LDRS_PC_G0",	/* name */
1003 	 FALSE,			/* partial_inplace */
1004 	 0xffffffff,		/* src_mask */
1005 	 0xffffffff,		/* dst_mask */
1006 	 TRUE),			/* pcrel_offset */
1007 
1008   HOWTO (R_ARM_LDRS_PC_G1,	/* type */
1009 	 0,			/* rightshift */
1010 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1011 	 32,			/* bitsize */
1012 	 TRUE,			/* pc_relative */
1013 	 0,			/* bitpos */
1014 	 complain_overflow_dont,/* complain_on_overflow */
1015 	 bfd_elf_generic_reloc,	/* special_function */
1016 	 "R_ARM_LDRS_PC_G1",	/* name */
1017 	 FALSE,			/* partial_inplace */
1018 	 0xffffffff,		/* src_mask */
1019 	 0xffffffff,		/* dst_mask */
1020 	 TRUE),			/* pcrel_offset */
1021 
1022   HOWTO (R_ARM_LDRS_PC_G2,	/* type */
1023 	 0,			/* rightshift */
1024 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1025 	 32,			/* bitsize */
1026 	 TRUE,			/* pc_relative */
1027 	 0,			/* bitpos */
1028 	 complain_overflow_dont,/* complain_on_overflow */
1029 	 bfd_elf_generic_reloc,	/* special_function */
1030 	 "R_ARM_LDRS_PC_G2",	/* name */
1031 	 FALSE,			/* partial_inplace */
1032 	 0xffffffff,		/* src_mask */
1033 	 0xffffffff,		/* dst_mask */
1034 	 TRUE),			/* pcrel_offset */
1035 
1036   HOWTO (R_ARM_LDC_PC_G0,	/* type */
1037 	 0,			/* rightshift */
1038 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1039 	 32,			/* bitsize */
1040 	 TRUE,			/* pc_relative */
1041 	 0,			/* bitpos */
1042 	 complain_overflow_dont,/* complain_on_overflow */
1043 	 bfd_elf_generic_reloc,	/* special_function */
1044 	 "R_ARM_LDC_PC_G0",	/* name */
1045 	 FALSE,			/* partial_inplace */
1046 	 0xffffffff,		/* src_mask */
1047 	 0xffffffff,		/* dst_mask */
1048 	 TRUE),			/* pcrel_offset */
1049 
1050   HOWTO (R_ARM_LDC_PC_G1,	/* type */
1051 	 0,			/* rightshift */
1052 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1053 	 32,			/* bitsize */
1054 	 TRUE,			/* pc_relative */
1055 	 0,			/* bitpos */
1056 	 complain_overflow_dont,/* complain_on_overflow */
1057 	 bfd_elf_generic_reloc,	/* special_function */
1058 	 "R_ARM_LDC_PC_G1",	/* name */
1059 	 FALSE,			/* partial_inplace */
1060 	 0xffffffff,		/* src_mask */
1061 	 0xffffffff,		/* dst_mask */
1062 	 TRUE),			/* pcrel_offset */
1063 
1064   HOWTO (R_ARM_LDC_PC_G2,	/* type */
1065 	 0,			/* rightshift */
1066 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1067 	 32,			/* bitsize */
1068 	 TRUE,			/* pc_relative */
1069 	 0,			/* bitpos */
1070 	 complain_overflow_dont,/* complain_on_overflow */
1071 	 bfd_elf_generic_reloc,	/* special_function */
1072 	 "R_ARM_LDC_PC_G2",	/* name */
1073 	 FALSE,			/* partial_inplace */
1074 	 0xffffffff,		/* src_mask */
1075 	 0xffffffff,		/* dst_mask */
1076 	 TRUE),			/* pcrel_offset */
1077 
1078   HOWTO (R_ARM_ALU_SB_G0_NC,	/* type */
1079 	 0,			/* rightshift */
1080 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1081 	 32,			/* bitsize */
1082 	 TRUE,			/* pc_relative */
1083 	 0,			/* bitpos */
1084 	 complain_overflow_dont,/* complain_on_overflow */
1085 	 bfd_elf_generic_reloc,	/* special_function */
1086 	 "R_ARM_ALU_SB_G0_NC",	/* name */
1087 	 FALSE,			/* partial_inplace */
1088 	 0xffffffff,		/* src_mask */
1089 	 0xffffffff,		/* dst_mask */
1090 	 TRUE),			/* pcrel_offset */
1091 
1092   HOWTO (R_ARM_ALU_SB_G0,	/* type */
1093 	 0,			/* rightshift */
1094 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1095 	 32,			/* bitsize */
1096 	 TRUE,			/* pc_relative */
1097 	 0,			/* bitpos */
1098 	 complain_overflow_dont,/* complain_on_overflow */
1099 	 bfd_elf_generic_reloc,	/* special_function */
1100 	 "R_ARM_ALU_SB_G0",	/* name */
1101 	 FALSE,			/* partial_inplace */
1102 	 0xffffffff,		/* src_mask */
1103 	 0xffffffff,		/* dst_mask */
1104 	 TRUE),			/* pcrel_offset */
1105 
1106   HOWTO (R_ARM_ALU_SB_G1_NC,	/* type */
1107 	 0,			/* rightshift */
1108 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1109 	 32,			/* bitsize */
1110 	 TRUE,			/* pc_relative */
1111 	 0,			/* bitpos */
1112 	 complain_overflow_dont,/* complain_on_overflow */
1113 	 bfd_elf_generic_reloc,	/* special_function */
1114 	 "R_ARM_ALU_SB_G1_NC",	/* name */
1115 	 FALSE,			/* partial_inplace */
1116 	 0xffffffff,		/* src_mask */
1117 	 0xffffffff,		/* dst_mask */
1118 	 TRUE),			/* pcrel_offset */
1119 
1120   HOWTO (R_ARM_ALU_SB_G1,	/* type */
1121 	 0,			/* rightshift */
1122 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1123 	 32,			/* bitsize */
1124 	 TRUE,			/* pc_relative */
1125 	 0,			/* bitpos */
1126 	 complain_overflow_dont,/* complain_on_overflow */
1127 	 bfd_elf_generic_reloc,	/* special_function */
1128 	 "R_ARM_ALU_SB_G1",	/* name */
1129 	 FALSE,			/* partial_inplace */
1130 	 0xffffffff,		/* src_mask */
1131 	 0xffffffff,		/* dst_mask */
1132 	 TRUE),			/* pcrel_offset */
1133 
1134   HOWTO (R_ARM_ALU_SB_G2,	/* type */
1135 	 0,			/* rightshift */
1136 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1137 	 32,			/* bitsize */
1138 	 TRUE,			/* pc_relative */
1139 	 0,			/* bitpos */
1140 	 complain_overflow_dont,/* complain_on_overflow */
1141 	 bfd_elf_generic_reloc,	/* special_function */
1142 	 "R_ARM_ALU_SB_G2",	/* name */
1143 	 FALSE,			/* partial_inplace */
1144 	 0xffffffff,		/* src_mask */
1145 	 0xffffffff,		/* dst_mask */
1146 	 TRUE),			/* pcrel_offset */
1147 
1148   HOWTO (R_ARM_LDR_SB_G0,	/* type */
1149 	 0,			/* rightshift */
1150 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1151 	 32,			/* bitsize */
1152 	 TRUE,			/* pc_relative */
1153 	 0,			/* bitpos */
1154 	 complain_overflow_dont,/* complain_on_overflow */
1155 	 bfd_elf_generic_reloc,	/* special_function */
1156 	 "R_ARM_LDR_SB_G0",	/* name */
1157 	 FALSE,			/* partial_inplace */
1158 	 0xffffffff,		/* src_mask */
1159 	 0xffffffff,		/* dst_mask */
1160 	 TRUE),			/* pcrel_offset */
1161 
1162   HOWTO (R_ARM_LDR_SB_G1,	/* type */
1163 	 0,			/* rightshift */
1164 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1165 	 32,			/* bitsize */
1166 	 TRUE,			/* pc_relative */
1167 	 0,			/* bitpos */
1168 	 complain_overflow_dont,/* complain_on_overflow */
1169 	 bfd_elf_generic_reloc,	/* special_function */
1170 	 "R_ARM_LDR_SB_G1",	/* name */
1171 	 FALSE,			/* partial_inplace */
1172 	 0xffffffff,		/* src_mask */
1173 	 0xffffffff,		/* dst_mask */
1174 	 TRUE),			/* pcrel_offset */
1175 
1176   HOWTO (R_ARM_LDR_SB_G2,	/* type */
1177 	 0,			/* rightshift */
1178 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1179 	 32,			/* bitsize */
1180 	 TRUE,			/* pc_relative */
1181 	 0,			/* bitpos */
1182 	 complain_overflow_dont,/* complain_on_overflow */
1183 	 bfd_elf_generic_reloc,	/* special_function */
1184 	 "R_ARM_LDR_SB_G2",	/* name */
1185 	 FALSE,			/* partial_inplace */
1186 	 0xffffffff,		/* src_mask */
1187 	 0xffffffff,		/* dst_mask */
1188 	 TRUE),			/* pcrel_offset */
1189 
1190   HOWTO (R_ARM_LDRS_SB_G0,	/* type */
1191 	 0,			/* rightshift */
1192 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1193 	 32,			/* bitsize */
1194 	 TRUE,			/* pc_relative */
1195 	 0,			/* bitpos */
1196 	 complain_overflow_dont,/* complain_on_overflow */
1197 	 bfd_elf_generic_reloc,	/* special_function */
1198 	 "R_ARM_LDRS_SB_G0",	/* name */
1199 	 FALSE,			/* partial_inplace */
1200 	 0xffffffff,		/* src_mask */
1201 	 0xffffffff,		/* dst_mask */
1202 	 TRUE),			/* pcrel_offset */
1203 
1204   HOWTO (R_ARM_LDRS_SB_G1,	/* type */
1205 	 0,			/* rightshift */
1206 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1207 	 32,			/* bitsize */
1208 	 TRUE,			/* pc_relative */
1209 	 0,			/* bitpos */
1210 	 complain_overflow_dont,/* complain_on_overflow */
1211 	 bfd_elf_generic_reloc,	/* special_function */
1212 	 "R_ARM_LDRS_SB_G1",	/* name */
1213 	 FALSE,			/* partial_inplace */
1214 	 0xffffffff,		/* src_mask */
1215 	 0xffffffff,		/* dst_mask */
1216 	 TRUE),			/* pcrel_offset */
1217 
1218   HOWTO (R_ARM_LDRS_SB_G2,	/* type */
1219 	 0,			/* rightshift */
1220 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1221 	 32,			/* bitsize */
1222 	 TRUE,			/* pc_relative */
1223 	 0,			/* bitpos */
1224 	 complain_overflow_dont,/* complain_on_overflow */
1225 	 bfd_elf_generic_reloc,	/* special_function */
1226 	 "R_ARM_LDRS_SB_G2",	/* name */
1227 	 FALSE,			/* partial_inplace */
1228 	 0xffffffff,		/* src_mask */
1229 	 0xffffffff,		/* dst_mask */
1230 	 TRUE),			/* pcrel_offset */
1231 
1232   HOWTO (R_ARM_LDC_SB_G0,	/* type */
1233 	 0,			/* rightshift */
1234 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1235 	 32,			/* bitsize */
1236 	 TRUE,			/* pc_relative */
1237 	 0,			/* bitpos */
1238 	 complain_overflow_dont,/* complain_on_overflow */
1239 	 bfd_elf_generic_reloc,	/* special_function */
1240 	 "R_ARM_LDC_SB_G0",	/* name */
1241 	 FALSE,			/* partial_inplace */
1242 	 0xffffffff,		/* src_mask */
1243 	 0xffffffff,		/* dst_mask */
1244 	 TRUE),			/* pcrel_offset */
1245 
1246   HOWTO (R_ARM_LDC_SB_G1,	/* type */
1247 	 0,			/* rightshift */
1248 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1249 	 32,			/* bitsize */
1250 	 TRUE,			/* pc_relative */
1251 	 0,			/* bitpos */
1252 	 complain_overflow_dont,/* complain_on_overflow */
1253 	 bfd_elf_generic_reloc,	/* special_function */
1254 	 "R_ARM_LDC_SB_G1",	/* name */
1255 	 FALSE,			/* partial_inplace */
1256 	 0xffffffff,		/* src_mask */
1257 	 0xffffffff,		/* dst_mask */
1258 	 TRUE),			/* pcrel_offset */
1259 
1260   HOWTO (R_ARM_LDC_SB_G2,	/* type */
1261 	 0,			/* rightshift */
1262 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1263 	 32,			/* bitsize */
1264 	 TRUE,			/* pc_relative */
1265 	 0,			/* bitpos */
1266 	 complain_overflow_dont,/* complain_on_overflow */
1267 	 bfd_elf_generic_reloc,	/* special_function */
1268 	 "R_ARM_LDC_SB_G2",	/* name */
1269 	 FALSE,			/* partial_inplace */
1270 	 0xffffffff,		/* src_mask */
1271 	 0xffffffff,		/* dst_mask */
1272 	 TRUE),			/* pcrel_offset */
1273 
1274   /* End of group relocations.  */
1275 
1276   HOWTO (R_ARM_MOVW_BREL_NC,	/* type */
1277 	 0,			/* rightshift */
1278 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1279 	 16,			/* bitsize */
1280 	 FALSE,			/* pc_relative */
1281 	 0,			/* bitpos */
1282 	 complain_overflow_dont,/* complain_on_overflow */
1283 	 bfd_elf_generic_reloc,	/* special_function */
1284 	 "R_ARM_MOVW_BREL_NC",	/* name */
1285 	 FALSE,			/* partial_inplace */
1286 	 0x0000ffff,		/* src_mask */
1287 	 0x0000ffff,		/* dst_mask */
1288 	 FALSE),		/* pcrel_offset */
1289 
1290   HOWTO (R_ARM_MOVT_BREL,	/* type */
1291 	 0,			/* rightshift */
1292 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1293 	 16,			/* bitsize */
1294 	 FALSE,			/* pc_relative */
1295 	 0,			/* bitpos */
1296 	 complain_overflow_bitfield,/* complain_on_overflow */
1297 	 bfd_elf_generic_reloc,	/* special_function */
1298 	 "R_ARM_MOVT_BREL",	/* name */
1299 	 FALSE,			/* partial_inplace */
1300 	 0x0000ffff,		/* src_mask */
1301 	 0x0000ffff,		/* dst_mask */
1302 	 FALSE),		/* pcrel_offset */
1303 
1304   HOWTO (R_ARM_MOVW_BREL,	/* type */
1305 	 0,			/* rightshift */
1306 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1307 	 16,			/* bitsize */
1308 	 FALSE,			/* pc_relative */
1309 	 0,			/* bitpos */
1310 	 complain_overflow_dont,/* complain_on_overflow */
1311 	 bfd_elf_generic_reloc,	/* special_function */
1312 	 "R_ARM_MOVW_BREL",	/* name */
1313 	 FALSE,			/* partial_inplace */
1314 	 0x0000ffff,		/* src_mask */
1315 	 0x0000ffff,		/* dst_mask */
1316 	 FALSE),		/* pcrel_offset */
1317 
1318   HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1319 	 0,			/* rightshift */
1320 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1321 	 16,			/* bitsize */
1322 	 FALSE,			/* pc_relative */
1323 	 0,			/* bitpos */
1324 	 complain_overflow_dont,/* complain_on_overflow */
1325 	 bfd_elf_generic_reloc,	/* special_function */
1326 	 "R_ARM_THM_MOVW_BREL_NC",/* name */
1327 	 FALSE,			/* partial_inplace */
1328 	 0x040f70ff,		/* src_mask */
1329 	 0x040f70ff,		/* dst_mask */
1330 	 FALSE),		/* pcrel_offset */
1331 
1332   HOWTO (R_ARM_THM_MOVT_BREL,	/* type */
1333 	 0,			/* rightshift */
1334 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1335 	 16,			/* bitsize */
1336 	 FALSE,			/* pc_relative */
1337 	 0,			/* bitpos */
1338 	 complain_overflow_bitfield,/* complain_on_overflow */
1339 	 bfd_elf_generic_reloc,	/* special_function */
1340 	 "R_ARM_THM_MOVT_BREL",	/* name */
1341 	 FALSE,			/* partial_inplace */
1342 	 0x040f70ff,		/* src_mask */
1343 	 0x040f70ff,		/* dst_mask */
1344 	 FALSE),		/* pcrel_offset */
1345 
1346   HOWTO (R_ARM_THM_MOVW_BREL,	/* type */
1347 	 0,			/* rightshift */
1348 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1349 	 16,			/* bitsize */
1350 	 FALSE,			/* pc_relative */
1351 	 0,			/* bitpos */
1352 	 complain_overflow_dont,/* complain_on_overflow */
1353 	 bfd_elf_generic_reloc,	/* special_function */
1354 	 "R_ARM_THM_MOVW_BREL",	/* name */
1355 	 FALSE,			/* partial_inplace */
1356 	 0x040f70ff,		/* src_mask */
1357 	 0x040f70ff,		/* dst_mask */
1358 	 FALSE),		/* pcrel_offset */
1359 
1360   HOWTO (R_ARM_TLS_GOTDESC,	/* type */
1361 	 0,			/* rightshift */
1362 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1363 	 32,			/* bitsize */
1364 	 FALSE,			/* pc_relative */
1365 	 0,			/* bitpos */
1366 	 complain_overflow_bitfield,/* complain_on_overflow */
1367 	 NULL,			/* special_function */
1368 	 "R_ARM_TLS_GOTDESC",	/* name */
1369 	 TRUE,			/* partial_inplace */
1370 	 0xffffffff,		/* src_mask */
1371 	 0xffffffff,		/* dst_mask */
1372 	 FALSE),		/* pcrel_offset */
1373 
1374   HOWTO (R_ARM_TLS_CALL,	/* type */
1375 	 0,			/* rightshift */
1376 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1377 	 24,			/* bitsize */
1378 	 FALSE,			/* pc_relative */
1379 	 0,			/* bitpos */
1380 	 complain_overflow_dont,/* complain_on_overflow */
1381 	 bfd_elf_generic_reloc,	/* special_function */
1382 	 "R_ARM_TLS_CALL",	/* name */
1383 	 FALSE,			/* partial_inplace */
1384 	 0x00ffffff,		/* src_mask */
1385 	 0x00ffffff,		/* dst_mask */
1386 	 FALSE),		/* pcrel_offset */
1387 
1388   HOWTO (R_ARM_TLS_DESCSEQ,	/* type */
1389 	 0,			/* rightshift */
1390 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1391 	 0,			/* bitsize */
1392 	 FALSE,			/* pc_relative */
1393 	 0,			/* bitpos */
1394 	 complain_overflow_bitfield,/* complain_on_overflow */
1395 	 bfd_elf_generic_reloc,	/* special_function */
1396 	 "R_ARM_TLS_DESCSEQ",	/* name */
1397 	 FALSE,			/* partial_inplace */
1398 	 0x00000000,		/* src_mask */
1399 	 0x00000000,		/* dst_mask */
1400 	 FALSE),		/* pcrel_offset */
1401 
1402   HOWTO (R_ARM_THM_TLS_CALL,	/* type */
1403 	 0,			/* rightshift */
1404 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1405 	 24,			/* bitsize */
1406 	 FALSE,			/* pc_relative */
1407 	 0,			/* bitpos */
1408 	 complain_overflow_dont,/* complain_on_overflow */
1409 	 bfd_elf_generic_reloc,	/* special_function */
1410 	 "R_ARM_THM_TLS_CALL",	/* name */
1411 	 FALSE,			/* partial_inplace */
1412 	 0x07ff07ff,		/* src_mask */
1413 	 0x07ff07ff,		/* dst_mask */
1414 	 FALSE),		/* pcrel_offset */
1415 
1416   HOWTO (R_ARM_PLT32_ABS,	/* type */
1417 	 0,			/* rightshift */
1418 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1419 	 32,			/* bitsize */
1420 	 FALSE,			/* pc_relative */
1421 	 0,			/* bitpos */
1422 	 complain_overflow_dont,/* complain_on_overflow */
1423 	 bfd_elf_generic_reloc,	/* special_function */
1424 	 "R_ARM_PLT32_ABS",	/* name */
1425 	 FALSE,			/* partial_inplace */
1426 	 0xffffffff,		/* src_mask */
1427 	 0xffffffff,		/* dst_mask */
1428 	 FALSE),		/* pcrel_offset */
1429 
1430   HOWTO (R_ARM_GOT_ABS,		/* type */
1431 	 0,			/* rightshift */
1432 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1433 	 32,			/* bitsize */
1434 	 FALSE,			/* pc_relative */
1435 	 0,			/* bitpos */
1436 	 complain_overflow_dont,/* complain_on_overflow */
1437 	 bfd_elf_generic_reloc,	/* special_function */
1438 	 "R_ARM_GOT_ABS",	/* name */
1439 	 FALSE,			/* partial_inplace */
1440 	 0xffffffff,		/* src_mask */
1441 	 0xffffffff,		/* dst_mask */
1442 	 FALSE),			/* pcrel_offset */
1443 
1444   HOWTO (R_ARM_GOT_PREL,	/* type */
1445 	 0,			/* rightshift */
1446 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1447 	 32,			/* bitsize */
1448 	 TRUE,			/* pc_relative */
1449 	 0,			/* bitpos */
1450 	 complain_overflow_dont,	/* complain_on_overflow */
1451 	 bfd_elf_generic_reloc,	/* special_function */
1452 	 "R_ARM_GOT_PREL",	/* name */
1453 	 FALSE,			/* partial_inplace */
1454 	 0xffffffff,		/* src_mask */
1455 	 0xffffffff,		/* dst_mask */
1456 	 TRUE),			/* pcrel_offset */
1457 
1458   HOWTO (R_ARM_GOT_BREL12,	/* type */
1459 	 0,			/* rightshift */
1460 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1461 	 12,			/* bitsize */
1462 	 FALSE,			/* pc_relative */
1463 	 0,			/* bitpos */
1464 	 complain_overflow_bitfield,/* complain_on_overflow */
1465 	 bfd_elf_generic_reloc,	/* special_function */
1466 	 "R_ARM_GOT_BREL12",	/* name */
1467 	 FALSE,			/* partial_inplace */
1468 	 0x00000fff,		/* src_mask */
1469 	 0x00000fff,		/* dst_mask */
1470 	 FALSE),		/* pcrel_offset */
1471 
1472   HOWTO (R_ARM_GOTOFF12,	/* type */
1473 	 0,			/* rightshift */
1474 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1475 	 12,			/* bitsize */
1476 	 FALSE,			/* pc_relative */
1477 	 0,			/* bitpos */
1478 	 complain_overflow_bitfield,/* complain_on_overflow */
1479 	 bfd_elf_generic_reloc,	/* special_function */
1480 	 "R_ARM_GOTOFF12",	/* name */
1481 	 FALSE,			/* partial_inplace */
1482 	 0x00000fff,		/* src_mask */
1483 	 0x00000fff,		/* dst_mask */
1484 	 FALSE),		/* pcrel_offset */
1485 
1486   EMPTY_HOWTO (R_ARM_GOTRELAX),	 /* reserved for future GOT-load optimizations */
1487 
1488   /* GNU extension to record C++ vtable member usage */
1489   HOWTO (R_ARM_GNU_VTENTRY,	/* type */
1490 	 0,			/* rightshift */
1491 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1492 	 0,			/* bitsize */
1493 	 FALSE,			/* pc_relative */
1494 	 0,			/* bitpos */
1495 	 complain_overflow_dont, /* complain_on_overflow */
1496 	 _bfd_elf_rel_vtable_reloc_fn,	/* special_function */
1497 	 "R_ARM_GNU_VTENTRY",	/* name */
1498 	 FALSE,			/* partial_inplace */
1499 	 0,			/* src_mask */
1500 	 0,			/* dst_mask */
1501 	 FALSE),		/* pcrel_offset */
1502 
1503   /* GNU extension to record C++ vtable hierarchy */
1504   HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1505 	 0,			/* rightshift */
1506 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1507 	 0,			/* bitsize */
1508 	 FALSE,			/* pc_relative */
1509 	 0,			/* bitpos */
1510 	 complain_overflow_dont, /* complain_on_overflow */
1511 	 NULL,			/* special_function */
1512 	 "R_ARM_GNU_VTINHERIT", /* name */
1513 	 FALSE,			/* partial_inplace */
1514 	 0,			/* src_mask */
1515 	 0,			/* dst_mask */
1516 	 FALSE),		/* pcrel_offset */
1517 
1518   HOWTO (R_ARM_THM_JUMP11,	/* type */
1519 	 1,			/* rightshift */
1520 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1521 	 11,			/* bitsize */
1522 	 TRUE,			/* pc_relative */
1523 	 0,			/* bitpos */
1524 	 complain_overflow_signed,	/* complain_on_overflow */
1525 	 bfd_elf_generic_reloc,	/* special_function */
1526 	 "R_ARM_THM_JUMP11",	/* name */
1527 	 FALSE,			/* partial_inplace */
1528 	 0x000007ff,		/* src_mask */
1529 	 0x000007ff,		/* dst_mask */
1530 	 TRUE),			/* pcrel_offset */
1531 
1532   HOWTO (R_ARM_THM_JUMP8,	/* type */
1533 	 1,			/* rightshift */
1534 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1535 	 8,			/* bitsize */
1536 	 TRUE,			/* pc_relative */
1537 	 0,			/* bitpos */
1538 	 complain_overflow_signed,	/* complain_on_overflow */
1539 	 bfd_elf_generic_reloc,	/* special_function */
1540 	 "R_ARM_THM_JUMP8",	/* name */
1541 	 FALSE,			/* partial_inplace */
1542 	 0x000000ff,		/* src_mask */
1543 	 0x000000ff,		/* dst_mask */
1544 	 TRUE),			/* pcrel_offset */
1545 
1546   /* TLS relocations */
1547   HOWTO (R_ARM_TLS_GD32,	/* type */
1548 	 0,			/* rightshift */
1549 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1550 	 32,			/* bitsize */
1551 	 FALSE,			/* pc_relative */
1552 	 0,			/* bitpos */
1553 	 complain_overflow_bitfield,/* complain_on_overflow */
1554 	 NULL,			/* special_function */
1555 	 "R_ARM_TLS_GD32",	/* name */
1556 	 TRUE,			/* partial_inplace */
1557 	 0xffffffff,		/* src_mask */
1558 	 0xffffffff,		/* dst_mask */
1559 	 FALSE),		/* pcrel_offset */
1560 
1561   HOWTO (R_ARM_TLS_LDM32,	/* type */
1562 	 0,			/* rightshift */
1563 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1564 	 32,			/* bitsize */
1565 	 FALSE,			/* pc_relative */
1566 	 0,			/* bitpos */
1567 	 complain_overflow_bitfield,/* complain_on_overflow */
1568 	 bfd_elf_generic_reloc, /* special_function */
1569 	 "R_ARM_TLS_LDM32",	/* name */
1570 	 TRUE,			/* partial_inplace */
1571 	 0xffffffff,		/* src_mask */
1572 	 0xffffffff,		/* dst_mask */
1573 	 FALSE),		/* pcrel_offset */
1574 
1575   HOWTO (R_ARM_TLS_LDO32,	/* type */
1576 	 0,			/* rightshift */
1577 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1578 	 32,			/* bitsize */
1579 	 FALSE,			/* pc_relative */
1580 	 0,			/* bitpos */
1581 	 complain_overflow_bitfield,/* complain_on_overflow */
1582 	 bfd_elf_generic_reloc, /* special_function */
1583 	 "R_ARM_TLS_LDO32",	/* name */
1584 	 TRUE,			/* partial_inplace */
1585 	 0xffffffff,		/* src_mask */
1586 	 0xffffffff,		/* dst_mask */
1587 	 FALSE),		/* pcrel_offset */
1588 
1589   HOWTO (R_ARM_TLS_IE32,	/* type */
1590 	 0,			/* rightshift */
1591 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1592 	 32,			/* bitsize */
1593 	 FALSE,			 /* pc_relative */
1594 	 0,			/* bitpos */
1595 	 complain_overflow_bitfield,/* complain_on_overflow */
1596 	 NULL,			/* special_function */
1597 	 "R_ARM_TLS_IE32",	/* name */
1598 	 TRUE,			/* partial_inplace */
1599 	 0xffffffff,		/* src_mask */
1600 	 0xffffffff,		/* dst_mask */
1601 	 FALSE),		/* pcrel_offset */
1602 
1603   HOWTO (R_ARM_TLS_LE32,	/* type */
1604 	 0,			/* rightshift */
1605 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1606 	 32,			/* bitsize */
1607 	 FALSE,			/* pc_relative */
1608 	 0,			/* bitpos */
1609 	 complain_overflow_bitfield,/* complain_on_overflow */
1610 	 NULL,			/* special_function */
1611 	 "R_ARM_TLS_LE32",	/* name */
1612 	 TRUE,			/* partial_inplace */
1613 	 0xffffffff,		/* src_mask */
1614 	 0xffffffff,		/* dst_mask */
1615 	 FALSE),		/* pcrel_offset */
1616 
1617   HOWTO (R_ARM_TLS_LDO12,	/* type */
1618 	 0,			/* rightshift */
1619 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1620 	 12,			/* bitsize */
1621 	 FALSE,			/* pc_relative */
1622 	 0,			/* bitpos */
1623 	 complain_overflow_bitfield,/* complain_on_overflow */
1624 	 bfd_elf_generic_reloc,	/* special_function */
1625 	 "R_ARM_TLS_LDO12",	/* name */
1626 	 FALSE,			/* partial_inplace */
1627 	 0x00000fff,		/* src_mask */
1628 	 0x00000fff,		/* dst_mask */
1629 	 FALSE),		/* pcrel_offset */
1630 
1631   HOWTO (R_ARM_TLS_LE12,	/* type */
1632 	 0,			/* rightshift */
1633 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1634 	 12,			/* bitsize */
1635 	 FALSE,			/* pc_relative */
1636 	 0,			/* bitpos */
1637 	 complain_overflow_bitfield,/* complain_on_overflow */
1638 	 bfd_elf_generic_reloc,	/* special_function */
1639 	 "R_ARM_TLS_LE12",	/* name */
1640 	 FALSE,			/* partial_inplace */
1641 	 0x00000fff,		/* src_mask */
1642 	 0x00000fff,		/* dst_mask */
1643 	 FALSE),		/* pcrel_offset */
1644 
1645   HOWTO (R_ARM_TLS_IE12GP,	/* type */
1646 	 0,			/* rightshift */
1647 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1648 	 12,			/* bitsize */
1649 	 FALSE,			/* pc_relative */
1650 	 0,			/* bitpos */
1651 	 complain_overflow_bitfield,/* complain_on_overflow */
1652 	 bfd_elf_generic_reloc,	/* special_function */
1653 	 "R_ARM_TLS_IE12GP",	/* name */
1654 	 FALSE,			/* partial_inplace */
1655 	 0x00000fff,		/* src_mask */
1656 	 0x00000fff,		/* dst_mask */
1657 	 FALSE),		/* pcrel_offset */
1658 
1659   /* 112-127 private relocations.  */
1660   EMPTY_HOWTO (112),
1661   EMPTY_HOWTO (113),
1662   EMPTY_HOWTO (114),
1663   EMPTY_HOWTO (115),
1664   EMPTY_HOWTO (116),
1665   EMPTY_HOWTO (117),
1666   EMPTY_HOWTO (118),
1667   EMPTY_HOWTO (119),
1668   EMPTY_HOWTO (120),
1669   EMPTY_HOWTO (121),
1670   EMPTY_HOWTO (122),
1671   EMPTY_HOWTO (123),
1672   EMPTY_HOWTO (124),
1673   EMPTY_HOWTO (125),
1674   EMPTY_HOWTO (126),
1675   EMPTY_HOWTO (127),
1676 
1677   /* R_ARM_ME_TOO, obsolete.  */
1678   EMPTY_HOWTO (128),
1679 
1680   HOWTO (R_ARM_THM_TLS_DESCSEQ,	/* type */
1681 	 0,			/* rightshift */
1682 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1683 	 0,			/* bitsize */
1684 	 FALSE,			/* pc_relative */
1685 	 0,			/* bitpos */
1686 	 complain_overflow_bitfield,/* complain_on_overflow */
1687 	 bfd_elf_generic_reloc,	/* special_function */
1688 	 "R_ARM_THM_TLS_DESCSEQ",/* name */
1689 	 FALSE,			/* partial_inplace */
1690 	 0x00000000,		/* src_mask */
1691 	 0x00000000,		/* dst_mask */
1692 	 FALSE),		/* pcrel_offset */
1693   EMPTY_HOWTO (130),
1694   EMPTY_HOWTO (131),
1695   HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type.  */
1696 	 0,			/* rightshift.  */
1697 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1698 	 16,			/* bitsize.  */
1699 	 FALSE,			/* pc_relative.  */
1700 	 0,			/* bitpos.  */
1701 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1702 	 bfd_elf_generic_reloc,	/* special_function.  */
1703 	 "R_ARM_THM_ALU_ABS_G0_NC",/* name.  */
1704 	 FALSE,			/* partial_inplace.  */
1705 	 0x00000000,		/* src_mask.  */
1706 	 0x00000000,		/* dst_mask.  */
1707 	 FALSE),		/* pcrel_offset.  */
1708   HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type.  */
1709 	 0,			/* rightshift.  */
1710 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1711 	 16,			/* bitsize.  */
1712 	 FALSE,			/* pc_relative.  */
1713 	 0,			/* bitpos.  */
1714 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1715 	 bfd_elf_generic_reloc,	/* special_function.  */
1716 	 "R_ARM_THM_ALU_ABS_G1_NC",/* name.  */
1717 	 FALSE,			/* partial_inplace.  */
1718 	 0x00000000,		/* src_mask.  */
1719 	 0x00000000,		/* dst_mask.  */
1720 	 FALSE),		/* pcrel_offset.  */
1721   HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type.  */
1722 	 0,			/* rightshift.  */
1723 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1724 	 16,			/* bitsize.  */
1725 	 FALSE,			/* pc_relative.  */
1726 	 0,			/* bitpos.  */
1727 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1728 	 bfd_elf_generic_reloc,	/* special_function.  */
1729 	 "R_ARM_THM_ALU_ABS_G2_NC",/* name.  */
1730 	 FALSE,			/* partial_inplace.  */
1731 	 0x00000000,		/* src_mask.  */
1732 	 0x00000000,		/* dst_mask.  */
1733 	 FALSE),		/* pcrel_offset.  */
1734   HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type.  */
1735 	 0,			/* rightshift.  */
1736 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1737 	 16,			/* bitsize.  */
1738 	 FALSE,			/* pc_relative.  */
1739 	 0,			/* bitpos.  */
1740 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1741 	 bfd_elf_generic_reloc,	/* special_function.  */
1742 	 "R_ARM_THM_ALU_ABS_G3_NC",/* name.  */
1743 	 FALSE,			/* partial_inplace.  */
1744 	 0x00000000,		/* src_mask.  */
1745 	 0x00000000,		/* dst_mask.  */
1746 	 FALSE),		/* pcrel_offset.  */
1747   /* Relocations for Armv8.1-M Mainline.  */
1748   HOWTO (R_ARM_THM_BF16,	/* type.  */
1749 	 0,			/* rightshift.  */
1750 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1751 	 16,			/* bitsize.  */
1752 	 TRUE,			/* pc_relative.  */
1753 	 0,			/* bitpos.  */
1754 	 complain_overflow_dont,/* do not complain_on_overflow.  */
1755 	 bfd_elf_generic_reloc,	/* special_function.  */
1756 	 "R_ARM_THM_BF16",	/* name.  */
1757 	 FALSE,			/* partial_inplace.  */
1758 	 0x001f0ffe,		/* src_mask.  */
1759 	 0x001f0ffe,		/* dst_mask.  */
1760 	 TRUE),			/* pcrel_offset.  */
1761   HOWTO (R_ARM_THM_BF12,	/* type.  */
1762 	 0,			/* rightshift.  */
1763 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1764 	 12,			/* bitsize.  */
1765 	 TRUE,			/* pc_relative.  */
1766 	 0,			/* bitpos.  */
1767 	 complain_overflow_dont,/* do not complain_on_overflow.  */
1768 	 bfd_elf_generic_reloc,	/* special_function.  */
1769 	 "R_ARM_THM_BF12",	/* name.  */
1770 	 FALSE,			/* partial_inplace.  */
1771 	 0x00010ffe,		/* src_mask.  */
1772 	 0x00010ffe,		/* dst_mask.  */
1773 	 TRUE),			/* pcrel_offset.  */
1774   HOWTO (R_ARM_THM_BF18,	/* type.  */
1775 	 0,			/* rightshift.  */
1776 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1777 	 18,			/* bitsize.  */
1778 	 TRUE,			/* pc_relative.  */
1779 	 0,			/* bitpos.  */
1780 	 complain_overflow_dont,/* do not complain_on_overflow.  */
1781 	 bfd_elf_generic_reloc,	/* special_function.  */
1782 	 "R_ARM_THM_BF18",	/* name.  */
1783 	 FALSE,			/* partial_inplace.  */
1784 	 0x007f0ffe,		/* src_mask.  */
1785 	 0x007f0ffe,		/* dst_mask.  */
1786 	 TRUE),			/* pcrel_offset.  */
1787 };
1788 
1789 /* 160 onwards: */
1790 static reloc_howto_type elf32_arm_howto_table_2[8] =
1791 {
1792   HOWTO (R_ARM_IRELATIVE,	/* type */
1793 	 0,			/* rightshift */
1794 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1795 	 32,			/* bitsize */
1796 	 FALSE,			/* pc_relative */
1797 	 0,			/* bitpos */
1798 	 complain_overflow_bitfield,/* complain_on_overflow */
1799 	 bfd_elf_generic_reloc, /* special_function */
1800 	 "R_ARM_IRELATIVE",	/* name */
1801 	 TRUE,			/* partial_inplace */
1802 	 0xffffffff,		/* src_mask */
1803 	 0xffffffff,		/* dst_mask */
1804 	 FALSE),		/* pcrel_offset */
1805   HOWTO (R_ARM_GOTFUNCDESC,	/* type */
1806 	 0,			/* rightshift */
1807 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1808 	 32,			/* bitsize */
1809 	 FALSE,			/* pc_relative */
1810 	 0,			/* bitpos */
1811 	 complain_overflow_bitfield,/* complain_on_overflow */
1812 	 bfd_elf_generic_reloc,	/* special_function */
1813 	 "R_ARM_GOTFUNCDESC",	/* name */
1814 	 FALSE,			/* partial_inplace */
1815 	 0,			/* src_mask */
1816 	 0xffffffff,		/* dst_mask */
1817 	 FALSE),		/* pcrel_offset */
1818   HOWTO (R_ARM_GOTOFFFUNCDESC, /* type */
1819 	 0,			/* rightshift */
1820 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1821 	 32,			/* bitsize */
1822 	 FALSE,			/* pc_relative */
1823 	 0,			/* bitpos */
1824 	 complain_overflow_bitfield,/* complain_on_overflow */
1825 	 bfd_elf_generic_reloc,	/* special_function */
1826 	 "R_ARM_GOTOFFFUNCDESC",/* name */
1827 	 FALSE,			/* partial_inplace */
1828 	 0,			/* src_mask */
1829 	 0xffffffff,		/* dst_mask */
1830 	 FALSE),		/* pcrel_offset */
1831   HOWTO (R_ARM_FUNCDESC,	/* type */
1832 	 0,			/* rightshift */
1833 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1834 	 32,			/* bitsize */
1835 	 FALSE,			/* pc_relative */
1836 	 0,			/* bitpos */
1837 	 complain_overflow_bitfield,/* complain_on_overflow */
1838 	 bfd_elf_generic_reloc,	/* special_function */
1839 	 "R_ARM_FUNCDESC",	/* name */
1840 	 FALSE,			/* partial_inplace */
1841 	 0,			/* src_mask */
1842 	 0xffffffff,		/* dst_mask */
1843 	 FALSE),		/* pcrel_offset */
1844   HOWTO (R_ARM_FUNCDESC_VALUE,	/* type */
1845 	 0,			/* rightshift */
1846 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1847 	 64,			/* bitsize */
1848 	 FALSE,			/* pc_relative */
1849 	 0,			/* bitpos */
1850 	 complain_overflow_bitfield,/* complain_on_overflow */
1851 	 bfd_elf_generic_reloc,	/* special_function */
1852 	 "R_ARM_FUNCDESC_VALUE",/* name */
1853 	 FALSE,			/* partial_inplace */
1854 	 0,			/* src_mask */
1855 	 0xffffffff,		/* dst_mask */
1856 	 FALSE),		/* pcrel_offset */
1857   HOWTO (R_ARM_TLS_GD32_FDPIC,	/* type */
1858 	 0,			/* rightshift */
1859 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1860 	 32,			/* bitsize */
1861 	 FALSE,			/* pc_relative */
1862 	 0,			/* bitpos */
1863 	 complain_overflow_bitfield,/* complain_on_overflow */
1864 	 bfd_elf_generic_reloc,	/* special_function */
1865 	 "R_ARM_TLS_GD32_FDPIC",/* name */
1866 	 FALSE,			/* partial_inplace */
1867 	 0,			/* src_mask */
1868 	 0xffffffff,		/* dst_mask */
1869 	 FALSE),		/* pcrel_offset */
1870   HOWTO (R_ARM_TLS_LDM32_FDPIC,	/* type */
1871 	 0,			/* rightshift */
1872 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1873 	 32,			/* bitsize */
1874 	 FALSE,			/* pc_relative */
1875 	 0,			/* bitpos */
1876 	 complain_overflow_bitfield,/* complain_on_overflow */
1877 	 bfd_elf_generic_reloc,	/* special_function */
1878 	 "R_ARM_TLS_LDM32_FDPIC",/* name */
1879 	 FALSE,			/* partial_inplace */
1880 	 0,			/* src_mask */
1881 	 0xffffffff,		/* dst_mask */
1882 	 FALSE),		/* pcrel_offset */
1883   HOWTO (R_ARM_TLS_IE32_FDPIC,	/* type */
1884 	 0,			/* rightshift */
1885 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1886 	 32,			/* bitsize */
1887 	 FALSE,			/* pc_relative */
1888 	 0,			/* bitpos */
1889 	 complain_overflow_bitfield,/* complain_on_overflow */
1890 	 bfd_elf_generic_reloc,	/* special_function */
1891 	 "R_ARM_TLS_IE32_FDPIC",/* name */
1892 	 FALSE,			/* partial_inplace */
1893 	 0,			/* src_mask */
1894 	 0xffffffff,		/* dst_mask */
1895 	 FALSE),		/* pcrel_offset */
1896 };
1897 
1898 /* 249-255 extended, currently unused, relocations:  */
1899 static reloc_howto_type elf32_arm_howto_table_3[4] =
1900 {
1901   HOWTO (R_ARM_RREL32,		/* type */
1902 	 0,			/* rightshift */
1903 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1904 	 0,			/* bitsize */
1905 	 FALSE,			/* pc_relative */
1906 	 0,			/* bitpos */
1907 	 complain_overflow_dont,/* complain_on_overflow */
1908 	 bfd_elf_generic_reloc,	/* special_function */
1909 	 "R_ARM_RREL32",	/* name */
1910 	 FALSE,			/* partial_inplace */
1911 	 0,			/* src_mask */
1912 	 0,			/* dst_mask */
1913 	 FALSE),		/* pcrel_offset */
1914 
1915   HOWTO (R_ARM_RABS32,		/* type */
1916 	 0,			/* rightshift */
1917 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1918 	 0,			/* bitsize */
1919 	 FALSE,			/* pc_relative */
1920 	 0,			/* bitpos */
1921 	 complain_overflow_dont,/* complain_on_overflow */
1922 	 bfd_elf_generic_reloc,	/* special_function */
1923 	 "R_ARM_RABS32",	/* name */
1924 	 FALSE,			/* partial_inplace */
1925 	 0,			/* src_mask */
1926 	 0,			/* dst_mask */
1927 	 FALSE),		/* pcrel_offset */
1928 
1929   HOWTO (R_ARM_RPC24,		/* type */
1930 	 0,			/* rightshift */
1931 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1932 	 0,			/* bitsize */
1933 	 FALSE,			/* pc_relative */
1934 	 0,			/* bitpos */
1935 	 complain_overflow_dont,/* complain_on_overflow */
1936 	 bfd_elf_generic_reloc,	/* special_function */
1937 	 "R_ARM_RPC24",		/* name */
1938 	 FALSE,			/* partial_inplace */
1939 	 0,			/* src_mask */
1940 	 0,			/* dst_mask */
1941 	 FALSE),		/* pcrel_offset */
1942 
1943   HOWTO (R_ARM_RBASE,		/* type */
1944 	 0,			/* rightshift */
1945 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1946 	 0,			/* bitsize */
1947 	 FALSE,			/* pc_relative */
1948 	 0,			/* bitpos */
1949 	 complain_overflow_dont,/* complain_on_overflow */
1950 	 bfd_elf_generic_reloc,	/* special_function */
1951 	 "R_ARM_RBASE",		/* name */
1952 	 FALSE,			/* partial_inplace */
1953 	 0,			/* src_mask */
1954 	 0,			/* dst_mask */
1955 	 FALSE)			/* pcrel_offset */
1956 };
1957 
1958 static reloc_howto_type *
1959 elf32_arm_howto_from_type (unsigned int r_type)
1960 {
1961   if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1962     return &elf32_arm_howto_table_1[r_type];
1963 
1964   if (r_type >= R_ARM_IRELATIVE
1965       && r_type < R_ARM_IRELATIVE + ARRAY_SIZE (elf32_arm_howto_table_2))
1966     return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1967 
1968   if (r_type >= R_ARM_RREL32
1969       && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1970     return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1971 
1972   return NULL;
1973 }
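/* Illustrative note: a type below ARRAY_SIZE (elf32_arm_howto_table_1)
   indexes that table directly, a type at or above R_ARM_IRELATIVE (160,
   see the comment above table 2) maps into elf32_arm_howto_table_2, the
   R_ARM_RREL32 group maps into elf32_arm_howto_table_3, and anything
   else yields NULL, which elf32_arm_info_to_howto below reports as an
   unsupported relocation type.  */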
1974 
1975 static bfd_boolean
1976 elf32_arm_info_to_howto (bfd * abfd, arelent * bfd_reloc,
1977 			 Elf_Internal_Rela * elf_reloc)
1978 {
1979   unsigned int r_type;
1980 
1981   r_type = ELF32_R_TYPE (elf_reloc->r_info);
1982   if ((bfd_reloc->howto = elf32_arm_howto_from_type (r_type)) == NULL)
1983     {
1984       /* xgettext:c-format */
1985       _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
1986 			  abfd, r_type);
1987       bfd_set_error (bfd_error_bad_value);
1988       return FALSE;
1989     }
1990   return TRUE;
1991 }
1992 
1993 struct elf32_arm_reloc_map
1994   {
1995     bfd_reloc_code_real_type  bfd_reloc_val;
1996     unsigned char	      elf_reloc_val;
1997   };
1998 
1999 /* All entries in this list must also be present in elf32_arm_howto_table.  */
2000 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
2001   {
2002     {BFD_RELOC_NONE,		     R_ARM_NONE},
2003     {BFD_RELOC_ARM_PCREL_BRANCH,     R_ARM_PC24},
2004     {BFD_RELOC_ARM_PCREL_CALL,	     R_ARM_CALL},
2005     {BFD_RELOC_ARM_PCREL_JUMP,	     R_ARM_JUMP24},
2006     {BFD_RELOC_ARM_PCREL_BLX,	     R_ARM_XPC25},
2007     {BFD_RELOC_THUMB_PCREL_BLX,	     R_ARM_THM_XPC22},
2008     {BFD_RELOC_32,		     R_ARM_ABS32},
2009     {BFD_RELOC_32_PCREL,	     R_ARM_REL32},
2010     {BFD_RELOC_8,		     R_ARM_ABS8},
2011     {BFD_RELOC_16,		     R_ARM_ABS16},
2012     {BFD_RELOC_ARM_OFFSET_IMM,	     R_ARM_ABS12},
2013     {BFD_RELOC_ARM_THUMB_OFFSET,     R_ARM_THM_ABS5},
2014     {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
2015     {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
2016     {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
2017     {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
2018     {BFD_RELOC_THUMB_PCREL_BRANCH9,  R_ARM_THM_JUMP8},
2019     {BFD_RELOC_THUMB_PCREL_BRANCH7,  R_ARM_THM_JUMP6},
2020     {BFD_RELOC_ARM_GLOB_DAT,	     R_ARM_GLOB_DAT},
2021     {BFD_RELOC_ARM_JUMP_SLOT,	     R_ARM_JUMP_SLOT},
2022     {BFD_RELOC_ARM_RELATIVE,	     R_ARM_RELATIVE},
2023     {BFD_RELOC_ARM_GOTOFF,	     R_ARM_GOTOFF32},
2024     {BFD_RELOC_ARM_GOTPC,	     R_ARM_GOTPC},
2025     {BFD_RELOC_ARM_GOT_PREL,	     R_ARM_GOT_PREL},
2026     {BFD_RELOC_ARM_GOT32,	     R_ARM_GOT32},
2027     {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
2028     {BFD_RELOC_ARM_TARGET1,	     R_ARM_TARGET1},
2029     {BFD_RELOC_ARM_ROSEGREL32,	     R_ARM_ROSEGREL32},
2030     {BFD_RELOC_ARM_SBREL32,	     R_ARM_SBREL32},
2031     {BFD_RELOC_ARM_PREL31,	     R_ARM_PREL31},
2032     {BFD_RELOC_ARM_TARGET2,	     R_ARM_TARGET2},
2033     {BFD_RELOC_ARM_PLT32,	     R_ARM_PLT32},
2034     {BFD_RELOC_ARM_TLS_GOTDESC,	     R_ARM_TLS_GOTDESC},
2035     {BFD_RELOC_ARM_TLS_CALL,	     R_ARM_TLS_CALL},
2036     {BFD_RELOC_ARM_THM_TLS_CALL,     R_ARM_THM_TLS_CALL},
2037     {BFD_RELOC_ARM_TLS_DESCSEQ,	     R_ARM_TLS_DESCSEQ},
2038     {BFD_RELOC_ARM_THM_TLS_DESCSEQ,  R_ARM_THM_TLS_DESCSEQ},
2039     {BFD_RELOC_ARM_TLS_DESC,	     R_ARM_TLS_DESC},
2040     {BFD_RELOC_ARM_TLS_GD32,	     R_ARM_TLS_GD32},
2041     {BFD_RELOC_ARM_TLS_LDO32,	     R_ARM_TLS_LDO32},
2042     {BFD_RELOC_ARM_TLS_LDM32,	     R_ARM_TLS_LDM32},
2043     {BFD_RELOC_ARM_TLS_DTPMOD32,     R_ARM_TLS_DTPMOD32},
2044     {BFD_RELOC_ARM_TLS_DTPOFF32,     R_ARM_TLS_DTPOFF32},
2045     {BFD_RELOC_ARM_TLS_TPOFF32,	     R_ARM_TLS_TPOFF32},
2046     {BFD_RELOC_ARM_TLS_IE32,	     R_ARM_TLS_IE32},
2047     {BFD_RELOC_ARM_TLS_LE32,	     R_ARM_TLS_LE32},
2048     {BFD_RELOC_ARM_IRELATIVE,	     R_ARM_IRELATIVE},
2049     {BFD_RELOC_ARM_GOTFUNCDESC,      R_ARM_GOTFUNCDESC},
2050     {BFD_RELOC_ARM_GOTOFFFUNCDESC,   R_ARM_GOTOFFFUNCDESC},
2051     {BFD_RELOC_ARM_FUNCDESC,         R_ARM_FUNCDESC},
2052     {BFD_RELOC_ARM_FUNCDESC_VALUE,   R_ARM_FUNCDESC_VALUE},
2053     {BFD_RELOC_ARM_TLS_GD32_FDPIC,   R_ARM_TLS_GD32_FDPIC},
2054     {BFD_RELOC_ARM_TLS_LDM32_FDPIC,  R_ARM_TLS_LDM32_FDPIC},
2055     {BFD_RELOC_ARM_TLS_IE32_FDPIC,   R_ARM_TLS_IE32_FDPIC},
2056     {BFD_RELOC_VTABLE_INHERIT,	     R_ARM_GNU_VTINHERIT},
2057     {BFD_RELOC_VTABLE_ENTRY,	     R_ARM_GNU_VTENTRY},
2058     {BFD_RELOC_ARM_MOVW,	     R_ARM_MOVW_ABS_NC},
2059     {BFD_RELOC_ARM_MOVT,	     R_ARM_MOVT_ABS},
2060     {BFD_RELOC_ARM_MOVW_PCREL,	     R_ARM_MOVW_PREL_NC},
2061     {BFD_RELOC_ARM_MOVT_PCREL,	     R_ARM_MOVT_PREL},
2062     {BFD_RELOC_ARM_THUMB_MOVW,	     R_ARM_THM_MOVW_ABS_NC},
2063     {BFD_RELOC_ARM_THUMB_MOVT,	     R_ARM_THM_MOVT_ABS},
2064     {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
2065     {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
2066     {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
2067     {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
2068     {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
2069     {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
2070     {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
2071     {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
2072     {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
2073     {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
2074     {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
2075     {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
2076     {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
2077     {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
2078     {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
2079     {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
2080     {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
2081     {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
2082     {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
2083     {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
2084     {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
2085     {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
2086     {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
2087     {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
2088     {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
2089     {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
2090     {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
2091     {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
2092     {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
2093     {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
2094     {BFD_RELOC_ARM_V4BX,	     R_ARM_V4BX},
2095     {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
2096     {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
2097     {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
2098     {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC},
2099     {BFD_RELOC_ARM_THUMB_BF17, R_ARM_THM_BF16},
2100     {BFD_RELOC_ARM_THUMB_BF13, R_ARM_THM_BF12},
2101     {BFD_RELOC_ARM_THUMB_BF19, R_ARM_THM_BF18}
2102   };
2103 
2104 static reloc_howto_type *
2105 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2106 			     bfd_reloc_code_real_type code)
2107 {
2108   unsigned int i;
2109 
2110   for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
2111     if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
2112       return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
2113 
2114   return NULL;
2115 }
2116 
2117 static reloc_howto_type *
2118 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
2119 			     const char *r_name)
2120 {
2121   unsigned int i;
2122 
2123   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
2124     if (elf32_arm_howto_table_1[i].name != NULL
2125 	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
2126       return &elf32_arm_howto_table_1[i];
2127 
2128   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
2129     if (elf32_arm_howto_table_2[i].name != NULL
2130 	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
2131       return &elf32_arm_howto_table_2[i];
2132 
2133   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
2134     if (elf32_arm_howto_table_3[i].name != NULL
2135 	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
2136       return &elf32_arm_howto_table_3[i];
2137 
2138   return NULL;
2139 }
2140 
2141 /* Support for core dump NOTE sections.  */
2142 
2143 static bfd_boolean
2144 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
2145 {
2146   int offset;
2147   size_t size;
2148 
2149   switch (note->descsz)
2150     {
2151       default:
2152 	return FALSE;
2153 
2154       case 148:		/* Linux/ARM 32-bit.  */
2155 	/* pr_cursig */
2156 	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2157 
2158 	/* pr_pid */
2159 	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2160 
2161 	/* pr_reg */
2162 	offset = 72;
2163 	size = 72;
2164 
2165 	break;
2166     }
2167 
2168   /* Make a ".reg/999" section.  */
2169   return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2170 					  size, note->descpos + offset);
2171 }
2172 
2173 static bfd_boolean
2174 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2175 {
2176   switch (note->descsz)
2177     {
2178       default:
2179 	return FALSE;
2180 
2181       case 124:		/* Linux/ARM elf_prpsinfo.  */
2182 	elf_tdata (abfd)->core->pid
2183 	 = bfd_get_32 (abfd, note->descdata + 12);
2184 	elf_tdata (abfd)->core->program
2185 	 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2186 	elf_tdata (abfd)->core->command
2187 	 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2188     }
2189 
  /* Note that for some reason a spurious space is tacked onto the
     end of the args in some implementations (at least one, anyway),
     so strip it off if it exists.  */
2193   {
2194     char *command = elf_tdata (abfd)->core->command;
2195     int n = strlen (command);
2196 
2197     if (0 < n && command[n - 1] == ' ')
2198       command[n - 1] = '\0';
2199   }
2200 
2201   return TRUE;
2202 }
2203 
2204 static char *
2205 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2206 				int note_type, ...)
2207 {
2208   switch (note_type)
2209     {
2210     default:
2211       return NULL;
2212 
2213     case NT_PRPSINFO:
2214       {
2215 	char data[124] ATTRIBUTE_NONSTRING;
2216 	va_list ap;
2217 
2218 	va_start (ap, note_type);
2219 	memset (data, 0, sizeof (data));
2220 	strncpy (data + 28, va_arg (ap, const char *), 16);
2221 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2222 	DIAGNOSTIC_PUSH;
2223 	/* GCC 8.0 and 8.1 warn about 80 equals destination size with
2224 	   -Wstringop-truncation:
2225 	   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85643
2226 	 */
2227 	DIAGNOSTIC_IGNORE_STRINGOP_TRUNCATION;
2228 #endif
2229 	strncpy (data + 44, va_arg (ap, const char *), 80);
2230 #if GCC_VERSION == 8000 || GCC_VERSION == 8001
2231 	DIAGNOSTIC_POP;
2232 #endif
2233 	va_end (ap);
2234 
2235 	return elfcore_write_note (abfd, buf, bufsiz,
2236 				   "CORE", note_type, data, sizeof (data));
2237       }
2238 
2239     case NT_PRSTATUS:
2240       {
2241 	char data[148];
2242 	va_list ap;
2243 	long pid;
2244 	int cursig;
2245 	const void *greg;
2246 
2247 	va_start (ap, note_type);
2248 	memset (data, 0, sizeof (data));
2249 	pid = va_arg (ap, long);
2250 	bfd_put_32 (abfd, pid, data + 24);
2251 	cursig = va_arg (ap, int);
2252 	bfd_put_16 (abfd, cursig, data + 12);
2253 	greg = va_arg (ap, const void *);
2254 	memcpy (data + 72, greg, 72);
2255 	va_end (ap);
2256 
2257 	return elfcore_write_note (abfd, buf, bufsiz,
2258 				   "CORE", note_type, data, sizeof (data));
2259       }
2260     }
2261 }
2262 
2263 #define TARGET_LITTLE_SYM		arm_elf32_le_vec
2264 #define TARGET_LITTLE_NAME		"elf32-littlearm"
2265 #define TARGET_BIG_SYM			arm_elf32_be_vec
2266 #define TARGET_BIG_NAME			"elf32-bigarm"
2267 
2268 #define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
2269 #define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
2270 #define elf_backend_write_core_note	elf32_arm_nabi_write_core_note
2271 
2272 typedef unsigned long int insn32;
2273 typedef unsigned short int insn16;
2274 
2275 /* In lieu of proper flags, assume all EABIv4 or later objects are
2276    interworkable.  */
2277 #define INTERWORK_FLAG(abfd)  \
2278   (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2279   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2280   || ((abfd)->flags & BFD_LINKER_CREATED))
2281 
/* The linker script knows the section names for placement.
   The entry_names are used to do simple name mangling on the stubs.
   Given a function name and its type, the stub can be found.  The
   name can be changed.  The only requirement is that the %s be
   present (see the illustrative example below).  */
2286 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2287 #define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"
2288 
2289 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2290 #define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"
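/* Illustrative example (not part of the build): the entry-name templates
   above are plain printf-style formats, so for a hypothetical Thumb
   function "foo" called from ARM code the glue entry is named
   "__foo_from_arm", and "__foo_from_thumb" for the opposite direction,
   e.g.

     char tmp_name[64];
     sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, "foo");

   yields "__foo_from_arm".  */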
2291 
2292 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2293 #define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"
2294 
2295 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2296 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"
2297 
2298 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2299 #define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"
2300 
2301 #define STUB_ENTRY_NAME   "__%s_veneer"
2302 
2303 #define CMSE_PREFIX "__acle_se_"
2304 
2305 #define CMSE_STUB_NAME ".gnu.sgstubs"
2306 
2307 /* The name of the dynamic interpreter.  This is put in the .interp
2308    section.  */
2309 #define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2310 
2311 /* FDPIC default stack size.  */
2312 #define DEFAULT_STACK_SIZE 0x8000
2313 
2314 static const unsigned long tls_trampoline [] =
2315 {
2316   0xe08e0000,		/* add r0, lr, r0 */
2317   0xe5901004,		/* ldr r1, [r0,#4] */
2318   0xe12fff11,		/* bx  r1 */
2319 };
2320 
2321 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2322 {
2323   0xe52d2004, /*	push    {r2}			*/
2324   0xe59f200c, /*      ldr     r2, [pc, #3f - . - 8]	*/
2325   0xe59f100c, /*      ldr     r1, [pc, #4f - . - 8]	*/
2326   0xe79f2002, /* 1:   ldr     r2, [pc, r2]		*/
2327   0xe081100f, /* 2:   add     r1, pc			*/
2328   0xe12fff12, /*      bx      r2			*/
2329   0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
2330 				+ dl_tlsdesc_lazy_resolver(GOT)   */
2331   0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2332 };
2333 
/* NOTE: [Thumb nop sequence]
   When adding code that transitions from Thumb to Arm, the instruction used
   for the alignment padding should be 0xe7fd (b .-2) rather than a nop, for
   performance reasons.  */
2338 
2339 /* ARM FDPIC PLT entry.  */
2340 /* The last 5 words contain PLT lazy fragment code and data.  */
2341 static const bfd_vma elf32_arm_fdpic_plt_entry [] =
2342   {
2343     0xe59fc008,    /* ldr     r12, .L1 */
2344     0xe08cc009,    /* add     r12, r12, r9 */
2345     0xe59c9004,    /* ldr     r9, [r12, #4] */
2346     0xe59cf000,    /* ldr     pc, [r12] */
2347     0x00000000,    /* L1.     .word   foo(GOTOFFFUNCDESC) */
2348     0x00000000,    /* L1.     .word   foo(funcdesc_value_reloc_offset) */
2349     0xe51fc00c,    /* ldr     r12, [pc, #-12] */
2350     0xe92d1000,    /* push    {r12} */
2351     0xe599c004,    /* ldr     r12, [r9, #4] */
2352     0xe599f000,    /* ldr     pc, [r9] */
2353   };
2354 
2355 /* Thumb FDPIC PLT entry.  */
2356 /* The last 5 words contain PLT lazy fragment code and data.  */
2357 static const bfd_vma elf32_arm_fdpic_thumb_plt_entry [] =
2358   {
2359     0xc00cf8df,    /* ldr.w   r12, .L1 */
2360     0x0c09eb0c,    /* add.w   r12, r12, r9 */
2361     0x9004f8dc,    /* ldr.w   r9, [r12, #4] */
2362     0xf000f8dc,    /* ldr.w   pc, [r12] */
2363     0x00000000,    /* .L1     .word   foo(GOTOFFFUNCDESC) */
2364     0x00000000,    /* .L2     .word   foo(funcdesc_value_reloc_offset) */
2365     0xc008f85f,    /* ldr.w   r12, .L2 */
2366     0xcd04f84d,    /* push    {r12} */
2367     0xc004f8d9,    /* ldr.w   r12, [r9, #4] */
2368     0xf000f8d9,    /* ldr.w   pc, [r9] */
2369   };
2370 
2371 #ifdef FOUR_WORD_PLT
2372 
2373 /* The first entry in a procedure linkage table looks like
2374    this.  It is set up so that any shared library function that is
2375    called before the relocation has been set up calls the dynamic
2376    linker first.  */
2377 static const bfd_vma elf32_arm_plt0_entry [] =
2378 {
2379   0xe52de004,		/* str   lr, [sp, #-4]! */
2380   0xe59fe010,		/* ldr   lr, [pc, #16]  */
2381   0xe08fe00e,		/* add   lr, pc, lr     */
2382   0xe5bef008,		/* ldr   pc, [lr, #8]!  */
2383 };
2384 
2385 /* Subsequent entries in a procedure linkage table look like
2386    this.  */
2387 static const bfd_vma elf32_arm_plt_entry [] =
2388 {
2389   0xe28fc600,		/* add   ip, pc, #NN	*/
2390   0xe28cca00,		/* add	 ip, ip, #NN	*/
2391   0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
2392   0x00000000,		/* unused		*/
2393 };
2394 
2395 #else /* not FOUR_WORD_PLT */
2396 
2397 /* The first entry in a procedure linkage table looks like
2398    this.  It is set up so that any shared library function that is
2399    called before the relocation has been set up calls the dynamic
2400    linker first.  */
2401 static const bfd_vma elf32_arm_plt0_entry [] =
2402 {
2403   0xe52de004,		/* str	 lr, [sp, #-4]! */
2404   0xe59fe004,		/* ldr	 lr, [pc, #4]	*/
2405   0xe08fe00e,		/* add	 lr, pc, lr	*/
2406   0xe5bef008,		/* ldr	 pc, [lr, #8]!	*/
2407   0x00000000,		/* &GOT[0] - .		*/
2408 };
2409 
/* By default, subsequent entries in a procedure linkage table look like
   this.  Offsets that do not fit into 28 bits will cause a link error.  */
2412 static const bfd_vma elf32_arm_plt_entry_short [] =
2413 {
2414   0xe28fc600,		/* add   ip, pc, #0xNN00000 */
2415   0xe28cca00,		/* add	 ip, ip, #0xNN000   */
2416   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
2417 };
2418 
2419 /* When explicitly asked, we'll use this "long" entry format
2420    which can cope with arbitrary displacements.  */
2421 static const bfd_vma elf32_arm_plt_entry_long [] =
2422 {
2423   0xe28fc200,		/* add	 ip, pc, #0xN0000000 */
2424   0xe28cc600,		/* add	 ip, ip, #0xNN00000  */
2425   0xe28cca00,		/* add	 ip, ip, #0xNN000    */
2426   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
2427 };
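/* Illustrative note: the "long" form above splits a 32-bit displacement
   across the four immediates shown (bits 31-28, 27-20, 19-12 and 11-0
   respectively).  For a hypothetical displacement of 0x12345678 (ignoring
   the PC-read bias that the real offset calculation also folds in) the
   entry would be filled in roughly as

     add  ip, pc, #0x10000000
     add  ip, ip, #0x02300000
     add  ip, ip, #0x00045000
     ldr  pc, [ip, #0x678]!

   The "short" form omits the first add, which is why it only covers
   displacements that fit in 28 bits.  */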
2428 
2429 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2430 
2431 #endif /* not FOUR_WORD_PLT */
2432 
2433 /* The first entry in a procedure linkage table looks like this.
2434    It is set up so that any shared library function that is called before the
2435    relocation has been set up calls the dynamic linker first.  */
2436 static const bfd_vma elf32_thumb2_plt0_entry [] =
2437 {
2438   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction may be encoded in one or two array elements.  */
2440   0xf8dfb500,		/* push	   {lr}		 */
2441   0x44fee008,		/* ldr.w   lr, [pc, #8]	 */
2442 			/* add	   lr, pc	 */
2443   0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
2444   0x00000000,		/* &GOT[0] - .		 */
2445 };
2446 
/* Subsequent entries in a procedure linkage table for a Thumb-only
   target look like this.  */
2449 static const bfd_vma elf32_thumb2_plt_entry [] =
2450 {
2451   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
     an instruction may be encoded in one or two array elements.  */
2453   0x0c00f240,		/* movw	   ip, #0xNNNN	  */
2454   0x0c00f2c0,		/* movt	   ip, #0xNNNN	  */
2455   0xf8dc44fc,		/* add	   ip, pc	  */
2456   0xe7fdf000		/* ldr.w   pc, [ip]	  */
2457 			/* b      .-2		  */
2458 };
2459 
2460 /* The format of the first entry in the procedure linkage table
2461    for a VxWorks executable.  */
2462 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2463 {
2464   0xe52dc008,		/* str	  ip,[sp,#-8]!			*/
2465   0xe59fc000,		/* ldr	  ip,[pc]			*/
2466   0xe59cf008,		/* ldr	  pc,[ip,#8]			*/
2467   0x00000000,		/* .long  _GLOBAL_OFFSET_TABLE_		*/
2468 };
2469 
2470 /* The format of subsequent entries in a VxWorks executable.  */
2471 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2472 {
2473   0xe59fc000,	      /* ldr	ip,[pc]			*/
2474   0xe59cf000,	      /* ldr	pc,[ip]			*/
2475   0x00000000,	      /* .long	@got				*/
2476   0xe59fc000,	      /* ldr	ip,[pc]			*/
2477   0xea000000,	      /* b	_PLT				*/
2478   0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela)	*/
2479 };
2480 
2481 /* The format of entries in a VxWorks shared library.  */
2482 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2483 {
2484   0xe59fc000,	      /* ldr	ip,[pc]			*/
2485   0xe79cf009,	      /* ldr	pc,[ip,r9]			*/
2486   0x00000000,	      /* .long	@got				*/
2487   0xe59fc000,	      /* ldr	ip,[pc]			*/
2488   0xe599f008,	      /* ldr	pc,[r9,#8]			*/
2489   0x00000000,	      /* .long	@pltindex*sizeof(Elf32_Rela)	*/
2490 };
2491 
2492 /* An initial stub used if the PLT entry is referenced from Thumb code.  */
2493 #define PLT_THUMB_STUB_SIZE 4
2494 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2495 {
2496   0x4778,		/* bx pc */
2497   0xe7fd		/* b .-2 */
2498 };
2499 
2500 /* The entries in a PLT when using a DLL-based target with multiple
2501    address spaces.  */
2502 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2503 {
2504   0xe51ff004,	      /* ldr   pc, [pc, #-4] */
2505   0x00000000,	      /* dcd   R_ARM_GLOB_DAT(X) */
2506 };
2507 
2508 /* The first entry in a procedure linkage table looks like
2509    this.  It is set up so that any shared library function that is
2510    called before the relocation has been set up calls the dynamic
2511    linker first.  */
2512 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2513 {
2514   /* First bundle: */
2515   0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
2516   0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
2517   0xe08cc00f,		/* add	ip, ip, pc			*/
2518   0xe52dc008,		/* str	ip, [sp, #-8]!			*/
2519   /* Second bundle: */
2520   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2521   0xe59cc000,		/* ldr	ip, [ip]			*/
2522   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2523   0xe12fff1c,		/* bx	ip				*/
2524   /* Third bundle: */
2525   0xe320f000,		/* nop					*/
2526   0xe320f000,		/* nop					*/
2527   0xe320f000,		/* nop					*/
2528   /* .Lplt_tail: */
2529   0xe50dc004,		/* str	ip, [sp, #-4]			*/
2530   /* Fourth bundle: */
2531   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2532   0xe59cc000,		/* ldr	ip, [ip]			*/
2533   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2534   0xe12fff1c,		/* bx	ip				*/
2535 };
2536 #define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)
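/* ARM_NACL_PLT_TAIL_OFFSET is the byte offset of the ".Lplt_tail" word
   (index 11, hence 11 * 4) in the PLT0 entry above; the "b .Lplt_tail"
   in each subsequent entry below branches to this shared tail.  */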
2537 
2538 /* Subsequent entries in a procedure linkage table look like this.  */
2539 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2540 {
2541   0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
2542   0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
2543   0xe08cc00f,		/* add	ip, ip, pc			*/
2544   0xea000000,		/* b	.Lplt_tail			*/
2545 };
2546 
2547 #define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
2548 #define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
2549 #define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
2550 #define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
2551 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2552 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2553 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2554 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
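/* Illustrative values for the limits above; the +8/+4 corrections account
   for the PC reading as the branch address plus 8 in ARM state and plus 4
   in Thumb state:

     ARM_MAX_FWD_BRANCH_OFFSET       =  0x02000004  (~ +32 MiB)
     ARM_MAX_BWD_BRANCH_OFFSET       = -0x01fffff8  (~ -32 MiB)
     THM_MAX_FWD_BRANCH_OFFSET       =  0x00400002  (~ +4 MiB, v4t Thumb BL)
     THM2_MAX_FWD_BRANCH_OFFSET      =  0x01000002  (~ +16 MiB, Thumb-2 B.W/BL)
     THM2_MAX_FWD_COND_BRANCH_OFFSET =  0x00100002  (~ +1 MiB)  */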
2555 
2556 enum stub_insn_type
2557 {
2558   THUMB16_TYPE = 1,
2559   THUMB32_TYPE,
2560   ARM_TYPE,
2561   DATA_TYPE
2562 };
2563 
2564 #define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
2565 /* A bit of a hack.  A Thumb conditional branch, in which the proper condition
2566    is inserted in arm_build_one_stub().  */
2567 #define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
2568 #define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
2569 #define THUMB32_MOVT(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVT_ABS, 0}
2570 #define THUMB32_MOVW(X)		{(X), THUMB32_TYPE, R_ARM_THM_MOVW_ABS_NC, 0}
2571 #define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2572 #define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
2573 #define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2574 #define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}
2575 
2576 typedef struct
2577 {
2578   bfd_vma	       data;
2579   enum stub_insn_type  type;
2580   unsigned int	       r_type;
2581   int		       reloc_addend;
2582 }  insn_sequence;
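/* For example, ARM_INSN (0xe51ff004) above expands to the insn_sequence
   value {0xe51ff004, ARM_TYPE, R_ARM_NONE, 0}, and DATA_WORD (0,
   R_ARM_ABS32, 0) to {0, DATA_TYPE, R_ARM_ABS32, 0}; the stub templates
   below are therefore arrays of {data, type, r_type, reloc_addend}
   entries which arm_build_one_stub() emits and relocates in order.  */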
2583 
2584 /* See note [Thumb nop sequence] when adding a veneer.  */
2585 
2586 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2587    to reach the stub if necessary.  */
2588 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2589 {
2590   ARM_INSN (0xe51ff004),	    /* ldr   pc, [pc, #-4] */
2591   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2592 };
2593 
2594 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2595    available.  */
2596 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2597 {
2598   ARM_INSN (0xe59fc000),	    /* ldr   ip, [pc, #0] */
2599   ARM_INSN (0xe12fff1c),	    /* bx    ip */
2600   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2601 };
2602 
2603 /* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
2604 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2605 {
2606   THUMB16_INSN (0xb401),	     /* push {r0} */
2607   THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
2608   THUMB16_INSN (0x4684),	     /* mov  ip, r0 */
2609   THUMB16_INSN (0xbc01),	     /* pop  {r0} */
2610   THUMB16_INSN (0x4760),	     /* bx   ip */
2611   THUMB16_INSN (0xbf00),	     /* nop */
2612   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2613 };
2614 
2615 /* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
2616 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2617 {
2618   THUMB32_INSN (0xf85ff000),	     /* ldr.w  pc, [pc, #-0] */
2619   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(x) */
2620 };
2621 
2622 /* Thumb -> Thumb long branch stub. Used for PureCode sections on Thumb2
2623    M-profile architectures.  */
2624 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only_pure[] =
2625 {
2626   THUMB32_MOVW (0xf2400c00),	     /* mov.w ip, R_ARM_MOVW_ABS_NC */
2627   THUMB32_MOVT (0xf2c00c00),	     /* movt  ip, R_ARM_MOVT_ABS << 16 */
2628   THUMB16_INSN (0x4760),	     /* bx   ip */
2629 };
2630 
2631 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2632    allowed.  */
2633 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2634 {
2635   THUMB16_INSN (0x4778),	     /* bx   pc */
2636   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2637   ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
2638   ARM_INSN (0xe12fff1c),	     /* bx   ip */
2639   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2640 };
2641 
2642 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2643    available.  */
2644 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2645 {
2646   THUMB16_INSN (0x4778),	     /* bx   pc */
2647   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2648   ARM_INSN (0xe51ff004),	     /* ldr   pc, [pc, #-4] */
2649   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
2650 };
2651 
2652 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2653    one, when the destination is close enough.  */
2654 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2655 {
2656   THUMB16_INSN (0x4778),	     /* bx   pc */
2657   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2658   ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
2659 };
2660 
2661 /* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
2662    blx to reach the stub if necessary.  */
2663 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2664 {
2665   ARM_INSN (0xe59fc000),	     /* ldr   ip, [pc] */
2666   ARM_INSN (0xe08ff00c),	     /* add   pc, pc, ip */
2667   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2668 };
2669 
/* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
   blx to reach the stub if necessary.  We cannot add into pc; it is
   not guaranteed to cause a mode switch (the behaviour differs between
   ARMv6 and ARMv7).  */
2674 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2675 {
2676   ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
2677   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2678   ARM_INSN (0xe12fff1c),	     /* bx    ip */
2679   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2680 };
2681 
2682 /* V4T ARM -> ARM long branch stub, PIC.  */
2683 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2684 {
2685   ARM_INSN (0xe59fc004),	     /* ldr   ip, [pc, #4] */
2686   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2687   ARM_INSN (0xe12fff1c),	     /* bx    ip */
2688   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2689 };
2690 
2691 /* V4T Thumb -> ARM long branch stub, PIC.  */
2692 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2693 {
2694   THUMB16_INSN (0x4778),	     /* bx   pc */
2695   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2696   ARM_INSN (0xe59fc000),	     /* ldr  ip, [pc, #0] */
2697   ARM_INSN (0xe08cf00f),	     /* add  pc, ip, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
2699 };
2700 
2701 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2702    architectures.  */
2703 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2704 {
2705   THUMB16_INSN (0xb401),	     /* push {r0} */
2706   THUMB16_INSN (0x4802),	     /* ldr  r0, [pc, #8] */
2707   THUMB16_INSN (0x46fc),	     /* mov  ip, pc */
2708   THUMB16_INSN (0x4484),	     /* add  ip, r0 */
2709   THUMB16_INSN (0xbc01),	     /* pop  {r0} */
2710   THUMB16_INSN (0x4760),	     /* bx   ip */
  DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X+4) */
2712 };
2713 
2714 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2715    allowed.  */
2716 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2717 {
2718   THUMB16_INSN (0x4778),	     /* bx   pc */
2719   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2720   ARM_INSN (0xe59fc004),	     /* ldr  ip, [pc, #4] */
2721   ARM_INSN (0xe08fc00c),	     /* add   ip, pc, ip */
2722   ARM_INSN (0xe12fff1c),	     /* bx   ip */
2723   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
2724 };
2725 
2726 /* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
2727    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2728 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2729 {
2730   ARM_INSN (0xe59f1000),	     /* ldr   r1, [pc] */
2731   ARM_INSN (0xe08ff001),	     /* add   pc, pc, r1 */
2732   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2733 };
/* V4T Thumb -> TLS trampoline.  Lowest common denominator, which is a
2735 /* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
2736    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2737 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2738 {
2739   THUMB16_INSN (0x4778),	     /* bx   pc */
2740   THUMB16_INSN (0xe7fd),	     /* b   .-2 */
2741   ARM_INSN (0xe59f1000),	     /* ldr  r1, [pc, #0] */
2742   ARM_INSN (0xe081f00f),	     /* add  pc, r1, pc */
  DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X-4) */
2744 };
2745 
2746 /* NaCl ARM -> ARM long branch stub.  */
2747 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2748 {
2749   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2750   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2751   ARM_INSN (0xe12fff1c),		/* bx	ip */
2752   ARM_INSN (0xe320f000),		/* nop */
2753   ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
2754   DATA_WORD (0, R_ARM_ABS32, 0),	/* dcd	R_ARM_ABS32(X) */
2755   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2756   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2757 };
2758 
2759 /* NaCl ARM -> ARM long branch stub, PIC.  */
2760 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2761 {
2762   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2763   ARM_INSN (0xe08cc00f),		/* add	ip, ip, pc */
2764   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2765   ARM_INSN (0xe12fff1c),		/* bx	ip */
2766   ARM_INSN (0xe125be70),		/* bkpt	0x5be0 */
2767   DATA_WORD (0, R_ARM_REL32, 8),	/* dcd	R_ARM_REL32(X+8) */
2768   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2769   DATA_WORD (0, R_ARM_NONE, 0),		/* .word 0 */
2770 };
2771 
2772 /* Stub used for transition to secure state (aka SG veneer).  */
2773 static const insn_sequence elf32_arm_stub_cmse_branch_thumb_only[] =
2774 {
2775   THUMB32_INSN (0xe97fe97f),		/* sg.  */
2776   THUMB32_B_INSN (0xf000b800, -4),	/* b.w original_branch_dest.  */
2777 };
2778 
2779 
2780 /* Cortex-A8 erratum-workaround stubs.  */
2781 
2782 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2783    can't use a conditional branch to reach this stub).  */
2784 
2785 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2786 {
2787   THUMB16_BCOND_INSN (0xd001),	       /* b<cond>.n true.  */
2788   THUMB32_B_INSN (0xf000b800, -4),     /* b.w insn_after_original_branch.  */
2789   THUMB32_B_INSN (0xf000b800, -4)      /* true: b.w original_branch_dest.  */
2790 };
2791 
2792 /* Stub used for b.w and bl.w instructions.  */
2793 
2794 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2795 {
2796   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2797 };
2798 
2799 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2800 {
2801   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2802 };
2803 
/* Stub used for Thumb-2 blx.w instructions.  The original blx.w instruction
   (which switches to ARM mode) is modified to point to this stub; the stub
   then jumps to the real destination using an ARM-mode branch.  */
2807 
2808 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2809 {
2810   ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
2811 };
2812 
2813 /* For each section group there can be a specially created linker section
2814    to hold the stubs for that group.  The name of the stub section is based
2815    upon the name of another section within that group with the suffix below
2816    applied.
2817 
2818    PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2819    create what appeared to be a linker stub section when it actually
2820    contained user code/data.  For example, consider this fragment:
2821 
2822      const char * stubborn_problems[] = { "np" };
2823 
2824    If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2825    section called:
2826 
2827      .data.rel.local.stubborn_problems
2828 
   This then causes problems in elf32_arm_build_stubs() as it triggers:
2830 
2831       // Ignore non-stub sections.
2832       if (!strstr (stub_sec->name, STUB_SUFFIX))
2833 	continue;
2834 
2835    And so the section would be ignored instead of being processed.  Hence
2836    the change in definition of STUB_SUFFIX to a name that cannot be a valid
2837    C identifier.  */
2838 #define STUB_SUFFIX ".__stub"
2839 
2840 /* One entry per long/short branch stub defined above.  */
2841 #define DEF_STUBS \
2842   DEF_STUB(long_branch_any_any)	\
2843   DEF_STUB(long_branch_v4t_arm_thumb) \
2844   DEF_STUB(long_branch_thumb_only) \
2845   DEF_STUB(long_branch_v4t_thumb_thumb)	\
2846   DEF_STUB(long_branch_v4t_thumb_arm) \
2847   DEF_STUB(short_branch_v4t_thumb_arm) \
2848   DEF_STUB(long_branch_any_arm_pic) \
2849   DEF_STUB(long_branch_any_thumb_pic) \
2850   DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2851   DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2852   DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2853   DEF_STUB(long_branch_thumb_only_pic) \
2854   DEF_STUB(long_branch_any_tls_pic) \
2855   DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2856   DEF_STUB(long_branch_arm_nacl) \
2857   DEF_STUB(long_branch_arm_nacl_pic) \
2858   DEF_STUB(cmse_branch_thumb_only) \
2859   DEF_STUB(a8_veneer_b_cond) \
2860   DEF_STUB(a8_veneer_b) \
2861   DEF_STUB(a8_veneer_bl) \
2862   DEF_STUB(a8_veneer_blx) \
2863   DEF_STUB(long_branch_thumb2_only) \
2864   DEF_STUB(long_branch_thumb2_only_pure)
2865 
2866 #define DEF_STUB(x) arm_stub_##x,
2867 enum elf32_arm_stub_type
2868 {
2869   arm_stub_none,
2870   DEF_STUBS
2871   max_stub_type
2872 };
2873 #undef DEF_STUB
2874 
2875 /* Note the first a8_veneer type.  */
2876 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2877 
2878 typedef struct
2879 {
2880   const insn_sequence* template_sequence;
2881   int template_size;
2882 } stub_def;
2883 
2884 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2885 static const stub_def stub_definitions[] =
2886 {
2887   {NULL, 0},
2888   DEF_STUBS
2889 };
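/* Illustrative expansion of the X-macro pattern above: with DEF_STUB(x)
   defined as "arm_stub_##x," the DEF_STUBS list yields the enum values
   arm_stub_long_branch_any_any, arm_stub_long_branch_v4t_arm_thumb, ...
   arm_stub_long_branch_thumb2_only_pure; with DEF_STUB(x) defined as
   "{elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)}," it yields one
   {template_sequence, template_size} pair per stub.  The leading {NULL, 0}
   entry corresponds to arm_stub_none, so stub_definitions can be indexed
   directly by enum elf32_arm_stub_type.  */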
2890 
2891 struct elf32_arm_stub_hash_entry
2892 {
2893   /* Base hash table entry structure.  */
2894   struct bfd_hash_entry root;
2895 
2896   /* The stub section.  */
2897   asection *stub_sec;
2898 
2899   /* Offset within stub_sec of the beginning of this stub.  */
2900   bfd_vma stub_offset;
2901 
2902   /* Given the symbol's value and its section we can determine its final
2903      value when building the stubs (so the stub knows where to jump).  */
2904   bfd_vma target_value;
2905   asection *target_section;
2906 
  /* Same as above, but for the source of the branch to the stub.  Used by
     the Cortex-A8 erratum workaround to patch the branch so that it targets
     the stub.  The source section does not need to be recorded, since
     Cortex-A8 erratum workaround stubs are only generated when both source
     and target are in the same section.  */
2912   bfd_vma source_value;
2913 
2914   /* The instruction which caused this stub to be generated (only valid for
2915      Cortex-A8 erratum workaround stubs at present).  */
2916   unsigned long orig_insn;
2917 
2918   /* The stub type.  */
2919   enum elf32_arm_stub_type stub_type;
2920   /* Its encoding size in bytes.  */
2921   int stub_size;
2922   /* Its template.  */
2923   const insn_sequence *stub_template;
2924   /* The size of the template (number of entries).  */
2925   int stub_template_size;
2926 
2927   /* The symbol table entry, if any, that this was derived from.  */
2928   struct elf32_arm_link_hash_entry *h;
2929 
2930   /* Type of branch.  */
2931   enum arm_st_branch_type branch_type;
2932 
2933   /* Where this stub is being called from, or, in the case of combined
2934      stub sections, the first input section in the group.  */
2935   asection *id_sec;
2936 
  /* The name for the local symbol at the start of this stub.  The
     stub name in the hash table has to be unique; this one does not,
     so it can be friendlier.  */
2940   char *output_name;
2941 };
2942 
2943 /* Used to build a map of a section.  This is required for mixed-endian
2944    code/data.  */
2945 
2946 typedef struct elf32_elf_section_map
2947 {
2948   bfd_vma vma;
2949   char type;
2950 }
2951 elf32_arm_section_map;
2952 
2953 /* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */
2954 
2955 typedef enum
2956 {
2957   VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2958   VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2959   VFP11_ERRATUM_ARM_VENEER,
2960   VFP11_ERRATUM_THUMB_VENEER
2961 }
2962 elf32_vfp11_erratum_type;
2963 
2964 typedef struct elf32_vfp11_erratum_list
2965 {
2966   struct elf32_vfp11_erratum_list *next;
2967   bfd_vma vma;
2968   union
2969   {
2970     struct
2971     {
2972       struct elf32_vfp11_erratum_list *veneer;
2973       unsigned int vfp_insn;
2974     } b;
2975     struct
2976     {
2977       struct elf32_vfp11_erratum_list *branch;
2978       unsigned int id;
2979     } v;
2980   } u;
2981   elf32_vfp11_erratum_type type;
2982 }
2983 elf32_vfp11_erratum_list;
2984 
2985 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2986    veneer.  */
2987 typedef enum
2988 {
2989   STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2990   STM32L4XX_ERRATUM_VENEER
2991 }
2992 elf32_stm32l4xx_erratum_type;
2993 
2994 typedef struct elf32_stm32l4xx_erratum_list
2995 {
2996   struct elf32_stm32l4xx_erratum_list *next;
2997   bfd_vma vma;
2998   union
2999   {
3000     struct
3001     {
3002       struct elf32_stm32l4xx_erratum_list *veneer;
3003       unsigned int insn;
3004     } b;
3005     struct
3006     {
3007       struct elf32_stm32l4xx_erratum_list *branch;
3008       unsigned int id;
3009     } v;
3010   } u;
3011   elf32_stm32l4xx_erratum_type type;
3012 }
3013 elf32_stm32l4xx_erratum_list;
3014 
3015 typedef enum
3016 {
3017   DELETE_EXIDX_ENTRY,
3018   INSERT_EXIDX_CANTUNWIND_AT_END
3019 }
3020 arm_unwind_edit_type;
3021 
3022 /* A (sorted) list of edits to apply to an unwind table.  */
3023 typedef struct arm_unwind_table_edit
3024 {
3025   arm_unwind_edit_type type;
3026   /* Note: we sometimes want to insert an unwind entry corresponding to a
3027      section different from the one we're currently writing out, so record the
3028      (text) section this edit relates to here.  */
3029   asection *linked_section;
3030   unsigned int index;
3031   struct arm_unwind_table_edit *next;
3032 }
3033 arm_unwind_table_edit;
3034 
3035 typedef struct _arm_elf_section_data
3036 {
3037   /* Information about mapping symbols.  */
3038   struct bfd_elf_section_data elf;
3039   unsigned int mapcount;
3040   unsigned int mapsize;
3041   elf32_arm_section_map *map;
3042   /* Information about CPU errata.  */
3043   unsigned int erratumcount;
3044   elf32_vfp11_erratum_list *erratumlist;
3045   unsigned int stm32l4xx_erratumcount;
3046   elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
3047   unsigned int additional_reloc_count;
3048   /* Information about unwind tables.  */
3049   union
3050   {
3051     /* Unwind info attached to a text section.  */
3052     struct
3053     {
3054       asection *arm_exidx_sec;
3055     } text;
3056 
3057     /* Unwind info attached to an .ARM.exidx section.  */
3058     struct
3059     {
3060       arm_unwind_table_edit *unwind_edit_list;
3061       arm_unwind_table_edit *unwind_edit_tail;
3062     } exidx;
3063   } u;
3064 }
3065 _arm_elf_section_data;
3066 
3067 #define elf32_arm_section_data(sec) \
3068   ((_arm_elf_section_data *) elf_section_data (sec))
3069 
/* A fix which might be required for the Cortex-A8 Thumb-2 branch/TLB erratum.
   These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
   so they may be created multiple times: we use an array of these entries
   while relaxing, which we can refresh easily, then create stubs for each
   potentially erratum-triggering instruction once we've settled on a
   solution.  */
3075 
3076 struct a8_erratum_fix
3077 {
3078   bfd *input_bfd;
3079   asection *section;
3080   bfd_vma offset;
3081   bfd_vma target_offset;
3082   unsigned long orig_insn;
3083   char *stub_name;
3084   enum elf32_arm_stub_type stub_type;
3085   enum arm_st_branch_type branch_type;
3086 };
3087 
3088 /* A table of relocs applied to branches which might trigger the Cortex-A8
3089    erratum.  */
3090 
3091 struct a8_erratum_reloc
3092 {
3093   bfd_vma from;
3094   bfd_vma destination;
3095   struct elf32_arm_link_hash_entry *hash;
3096   const char *sym_name;
3097   unsigned int r_type;
3098   enum arm_st_branch_type branch_type;
3099   bfd_boolean non_a8_stub;
3100 };
3101 
3102 /* The size of the thread control block.  */
3103 #define TCB_SIZE	8
3104 
3105 /* ARM-specific information about a PLT entry, over and above the usual
3106    gotplt_union.  */
3107 struct arm_plt_info
3108 {
3109   /* We reference count Thumb references to a PLT entry separately,
3110      so that we can emit the Thumb trampoline only if needed.  */
3111   bfd_signed_vma thumb_refcount;
3112 
3113   /* Some references from Thumb code may be eliminated by BL->BLX
3114      conversion, so record them separately.  */
3115   bfd_signed_vma maybe_thumb_refcount;
3116 
3117   /* How many of the recorded PLT accesses were from non-call relocations.
3118      This information is useful when deciding whether anything takes the
3119      address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
3120      non-call references to the function should resolve directly to the
3121      real runtime target.  */
3122   unsigned int noncall_refcount;
3123 
3124   /* Since PLT entries have variable size if the Thumb prologue is
3125      used, we need to record the index into .got.plt instead of
3126      recomputing it from the PLT offset.  */
3127   bfd_signed_vma got_offset;
3128 };
3129 
3130 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
3131 struct arm_local_iplt_info
3132 {
3133   /* The information that is usually found in the generic ELF part of
3134      the hash table entry.  */
3135   union gotplt_union root;
3136 
3137   /* The information that is usually found in the ARM-specific part of
3138      the hash table entry.  */
3139   struct arm_plt_info arm;
3140 
3141   /* A list of all potential dynamic relocations against this symbol.  */
3142   struct elf_dyn_relocs *dyn_relocs;
3143 };
3144 
3145 /* Structure to handle FDPIC support for local functions.  */
3146 struct fdpic_local {
3147   unsigned int funcdesc_cnt;
3148   unsigned int gotofffuncdesc_cnt;
3149   int funcdesc_offset;
3150 };
3151 
3152 struct elf_arm_obj_tdata
3153 {
3154   struct elf_obj_tdata root;
3155 
3156   /* tls_type for each local got entry.  */
3157   char *local_got_tls_type;
3158 
3159   /* GOTPLT entries for TLS descriptors.  */
3160   bfd_vma *local_tlsdesc_gotent;
3161 
3162   /* Information for local symbols that need entries in .iplt.  */
3163   struct arm_local_iplt_info **local_iplt;
3164 
3165   /* Zero to warn when linking objects with incompatible enum sizes.  */
3166   int no_enum_size_warning;
3167 
3168   /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
3169   int no_wchar_size_warning;
3170 
3171   /* Maintains FDPIC counters and funcdesc info.  */
3172   struct fdpic_local *local_fdpic_cnts;
3173 };
3174 
3175 #define elf_arm_tdata(bfd) \
3176   ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
3177 
3178 #define elf32_arm_local_got_tls_type(bfd) \
3179   (elf_arm_tdata (bfd)->local_got_tls_type)
3180 
3181 #define elf32_arm_local_tlsdesc_gotent(bfd) \
3182   (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
3183 
3184 #define elf32_arm_local_iplt(bfd) \
3185   (elf_arm_tdata (bfd)->local_iplt)
3186 
3187 #define elf32_arm_local_fdpic_cnts(bfd) \
3188   (elf_arm_tdata (bfd)->local_fdpic_cnts)
3189 
3190 #define is_arm_elf(bfd) \
3191   (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
3192    && elf_tdata (bfd) != NULL \
3193    && elf_object_id (bfd) == ARM_ELF_DATA)
3194 
3195 static bfd_boolean
3196 elf32_arm_mkobject (bfd *abfd)
3197 {
3198   return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
3199 				  ARM_ELF_DATA);
3200 }
3201 
3202 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
3203 
3204 /* Structure to handle FDPIC support for extern functions.  */
3205 struct fdpic_global {
3206   unsigned int gotofffuncdesc_cnt;
3207   unsigned int gotfuncdesc_cnt;
3208   unsigned int funcdesc_cnt;
3209   int funcdesc_offset;
3210   int gotfuncdesc_offset;
3211 };
3212 
3213 /* Arm ELF linker hash entry.  */
3214 struct elf32_arm_link_hash_entry
3215 {
3216   struct elf_link_hash_entry root;
3217 
3218   /* Track dynamic relocs copied for this symbol.  */
3219   struct elf_dyn_relocs *dyn_relocs;
3220 
3221   /* ARM-specific PLT information.  */
3222   struct arm_plt_info plt;
3223 
3224 #define GOT_UNKNOWN	0
3225 #define GOT_NORMAL	1
3226 #define GOT_TLS_GD	2
3227 #define GOT_TLS_IE	4
3228 #define GOT_TLS_GDESC	8
3229 #define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
3230   unsigned int tls_type : 8;
3231 
3232   /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
3233   unsigned int is_iplt : 1;
3234 
3235   unsigned int unused : 23;
3236 
3237   /* Offset of the GOTPLT entry reserved for the TLS descriptor,
3238      starting at the end of the jump table.  */
3239   bfd_vma tlsdesc_got;
3240 
3241   /* The symbol marking the real symbol location for exported thumb
3242      symbols with Arm stubs.  */
3243   struct elf_link_hash_entry *export_glue;
3244 
3245   /* A pointer to the most recently used stub hash entry against this
3246      symbol.  */
3247   struct elf32_arm_stub_hash_entry *stub_cache;
3248 
3249   /* Counter for FDPIC relocations against this symbol.  */
3250   struct fdpic_global fdpic_cnts;
3251 };
3252 
3253 /* Traverse an arm ELF linker hash table.  */
3254 #define elf32_arm_link_hash_traverse(table, func, info)			\
3255   (elf_link_hash_traverse						\
3256    (&(table)->root,							\
3257     (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
3258     (info)))
3259 
3260 /* Get the ARM elf linker hash table from a link_info structure.  */
3261 #define elf32_arm_hash_table(info) \
3262   (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3263   == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3264 
3265 #define arm_stub_hash_lookup(table, string, create, copy) \
3266   ((struct elf32_arm_stub_hash_entry *) \
3267    bfd_hash_lookup ((table), (string), (create), (copy)))
3268 
3269 /* Array to keep track of which stub sections have been created, and
3270    information on stub grouping.  */
3271 struct map_stub
3272 {
3273   /* This is the section to which stubs in the group will be
3274      attached.  */
3275   asection *link_sec;
3276   /* The stub section.  */
3277   asection *stub_sec;
3278 };
3279 
3280 #define elf32_arm_compute_jump_table_size(htab) \
3281   ((htab)->next_tls_desc_index * 4)
3282 
3283 /* ARM ELF linker hash table.  */
3284 struct elf32_arm_link_hash_table
3285 {
3286   /* The main hash table.  */
3287   struct elf_link_hash_table root;
3288 
3289   /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
3290   bfd_size_type thumb_glue_size;
3291 
3292   /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
3293   bfd_size_type arm_glue_size;
3294 
3295   /* The size in bytes of section containing the ARMv4 BX veneers.  */
3296   bfd_size_type bx_glue_size;
3297 
3298   /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
3299      veneer has been populated.  */
3300   bfd_vma bx_glue_offset[15];
3301 
3302   /* The size in bytes of the section containing glue for VFP11 erratum
3303      veneers.  */
3304   bfd_size_type vfp11_erratum_glue_size;
3305 
3306   /* The size in bytes of the section containing glue for STM32L4XX erratum
3307      veneers.  */
3308   bfd_size_type stm32l4xx_erratum_glue_size;
3309 
3310   /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
3311      holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3312      elf32_arm_write_section().  */
3313   struct a8_erratum_fix *a8_erratum_fixes;
3314   unsigned int num_a8_erratum_fixes;
3315 
3316   /* An arbitrary input BFD chosen to hold the glue sections.  */
3317   bfd * bfd_of_glue_owner;
3318 
3319   /* Nonzero to output a BE8 image.  */
3320   int byteswap_code;
3321 
3322   /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3323      Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
3324   int target1_is_rel;
3325 
3326   /* The relocation to use for R_ARM_TARGET2 relocations.  */
3327   int target2_reloc;
3328 
3329   /* 0 = Ignore R_ARM_V4BX.
3330      1 = Convert BX to MOV PC.
3331      2 = Generate v4 interworking stubs.  */
3332   int fix_v4bx;
3333 
3334   /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
3335   int fix_cortex_a8;
3336 
3337   /* Whether we should fix the ARM1176 BLX immediate issue.  */
3338   int fix_arm1176;
3339 
3340   /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
3341   int use_blx;
3342 
3343   /* What sort of code sequences we should look for which may trigger the
3344      VFP11 denorm erratum.  */
3345   bfd_arm_vfp11_fix vfp11_fix;
3346 
3347   /* Global counter for the number of fixes we have emitted.  */
3348   int num_vfp11_fixes;
3349 
3350   /* What sort of code sequences we should look for which may trigger the
3351      STM32L4XX erratum.  */
3352   bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3353 
3354   /* Global counter for the number of fixes we have emitted.  */
3355   int num_stm32l4xx_fixes;
3356 
3357   /* Nonzero to force PIC branch veneers.  */
3358   int pic_veneer;
3359 
3360   /* The number of bytes in the initial entry in the PLT.  */
3361   bfd_size_type plt_header_size;
3362 
3363   /* The number of bytes in the subsequent PLT entries.  */
3364   bfd_size_type plt_entry_size;
3365 
3366   /* True if the target system is VxWorks.  */
3367   int vxworks_p;
3368 
3369   /* True if the target system is Symbian OS.  */
3370   int symbian_p;
3371 
3372   /* True if the target system is Native Client.  */
3373   int nacl_p;
3374 
3375   /* True if the target uses REL relocations.  */
3376   bfd_boolean use_rel;
3377 
3378   /* Nonzero if import library must be a secure gateway import library
3379      as per ARMv8-M Security Extensions.  */
3380   int cmse_implib;
3381 
3382   /* The import library whose symbols' address must remain stable in
3383      the import library generated.  */
3384   bfd *in_implib_bfd;
3385 
3386   /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
3387   bfd_vma next_tls_desc_index;
3388 
3389   /* How many R_ARM_TLS_DESC relocations were generated so far.  */
3390   bfd_vma num_tls_desc;
3391 
3392   /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
3393   asection *srelplt2;
3394 
3395   /* The offset into splt of the PLT entry for the TLS descriptor
3396      resolver.  Special values are 0, if not necessary (or not found
3397      to be necessary yet), and -1 if needed but not determined
3398      yet.  */
3399   bfd_vma dt_tlsdesc_plt;
3400 
3401   /* The offset into sgot of the GOT entry used by the PLT entry
3402      above.  */
3403   bfd_vma dt_tlsdesc_got;
3404 
3405   /* Offset in .plt section of tls_arm_trampoline.  */
3406   bfd_vma tls_trampoline;
3407 
3408   /* Data for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
3409   union
3410   {
3411     bfd_signed_vma refcount;
3412     bfd_vma offset;
3413   } tls_ldm_got;
3414 
3415   /* Small local sym cache.  */
3416   struct sym_cache sym_cache;
3417 
3418   /* For convenience in allocate_dynrelocs.  */
3419   bfd * obfd;
3420 
3421   /* The amount of space used by the reserved portion of the sgotplt
3422      section, plus whatever space is used by the jump slots.  */
3423   bfd_vma sgotplt_jump_table_size;
3424 
3425   /* The stub hash table.  */
3426   struct bfd_hash_table stub_hash_table;
3427 
3428   /* Linker stub bfd.  */
3429   bfd *stub_bfd;
3430 
3431   /* Linker call-backs.  */
3432   asection * (*add_stub_section) (const char *, asection *, asection *,
3433 				  unsigned int);
3434   void (*layout_sections_again) (void);
3435 
3436   /* Array to keep track of which stub sections have been created, and
3437      information on stub grouping.  */
3438   struct map_stub *stub_group;
3439 
3440   /* Input stub section holding secure gateway veneers.  */
3441   asection *cmse_stub_sec;
3442 
3443   /* Offset in cmse_stub_sec where new SG veneers (not in input import library)
3444      start to be allocated.  */
3445   bfd_vma new_cmse_stub_offset;
3446 
3447   /* Number of elements in stub_group.  */
3448   unsigned int top_id;
3449 
3450   /* Assorted information used by elf32_arm_size_stubs.  */
3451   unsigned int bfd_count;
3452   unsigned int top_index;
3453   asection **input_list;
3454 
3455   /* True if the target system uses FDPIC.  */
3456   int fdpic_p;
3457 
3458   /* Fixup section. Used for FDPIC.  */
3459   asection *srofixup;
3460 };
3461 
3462 /* Add an FDPIC read-only fixup.  */
3463 static void
3464 arm_elf_add_rofixup (bfd *output_bfd, asection *srofixup, bfd_vma offset)
3465 {
3466   bfd_vma fixup_offset;
3467 
3468   fixup_offset = srofixup->reloc_count++ * 4;
3469   BFD_ASSERT (fixup_offset < srofixup->size);
3470   bfd_put_32 (output_bfd, offset, srofixup->contents + fixup_offset);
3471 }
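
/* The result is a flat table of 32-bit addresses in the .rofixup section;
   broadly speaking, the FDPIC startup code walks this table at load time and
   relocates each word that the recorded addresses point at.  */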
3472 
3473 static inline int
3474 ctz (unsigned int mask)
3475 {
3476 #if GCC_VERSION >= 3004
3477   return __builtin_ctz (mask);
3478 #else
3479   unsigned int i;
3480 
3481   for (i = 0; i < 8 * sizeof (mask); i++)
3482     {
3483       if (mask & 0x1)
3484 	break;
3485       mask = (mask >> 1);
3486     }
3487   return i;
3488 #endif
3489 }
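
/* For example, ctz (0x28) is 3, the index of the least significant set bit.
   Note that __builtin_ctz (0) is undefined, while the fallback loop above
   returns 8 * sizeof (mask) for a zero MASK, so callers should avoid that
   case.  */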
3490 
3491 #if !defined (__NetBSD__) || (__NetBSD_Version__ < 600000000)
3492 static inline int
3493 elf32_arm_popcount (unsigned int mask)
3494 {
3495 #if GCC_VERSION >= 3004
3496   return __builtin_popcount (mask);
3497 #else
3498   unsigned int i;
3499   int sum = 0;
3500 
3501   for (i = 0; i < 8 * sizeof (mask); i++)
3502     {
3503       if (mask & 0x1)
3504 	sum++;
3505       mask = (mask >> 1);
3506     }
3507   return sum;
3508 #endif
3509 }
3510 #endif
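
/* For example, elf32_arm_popcount (0xf1) is 5.  The guard above skips this
   local definition on NetBSD 6 and later, presumably so that a
   system-provided popcount can be used instead.  */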
3511 
3512 static void elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
3513 				    asection *sreloc, Elf_Internal_Rela *rel);
3514 
3515 static void
3516 arm_elf_fill_funcdesc(bfd *output_bfd,
3517 		      struct bfd_link_info *info,
3518 		      int *funcdesc_offset,
3519 		      int dynindx,
3520 		      int offset,
3521 		      bfd_vma addr,
3522 		      bfd_vma dynreloc_value,
3523 		      bfd_vma seg)
3524 {
3525   if ((*funcdesc_offset & 1) == 0)
3526     {
3527       struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
3528       asection *sgot = globals->root.sgot;
3529 
3530       if (bfd_link_pic(info))
3531 	{
3532 	  asection *srelgot = globals->root.srelgot;
3533 	  Elf_Internal_Rela outrel;
3534 
3535 	  outrel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
3536 	  outrel.r_offset = sgot->output_section->vma + sgot->output_offset + offset;
3537 	  outrel.r_addend = 0;
3538 
3539 	  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
3540 	  bfd_put_32 (output_bfd, addr, sgot->contents + offset);
3541 	  bfd_put_32 (output_bfd, seg, sgot->contents + offset + 4);
3542 	}
3543       else
3544 	{
3545 	  struct elf_link_hash_entry *hgot = globals->root.hgot;
3546 	  bfd_vma got_value = hgot->root.u.def.value
3547 	    + hgot->root.u.def.section->output_section->vma
3548 	    + hgot->root.u.def.section->output_offset;
3549 
3550 	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
3551 			      sgot->output_section->vma + sgot->output_offset
3552 			      + offset);
3553 	  arm_elf_add_rofixup(output_bfd, globals->srofixup,
3554 			      sgot->output_section->vma + sgot->output_offset
3555 			      + offset + 4);
3556 	  bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + offset);
3557 	  bfd_put_32 (output_bfd, got_value, sgot->contents + offset + 4);
3558 	}
3559       *funcdesc_offset |= 1;
3560     }
3561 }
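
/* In short, a function descriptor is a pair of consecutive words in .got:
   the function's entry address followed by a GOT/segment value.  For PIC
   output the pair is covered by an R_ARM_FUNCDESC_VALUE dynamic relocation;
   otherwise the values are written directly and two .rofixup entries are
   emitted so they can be adjusted at load time.  */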
3562 
3563 /* Create an entry in an ARM ELF linker hash table.  */
3564 
3565 static struct bfd_hash_entry *
3566 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3567 			     struct bfd_hash_table * table,
3568 			     const char * string)
3569 {
3570   struct elf32_arm_link_hash_entry * ret =
3571     (struct elf32_arm_link_hash_entry *) entry;
3572 
3573   /* Allocate the structure if it has not already been allocated by a
3574      subclass.  */
3575   if (ret == NULL)
3576     ret = (struct elf32_arm_link_hash_entry *)
3577 	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3578   if (ret == NULL)
3579     return (struct bfd_hash_entry *) ret;
3580 
3581   /* Call the allocation method of the superclass.  */
3582   ret = ((struct elf32_arm_link_hash_entry *)
3583 	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3584 				     table, string));
3585   if (ret != NULL)
3586     {
3587       ret->dyn_relocs = NULL;
3588       ret->tls_type = GOT_UNKNOWN;
3589       ret->tlsdesc_got = (bfd_vma) -1;
3590       ret->plt.thumb_refcount = 0;
3591       ret->plt.maybe_thumb_refcount = 0;
3592       ret->plt.noncall_refcount = 0;
3593       ret->plt.got_offset = -1;
3594       ret->is_iplt = FALSE;
3595       ret->export_glue = NULL;
3596 
3597       ret->stub_cache = NULL;
3598 
3599       ret->fdpic_cnts.gotofffuncdesc_cnt = 0;
3600       ret->fdpic_cnts.gotfuncdesc_cnt = 0;
3601       ret->fdpic_cnts.funcdesc_cnt = 0;
3602       ret->fdpic_cnts.funcdesc_offset = -1;
3603       ret->fdpic_cnts.gotfuncdesc_offset = -1;
3604     }
3605 
3606   return (struct bfd_hash_entry *) ret;
3607 }
3608 
3609 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3610    symbols.  */
3611 
3612 static bfd_boolean
3613 elf32_arm_allocate_local_sym_info (bfd *abfd)
3614 {
3615   if (elf_local_got_refcounts (abfd) == NULL)
3616     {
3617       bfd_size_type num_syms;
3618       bfd_size_type size;
3619       char *data;
3620 
3621       num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3622       size = num_syms * (sizeof (bfd_signed_vma)
3623 			 + sizeof (struct arm_local_iplt_info *)
3624 			 + sizeof (bfd_vma)
3625 			 + sizeof (char)
3626 			 + sizeof (struct fdpic_local));
3627       data = bfd_zalloc (abfd, size);
3628       if (data == NULL)
3629 	return FALSE;
3630 
3631       elf32_arm_local_fdpic_cnts (abfd) = (struct fdpic_local *) data;
3632       data += num_syms * sizeof (struct fdpic_local);
3633 
3634       elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3635       data += num_syms * sizeof (bfd_signed_vma);
3636 
3637       elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3638       data += num_syms * sizeof (struct arm_local_iplt_info *);
3639 
3640       elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3641       data += num_syms * sizeof (bfd_vma);
3642 
3643       elf32_arm_local_got_tls_type (abfd) = data;
3644     }
3645   return TRUE;
3646 }
3647 
3648 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3649    to input bfd ABFD.  Create the information if it doesn't already exist.
3650    Return null if an allocation fails.  */
3651 
3652 static struct arm_local_iplt_info *
3653 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3654 {
3655   struct arm_local_iplt_info **ptr;
3656 
3657   if (!elf32_arm_allocate_local_sym_info (abfd))
3658     return NULL;
3659 
3660   BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3661   ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3662   if (*ptr == NULL)
3663     *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3664   return *ptr;
3665 }
3666 
3667 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3668    in ABFD's symbol table.  If the symbol is global, H points to its
3669    hash table entry, otherwise H is null.
3670 
3671    Return true if the symbol does have PLT information.  When returning
3672    true, point *ROOT_PLT at the target-independent reference count/offset
3673    union and *ARM_PLT at the ARM-specific information.  */
3674 
3675 static bfd_boolean
3676 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_table *globals,
3677 			struct elf32_arm_link_hash_entry *h,
3678 			unsigned long r_symndx, union gotplt_union **root_plt,
3679 			struct arm_plt_info **arm_plt)
3680 {
3681   struct arm_local_iplt_info *local_iplt;
3682 
3683   if (globals->root.splt == NULL && globals->root.iplt == NULL)
3684     return FALSE;
3685 
3686   if (h != NULL)
3687     {
3688       *root_plt = &h->root.plt;
3689       *arm_plt = &h->plt;
3690       return TRUE;
3691     }
3692 
3693   if (elf32_arm_local_iplt (abfd) == NULL)
3694     return FALSE;
3695 
3696   local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3697   if (local_iplt == NULL)
3698     return FALSE;
3699 
3700   *root_plt = &local_iplt->root;
3701   *arm_plt = &local_iplt->arm;
3702   return TRUE;
3703 }
3704 
3705 static bfd_boolean using_thumb_only (struct elf32_arm_link_hash_table *globals);
3706 
3707 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3708    before it.  */
3709 
3710 static bfd_boolean
3711 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3712 				  struct arm_plt_info *arm_plt)
3713 {
3714   struct elf32_arm_link_hash_table *htab;
3715 
3716   htab = elf32_arm_hash_table (info);
3717 
3718   return (!using_thumb_only(htab) && (arm_plt->thumb_refcount != 0
3719 	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0)));
3720 }
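
/* Restated: the main PLT stubs are ARM code, so a Thumb stub in front of one
   is only needed when the output is not Thumb-only and there are Thumb
   references that cannot become BLX calls, either because they were counted
   in thumb_refcount or because BLX is unavailable for the
   maybe_thumb_refcount ones.  */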
3721 
3722 /* Return a pointer to the head of the dynamic reloc list that should
3723    be used for local symbol ISYM, which is symbol number R_SYMNDX in
3724    ABFD's symbol table.  Return null if an error occurs.  */
3725 
3726 static struct elf_dyn_relocs **
3727 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3728 				   Elf_Internal_Sym *isym)
3729 {
3730   if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3731     {
3732       struct arm_local_iplt_info *local_iplt;
3733 
3734       local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3735       if (local_iplt == NULL)
3736 	return NULL;
3737       return &local_iplt->dyn_relocs;
3738     }
3739   else
3740     {
3741       /* Track dynamic relocs needed for local syms too.
3742 	 We really need local syms available to do this
3743 	 easily.  Oh well.  */
3744       asection *s;
3745       void *vpp;
3746 
3747       s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3748       if (s == NULL)
3749 	abort ();
3750 
3751       vpp = &elf_section_data (s)->local_dynrel;
3752       return (struct elf_dyn_relocs **) vpp;
3753     }
3754 }
3755 
3756 /* Initialize an entry in the stub hash table.  */
3757 
3758 static struct bfd_hash_entry *
3759 stub_hash_newfunc (struct bfd_hash_entry *entry,
3760 		   struct bfd_hash_table *table,
3761 		   const char *string)
3762 {
3763   /* Allocate the structure if it has not already been allocated by a
3764      subclass.  */
3765   if (entry == NULL)
3766     {
3767       entry = (struct bfd_hash_entry *)
3768 	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3769       if (entry == NULL)
3770 	return entry;
3771     }
3772 
3773   /* Call the allocation method of the superclass.  */
3774   entry = bfd_hash_newfunc (entry, table, string);
3775   if (entry != NULL)
3776     {
3777       struct elf32_arm_stub_hash_entry *eh;
3778 
3779       /* Initialize the local fields.  */
3780       eh = (struct elf32_arm_stub_hash_entry *) entry;
3781       eh->stub_sec = NULL;
3782       eh->stub_offset = (bfd_vma) -1;
3783       eh->source_value = 0;
3784       eh->target_value = 0;
3785       eh->target_section = NULL;
3786       eh->orig_insn = 0;
3787       eh->stub_type = arm_stub_none;
3788       eh->stub_size = 0;
3789       eh->stub_template = NULL;
3790       eh->stub_template_size = -1;
3791       eh->h = NULL;
3792       eh->id_sec = NULL;
3793       eh->output_name = NULL;
3794     }
3795 
3796   return entry;
3797 }
3798 
3799 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3800    shortcuts to them in our hash table.  */
3801 
3802 static bfd_boolean
3803 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3804 {
3805   struct elf32_arm_link_hash_table *htab;
3806 
3807   htab = elf32_arm_hash_table (info);
3808   if (htab == NULL)
3809     return FALSE;
3810 
3811   /* BPABI objects never have a GOT, or associated sections.  */
3812   if (htab->symbian_p)
3813     return TRUE;
3814 
3815   if (! _bfd_elf_create_got_section (dynobj, info))
3816     return FALSE;
3817 
3818   /* Also create .rofixup.  */
3819   if (htab->fdpic_p)
3820     {
3821       htab->srofixup = bfd_make_section_with_flags (dynobj, ".rofixup",
3822 						    (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS
3823 						     | SEC_IN_MEMORY | SEC_LINKER_CREATED | SEC_READONLY));
3824       if (htab->srofixup == NULL
3825 	  || !bfd_set_section_alignment (htab->srofixup, 2))
3826 	return FALSE;
3827     }
3828 
3829   return TRUE;
3830 }
3831 
3832 /* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */
3833 
3834 static bfd_boolean
3835 create_ifunc_sections (struct bfd_link_info *info)
3836 {
3837   struct elf32_arm_link_hash_table *htab;
3838   const struct elf_backend_data *bed;
3839   bfd *dynobj;
3840   asection *s;
3841   flagword flags;
3842 
3843   htab = elf32_arm_hash_table (info);
3844   dynobj = htab->root.dynobj;
3845   bed = get_elf_backend_data (dynobj);
3846   flags = bed->dynamic_sec_flags;
3847 
3848   if (htab->root.iplt == NULL)
3849     {
3850       s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3851 					      flags | SEC_READONLY | SEC_CODE);
3852       if (s == NULL
3853 	  || !bfd_set_section_alignment (s, bed->plt_alignment))
3854 	return FALSE;
3855       htab->root.iplt = s;
3856     }
3857 
3858   if (htab->root.irelplt == NULL)
3859     {
3860       s = bfd_make_section_anyway_with_flags (dynobj,
3861 					      RELOC_SECTION (htab, ".iplt"),
3862 					      flags | SEC_READONLY);
3863       if (s == NULL
3864 	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
3865 	return FALSE;
3866       htab->root.irelplt = s;
3867     }
3868 
3869   if (htab->root.igotplt == NULL)
3870     {
3871       s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3872       if (s == NULL
3873 	  || !bfd_set_section_alignment (s, bed->s->log_file_align))
3874 	return FALSE;
3875       htab->root.igotplt = s;
3876     }
3877   return TRUE;
3878 }
3879 
3880 /* Determine if we're dealing with a Thumb only architecture.  */
3881 
3882 static bfd_boolean
3883 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3884 {
3885   int arch;
3886   int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3887 					  Tag_CPU_arch_profile);
3888 
3889   if (profile)
3890     return profile == 'M';
3891 
3892   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3893 
3894   /* Force return logic to be reviewed for each new architecture.  */
3895   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3896 
3897   if (arch == TAG_CPU_ARCH_V6_M
3898       || arch == TAG_CPU_ARCH_V6S_M
3899       || arch == TAG_CPU_ARCH_V7E_M
3900       || arch == TAG_CPU_ARCH_V8M_BASE
3901       || arch == TAG_CPU_ARCH_V8M_MAIN
3902       || arch == TAG_CPU_ARCH_V8_1M_MAIN)
3903     return TRUE;
3904 
3905   return FALSE;
3906 }
3907 
3908 /* Determine if we're dealing with a Thumb-2 object.  */
3909 
3910 static bfd_boolean
3911 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3912 {
3913   int arch;
3914   int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3915 					    Tag_THUMB_ISA_use);
3916 
3917   if (thumb_isa)
3918     return thumb_isa == 2;
3919 
3920   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3921 
3922   /* Force return logic to be reviewed for each new architecture.  */
3923   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3924 
3925   return (arch == TAG_CPU_ARCH_V6T2
3926 	  || arch == TAG_CPU_ARCH_V7
3927 	  || arch == TAG_CPU_ARCH_V7E_M
3928 	  || arch == TAG_CPU_ARCH_V8
3929 	  || arch == TAG_CPU_ARCH_V8R
3930 	  || arch == TAG_CPU_ARCH_V8M_MAIN
3931 	  || arch == TAG_CPU_ARCH_V8_1M_MAIN);
3932 }
3933 
3934 /* Determine whether Thumb-2 BL instruction is available.  */
3935 
3936 static bfd_boolean
3937 using_thumb2_bl (struct elf32_arm_link_hash_table *globals)
3938 {
3939   int arch =
3940     bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3941 
3942   /* Force return logic to be reviewed for each new architecture.  */
3943   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
3944 
3945   /* True for ARMv6T2 and anything introduced after it (e.g. ARMv6-M).  */
3946   return (arch == TAG_CPU_ARCH_V6T2
3947 	  || arch >= TAG_CPU_ARCH_V7);
3948 }
3949 
3950 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3951    .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3952    hash table.  */
3953 
3954 static bfd_boolean
3955 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3956 {
3957   struct elf32_arm_link_hash_table *htab;
3958 
3959   htab = elf32_arm_hash_table (info);
3960   if (htab == NULL)
3961     return FALSE;
3962 
3963   if (!htab->root.sgot && !create_got_section (dynobj, info))
3964     return FALSE;
3965 
3966   if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3967     return FALSE;
3968 
3969   if (htab->vxworks_p)
3970     {
3971       if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3972 	return FALSE;
3973 
3974       if (bfd_link_pic (info))
3975 	{
3976 	  htab->plt_header_size = 0;
3977 	  htab->plt_entry_size
3978 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3979 	}
3980       else
3981 	{
3982 	  htab->plt_header_size
3983 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3984 	  htab->plt_entry_size
3985 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3986 	}
3987 
3988       if (elf_elfheader (dynobj))
3989 	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3990     }
3991   else
3992     {
3993       /* PR ld/16017
3994 	 Test for thumb only architectures.  Note - we cannot just call
3995 	 using_thumb_only() as the attributes in the output bfd have not been
3996 	 initialised at this point, so instead we use the input bfd.  */
3997       bfd * saved_obfd = htab->obfd;
3998 
3999       htab->obfd = dynobj;
4000       if (using_thumb_only (htab))
4001 	{
4002 	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
4003 	  htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
4004 	}
4005       htab->obfd = saved_obfd;
4006     }
4007 
4008   if (htab->fdpic_p) {
4009     htab->plt_header_size = 0;
4010     if (info->flags & DF_BIND_NOW)
4011       htab->plt_entry_size = 4 * (ARRAY_SIZE(elf32_arm_fdpic_plt_entry) - 5);
4012     else
4013       htab->plt_entry_size = 4 * ARRAY_SIZE(elf32_arm_fdpic_plt_entry);
4014   }
4015 
4016   if (!htab->root.splt
4017       || !htab->root.srelplt
4018       || !htab->root.sdynbss
4019       || (!bfd_link_pic (info) && !htab->root.srelbss))
4020     abort ();
4021 
4022   return TRUE;
4023 }
4024 
4025 /* Copy the extra info we tack onto an elf_link_hash_entry.  */
4026 
4027 static void
4028 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
4029 				struct elf_link_hash_entry *dir,
4030 				struct elf_link_hash_entry *ind)
4031 {
4032   struct elf32_arm_link_hash_entry *edir, *eind;
4033 
4034   edir = (struct elf32_arm_link_hash_entry *) dir;
4035   eind = (struct elf32_arm_link_hash_entry *) ind;
4036 
4037   if (eind->dyn_relocs != NULL)
4038     {
4039       if (edir->dyn_relocs != NULL)
4040 	{
4041 	  struct elf_dyn_relocs **pp;
4042 	  struct elf_dyn_relocs *p;
4043 
4044 	  /* Add reloc counts against the indirect sym to the direct sym
4045 	     list.  Merge any entries against the same section.  */
4046 	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
4047 	    {
4048 	      struct elf_dyn_relocs *q;
4049 
4050 	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
4051 		if (q->sec == p->sec)
4052 		  {
4053 		    q->pc_count += p->pc_count;
4054 		    q->count += p->count;
4055 		    *pp = p->next;
4056 		    break;
4057 		  }
4058 	      if (q == NULL)
4059 		pp = &p->next;
4060 	    }
4061 	  *pp = edir->dyn_relocs;
4062 	}
4063 
4064       edir->dyn_relocs = eind->dyn_relocs;
4065       eind->dyn_relocs = NULL;
4066     }
4067 
4068   if (ind->root.type == bfd_link_hash_indirect)
4069     {
4070       /* Copy over PLT info.  */
4071       edir->plt.thumb_refcount += eind->plt.thumb_refcount;
4072       eind->plt.thumb_refcount = 0;
4073       edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
4074       eind->plt.maybe_thumb_refcount = 0;
4075       edir->plt.noncall_refcount += eind->plt.noncall_refcount;
4076       eind->plt.noncall_refcount = 0;
4077 
4078       /* Copy FDPIC counters.  */
4079       edir->fdpic_cnts.gotofffuncdesc_cnt += eind->fdpic_cnts.gotofffuncdesc_cnt;
4080       edir->fdpic_cnts.gotfuncdesc_cnt += eind->fdpic_cnts.gotfuncdesc_cnt;
4081       edir->fdpic_cnts.funcdesc_cnt += eind->fdpic_cnts.funcdesc_cnt;
4082 
4083       /* We should only allocate a function to .iplt once the final
4084 	 symbol information is known.  */
4085       BFD_ASSERT (!eind->is_iplt);
4086 
4087       if (dir->got.refcount <= 0)
4088 	{
4089 	  edir->tls_type = eind->tls_type;
4090 	  eind->tls_type = GOT_UNKNOWN;
4091 	}
4092     }
4093 
4094   _bfd_elf_link_hash_copy_indirect (info, dir, ind);
4095 }
4096 
4097 /* Destroy an ARM elf linker hash table.  */
4098 
4099 static void
4100 elf32_arm_link_hash_table_free (bfd *obfd)
4101 {
4102   struct elf32_arm_link_hash_table *ret
4103     = (struct elf32_arm_link_hash_table *) obfd->link.hash;
4104 
4105   bfd_hash_table_free (&ret->stub_hash_table);
4106   _bfd_elf_link_hash_table_free (obfd);
4107 }
4108 
4109 /* Create an ARM elf linker hash table.  */
4110 
4111 static struct bfd_link_hash_table *
4112 elf32_arm_link_hash_table_create (bfd *abfd)
4113 {
4114   struct elf32_arm_link_hash_table *ret;
4115   bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
4116 
4117   ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
4118   if (ret == NULL)
4119     return NULL;
4120 
4121   if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
4122 				      elf32_arm_link_hash_newfunc,
4123 				      sizeof (struct elf32_arm_link_hash_entry),
4124 				      ARM_ELF_DATA))
4125     {
4126       free (ret);
4127       return NULL;
4128     }
4129 
4130   ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
4131   ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
4132 #ifdef FOUR_WORD_PLT
4133   ret->plt_header_size = 16;
4134   ret->plt_entry_size = 16;
4135 #else
4136   ret->plt_header_size = 20;
4137   ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
4138 #endif
4139   ret->use_rel = TRUE;
4140   ret->obfd = abfd;
4141   ret->fdpic_p = 0;
4142 
4143   if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
4144 			    sizeof (struct elf32_arm_stub_hash_entry)))
4145     {
4146       _bfd_elf_link_hash_table_free (abfd);
4147       return NULL;
4148     }
4149   ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
4150 
4151   return &ret->root.root;
4152 }
4153 
4154 /* Determine what kind of NOPs are available.  */
4155 
4156 static bfd_boolean
4157 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
4158 {
4159   const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
4160 					     Tag_CPU_arch);
4161 
4162   /* Force return logic to be reviewed for each new architecture.  */
4163   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
4164 
4165   return (arch == TAG_CPU_ARCH_V6T2
4166 	  || arch == TAG_CPU_ARCH_V6K
4167 	  || arch == TAG_CPU_ARCH_V7
4168 	  || arch == TAG_CPU_ARCH_V8
4169 	  || arch == TAG_CPU_ARCH_V8R);
4170 }
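
/* That is, whether the architected ARM-state NOP hint is available; it was
   introduced with ARMv6K and ARMv6T2.  */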
4171 
4172 static bfd_boolean
4173 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
4174 {
4175   switch (stub_type)
4176     {
4177     case arm_stub_long_branch_thumb_only:
4178     case arm_stub_long_branch_thumb2_only:
4179     case arm_stub_long_branch_thumb2_only_pure:
4180     case arm_stub_long_branch_v4t_thumb_arm:
4181     case arm_stub_short_branch_v4t_thumb_arm:
4182     case arm_stub_long_branch_v4t_thumb_arm_pic:
4183     case arm_stub_long_branch_v4t_thumb_tls_pic:
4184     case arm_stub_long_branch_thumb_only_pic:
4185     case arm_stub_cmse_branch_thumb_only:
4186       return TRUE;
4187     case arm_stub_none:
4188       BFD_FAIL ();
4189       return FALSE;
4190       break;
4191     default:
4192       return FALSE;
4193     }
4194 }
4195 
4196 /* Determine the type of stub needed, if any, for a call.  */
4197 
4198 static enum elf32_arm_stub_type
4199 arm_type_of_stub (struct bfd_link_info *info,
4200 		  asection *input_sec,
4201 		  const Elf_Internal_Rela *rel,
4202 		  unsigned char st_type,
4203 		  enum arm_st_branch_type *actual_branch_type,
4204 		  struct elf32_arm_link_hash_entry *hash,
4205 		  bfd_vma destination,
4206 		  asection *sym_sec,
4207 		  bfd *input_bfd,
4208 		  const char *name)
4209 {
4210   bfd_vma location;
4211   bfd_signed_vma branch_offset;
4212   unsigned int r_type;
4213   struct elf32_arm_link_hash_table * globals;
4214   bfd_boolean thumb2, thumb2_bl, thumb_only;
4215   enum elf32_arm_stub_type stub_type = arm_stub_none;
4216   int use_plt = 0;
4217   enum arm_st_branch_type branch_type = *actual_branch_type;
4218   union gotplt_union *root_plt;
4219   struct arm_plt_info *arm_plt;
4220   int arch;
4221   int thumb2_movw;
4222 
4223   if (branch_type == ST_BRANCH_LONG)
4224     return stub_type;
4225 
4226   globals = elf32_arm_hash_table (info);
4227   if (globals == NULL)
4228     return stub_type;
4229 
4230   thumb_only = using_thumb_only (globals);
4231   thumb2 = using_thumb2 (globals);
4232   thumb2_bl = using_thumb2_bl (globals);
4233 
4234   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
4235 
4236   /* True for architectures that implement the thumb2 movw instruction.  */
4237   thumb2_movw = thumb2 || (arch  == TAG_CPU_ARCH_V8M_BASE);
4238 
4239   /* Determine where the call point is.  */
4240   location = (input_sec->output_offset
4241 	      + input_sec->output_section->vma
4242 	      + rel->r_offset);
4243 
4244   r_type = ELF32_R_TYPE (rel->r_info);
4245 
4246   /* ST_BRANCH_TO_ARM makes no sense for thumb-only targets when we
4247      are considering a function call relocation.  */
4248   if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4249 		     || r_type == R_ARM_THM_JUMP19)
4250       && branch_type == ST_BRANCH_TO_ARM)
4251     branch_type = ST_BRANCH_TO_THUMB;
4252 
4253   /* For TLS call relocs, it is the caller's responsibility to provide
4254      the address of the appropriate trampoline.  */
4255   if (r_type != R_ARM_TLS_CALL
4256       && r_type != R_ARM_THM_TLS_CALL
4257       && elf32_arm_get_plt_info (input_bfd, globals, hash,
4258 				 ELF32_R_SYM (rel->r_info), &root_plt,
4259 				 &arm_plt)
4260       && root_plt->offset != (bfd_vma) -1)
4261     {
4262       asection *splt;
4263 
4264       if (hash == NULL || hash->is_iplt)
4265 	splt = globals->root.iplt;
4266       else
4267 	splt = globals->root.splt;
4268       if (splt != NULL)
4269 	{
4270 	  use_plt = 1;
4271 
4272 	  /* Note when dealing with PLT entries: the main PLT stub is in
4273 	     ARM mode, so if the branch is in Thumb mode, another
4274 	     Thumb->ARM stub will be inserted later just before the ARM
4275 	     PLT stub. If a long branch stub is needed, we'll add a
4276 	     Thumb->Arm one and branch directly to the ARM PLT entry.
4277 	     Here, we have to check if a pre-PLT Thumb->ARM stub
4278 	     is needed and if it will be close enough.  */
4279 
4280 	  destination = (splt->output_section->vma
4281 			 + splt->output_offset
4282 			 + root_plt->offset);
4283 	  st_type = STT_FUNC;
4284 
4285 	  /* Thumb branch/call to PLT: it can become a branch to ARM
4286 	     or to Thumb. We must perform the same checks and
4287 	     corrections as in elf32_arm_final_link_relocate.  */
4288 	  if ((r_type == R_ARM_THM_CALL)
4289 	      || (r_type == R_ARM_THM_JUMP24))
4290 	    {
4291 	      if (globals->use_blx
4292 		  && r_type == R_ARM_THM_CALL
4293 		  && !thumb_only)
4294 		{
4295 		  /* If the Thumb BLX instruction is available, convert
4296 		     the BL to a BLX instruction to call the ARM-mode
4297 		     PLT entry.  */
4298 		  branch_type = ST_BRANCH_TO_ARM;
4299 		}
4300 	      else
4301 		{
4302 		  if (!thumb_only)
4303 		    /* Target the Thumb stub before the ARM PLT entry.  */
4304 		    destination -= PLT_THUMB_STUB_SIZE;
4305 		  branch_type = ST_BRANCH_TO_THUMB;
4306 		}
4307 	    }
4308 	  else
4309 	    {
4310 	      branch_type = ST_BRANCH_TO_ARM;
4311 	    }
4312 	}
4313     }
4314   /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
4315   BFD_ASSERT (st_type != STT_GNU_IFUNC);
4316 
4317   branch_offset = (bfd_signed_vma)(destination - location);
4318 
4319   if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
4320       || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
4321     {
4322       /* Handle cases where:
4323 	 - this call goes too far (different Thumb/Thumb2 max
4324 	   distance)
4325 	 - it's a Thumb->Arm call and blx is not available, or it's a
4326 	   Thumb->Arm branch (not bl). A stub is needed in this case,
4327 	   but only if this call is not through a PLT entry. Indeed,
4328 	   PLT stubs handle mode switching already.  */
4329       if ((!thumb2_bl
4330 	    && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
4331 		|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
4332 	  || (thumb2_bl
4333 	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
4334 		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
4335 	  || (thumb2
4336 	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
4337 		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
4338 	      && (r_type == R_ARM_THM_JUMP19))
4339 	  || (branch_type == ST_BRANCH_TO_ARM
4340 	      && (((r_type == R_ARM_THM_CALL
4341 		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
4342 		  || (r_type == R_ARM_THM_JUMP24)
4343 		  || (r_type == R_ARM_THM_JUMP19))
4344 	      && !use_plt))
4345 	{
4346 	  /* If we need to insert a Thumb-Thumb long branch stub to a
4347 	     PLT, use one that branches directly to the ARM PLT
4348 	     stub. If we pretended we'd use the pre-PLT Thumb->ARM
4349 	     stub, undo this now.  */
4350 	  if ((branch_type == ST_BRANCH_TO_THUMB) && use_plt && !thumb_only)
4351 	    {
4352 	      branch_type = ST_BRANCH_TO_ARM;
4353 	      branch_offset += PLT_THUMB_STUB_SIZE;
4354 	    }
4355 
4356 	  if (branch_type == ST_BRANCH_TO_THUMB)
4357 	    {
4358 	      /* Thumb to thumb.  */
4359 	      if (!thumb_only)
4360 		{
4361 		  if (input_sec->flags & SEC_ELF_PURECODE)
4362 		    _bfd_error_handler
4363 		      (_("%pB(%pA): warning: long branch veneers used in"
4364 			 " section with SHF_ARM_PURECODE section"
4365 			 " attribute is only supported for M-profile"
4366 			 " targets that implement the movw instruction"),
4367 		       input_bfd, input_sec);
4368 
4369 		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4370 		    /* PIC stubs.  */
4371 		    ? ((globals->use_blx
4372 			&& (r_type == R_ARM_THM_CALL))
4373 		       /* V5T and above. Stub starts with ARM code, so
4374 			  we must be able to switch mode before
4375 			  reaching it, which is only possible for 'bl'
4376 			  (ie R_ARM_THM_CALL relocation).  */
4377 		       ? arm_stub_long_branch_any_thumb_pic
4378 		       /* On V4T, use Thumb code only.  */
4379 		       : arm_stub_long_branch_v4t_thumb_thumb_pic)
4380 
4381 		    /* non-PIC stubs.  */
4382 		    : ((globals->use_blx
4383 			&& (r_type == R_ARM_THM_CALL))
4384 		       /* V5T and above.  */
4385 		       ? arm_stub_long_branch_any_any
4386 		       /* V4T.  */
4387 		       : arm_stub_long_branch_v4t_thumb_thumb);
4388 		}
4389 	      else
4390 		{
4391 		  if (thumb2_movw && (input_sec->flags & SEC_ELF_PURECODE))
4392 		      stub_type = arm_stub_long_branch_thumb2_only_pure;
4393 		  else
4394 		    {
4395 		      if (input_sec->flags & SEC_ELF_PURECODE)
4396 			_bfd_error_handler
4397 			  (_("%pB(%pA): warning: long branch veneers used in"
4398 			     " section with SHF_ARM_PURECODE section"
4399 			     " attribute is only supported for M-profile"
4400 			     " targets that implement the movw instruction"),
4401 			   input_bfd, input_sec);
4402 
4403 		      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4404 			/* PIC stub.  */
4405 			? arm_stub_long_branch_thumb_only_pic
4406 			/* non-PIC stub.  */
4407 			: (thumb2 ? arm_stub_long_branch_thumb2_only
4408 				  : arm_stub_long_branch_thumb_only);
4409 		    }
4410 		}
4411 	    }
4412 	  else
4413 	    {
4414 	      if (input_sec->flags & SEC_ELF_PURECODE)
4415 		_bfd_error_handler
4416 		  (_("%pB(%pA): warning: long branch veneers used in"
4417 		     " section with SHF_ARM_PURECODE section"
4418 		     " attribute is only supported for M-profile"
4419 		     " targets that implement the movw instruction"),
4420 		   input_bfd, input_sec);
4421 
4422 	      /* Thumb to arm.  */
4423 	      if (sym_sec != NULL
4424 		  && sym_sec->owner != NULL
4425 		  && !INTERWORK_FLAG (sym_sec->owner))
4426 		{
4427 		  _bfd_error_handler
4428 		    (_("%pB(%s): warning: interworking not enabled;"
4429 		       " first occurrence: %pB: %s call to %s"),
4430 		     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
4431 		}
4432 
4433 	      stub_type =
4434 		(bfd_link_pic (info) | globals->pic_veneer)
4435 		/* PIC stubs.  */
4436 		? (r_type == R_ARM_THM_TLS_CALL
4437 		   /* TLS PIC stubs.  */
4438 		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
4439 		      : arm_stub_long_branch_v4t_thumb_tls_pic)
4440 		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
4441 		      /* V5T PIC and above.  */
4442 		      ? arm_stub_long_branch_any_arm_pic
4443 		      /* V4T PIC stub.  */
4444 		      : arm_stub_long_branch_v4t_thumb_arm_pic))
4445 
4446 		/* non-PIC stubs.  */
4447 		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
4448 		   /* V5T and above.  */
4449 		   ? arm_stub_long_branch_any_any
4450 		   /* V4T.  */
4451 		   : arm_stub_long_branch_v4t_thumb_arm);
4452 
4453 	      /* Handle v4t short branches.  */
4454 	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
4455 		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
4456 		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4457 		stub_type = arm_stub_short_branch_v4t_thumb_arm;
4458 	    }
4459 	}
4460     }
4461   else if (r_type == R_ARM_CALL
4462 	   || r_type == R_ARM_JUMP24
4463 	   || r_type == R_ARM_PLT32
4464 	   || r_type == R_ARM_TLS_CALL)
4465     {
4466       if (input_sec->flags & SEC_ELF_PURECODE)
4467 	_bfd_error_handler
4468 	  (_("%pB(%pA): warning: long branch veneers used in"
4469 	     " section with SHF_ARM_PURECODE section"
4470 	     " attribute is only supported for M-profile"
4471 	     " targets that implement the movw instruction"),
4472 	   input_bfd, input_sec);
4473       if (branch_type == ST_BRANCH_TO_THUMB)
4474 	{
4475 	  /* Arm to thumb.  */
4476 
4477 	  if (sym_sec != NULL
4478 	      && sym_sec->owner != NULL
4479 	      && !INTERWORK_FLAG (sym_sec->owner))
4480 	    {
4481 	      _bfd_error_handler
4482 		(_("%pB(%s): warning: interworking not enabled;"
4483 		   " first occurrence: %pB: %s call to %s"),
4484 		 sym_sec->owner, name, input_bfd, "ARM", "Thumb");
4485 	    }
4486 
4487 	  /* We have an extra 2 bytes of reach because of
4488 	     the mode change (bit 24 (H) of BLX encoding).  */
4489 	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4490 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4491 	      || (r_type == R_ARM_CALL && !globals->use_blx)
4492 	      || (r_type == R_ARM_JUMP24)
4493 	      || (r_type == R_ARM_PLT32))
4494 	    {
4495 	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4496 		/* PIC stubs.  */
4497 		? ((globals->use_blx)
4498 		   /* V5T and above.  */
4499 		   ? arm_stub_long_branch_any_thumb_pic
4500 		   /* V4T stub.  */
4501 		   : arm_stub_long_branch_v4t_arm_thumb_pic)
4502 
4503 		/* non-PIC stubs.  */
4504 		: ((globals->use_blx)
4505 		   /* V5T and above.  */
4506 		   ? arm_stub_long_branch_any_any
4507 		   /* V4T.  */
4508 		   : arm_stub_long_branch_v4t_arm_thumb);
4509 	    }
4510 	}
4511       else
4512 	{
4513 	  /* Arm to arm.  */
4514 	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4515 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4516 	    {
4517 	      stub_type =
4518 		(bfd_link_pic (info) | globals->pic_veneer)
4519 		/* PIC stubs.  */
4520 		? (r_type == R_ARM_TLS_CALL
4521 		   /* TLS PIC Stub.  */
4522 		   ? arm_stub_long_branch_any_tls_pic
4523 		   : (globals->nacl_p
4524 		      ? arm_stub_long_branch_arm_nacl_pic
4525 		      : arm_stub_long_branch_any_arm_pic))
4526 		/* non-PIC stubs.  */
4527 		: (globals->nacl_p
4528 		   ? arm_stub_long_branch_arm_nacl
4529 		   : arm_stub_long_branch_any_any);
4530 	    }
4531 	}
4532     }
4533 
4534   /* If a stub is needed, record the actual destination type.  */
4535   if (stub_type != arm_stub_none)
4536     *actual_branch_type = branch_type;
4537 
4538   return stub_type;
4539 }
4540 
4541 /* Build a name for an entry in the stub hash table.  */
4542 
4543 static char *
4544 elf32_arm_stub_name (const asection *input_section,
4545 		     const asection *sym_sec,
4546 		     const struct elf32_arm_link_hash_entry *hash,
4547 		     const Elf_Internal_Rela *rel,
4548 		     enum elf32_arm_stub_type stub_type)
4549 {
4550   char *stub_name;
4551   bfd_size_type len;
4552 
4553   if (hash)
4554     {
4555       len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4556       stub_name = (char *) bfd_malloc (len);
4557       if (stub_name != NULL)
4558 	sprintf (stub_name, "%08x_%s+%x_%d",
4559 		 input_section->id & 0xffffffff,
4560 		 hash->root.root.root.string,
4561 		 (int) rel->r_addend & 0xffffffff,
4562 		 (int) stub_type);
4563     }
4564   else
4565     {
4566       len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4567       stub_name = (char *) bfd_malloc (len);
4568       if (stub_name != NULL)
4569 	sprintf (stub_name, "%08x_%x:%x+%x_%d",
4570 		 input_section->id & 0xffffffff,
4571 		 sym_sec->id & 0xffffffff,
4572 		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4573 		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4574 		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4575 		 (int) rel->r_addend & 0xffffffff,
4576 		 (int) stub_type);
4577     }
4578 
4579   return stub_name;
4580 }
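
/* To illustrate the format strings above: for a global symbol the name is
   "<section id>_<symbol>+<addend>_<stub type>", so a stub of (numeric) type 3
   for "printf" reached from the section with id 0x2a and addend 0 would be
   called "0000002a_printf+0_3".  The numbers here are invented for the
   example.  */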
4581 
4582 /* Look up an entry in the stub hash.  Stub entries are cached because
4583    creating the stub name takes a bit of time.  */
4584 
4585 static struct elf32_arm_stub_hash_entry *
4586 elf32_arm_get_stub_entry (const asection *input_section,
4587 			  const asection *sym_sec,
4588 			  struct elf_link_hash_entry *hash,
4589 			  const Elf_Internal_Rela *rel,
4590 			  struct elf32_arm_link_hash_table *htab,
4591 			  enum elf32_arm_stub_type stub_type)
4592 {
4593   struct elf32_arm_stub_hash_entry *stub_entry;
4594   struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4595   const asection *id_sec;
4596 
4597   if ((input_section->flags & SEC_CODE) == 0)
4598     return NULL;
4599 
4600   /* If the input section is the CMSE stubs one and it needs a long
4601      branch stub to reach its final destination, give up with an
4602      error message: this is not supported.  See PR ld/24709.  */
4603   if (!strncmp (input_section->name, CMSE_STUB_NAME, strlen(CMSE_STUB_NAME)))
4604     {
4605       bfd *output_bfd = htab->obfd;
4606       asection *out_sec = bfd_get_section_by_name (output_bfd, CMSE_STUB_NAME);
4607 
4608       _bfd_error_handler (_("ERROR: CMSE stub (%s section) too far "
4609 			    "(%#" PRIx64 ") from destination (%#" PRIx64 ")"),
4610 			  CMSE_STUB_NAME,
4611 			  (uint64_t)out_sec->output_section->vma
4612 			    + out_sec->output_offset,
4613 			  (uint64_t)sym_sec->output_section->vma
4614 			    + sym_sec->output_offset
4615 			    + h->root.root.u.def.value);
4616       /* Exit, rather than leave incompletely processed
4617 	 relocations.  */
4618       xexit(1);
4619     }
4620 
4621   /* If this input section is part of a group of sections sharing one
4622      stub section, then use the id of the first section in the group.
4623      Stub names need to include a section id, as there may well be
4624      more than one stub used to reach, say, printf, and we need to
4625      distinguish between them.  */
4626   BFD_ASSERT (input_section->id <= htab->top_id);
4627   id_sec = htab->stub_group[input_section->id].link_sec;
4628 
4629   if (h != NULL && h->stub_cache != NULL
4630       && h->stub_cache->h == h
4631       && h->stub_cache->id_sec == id_sec
4632       && h->stub_cache->stub_type == stub_type)
4633     {
4634       stub_entry = h->stub_cache;
4635     }
4636   else
4637     {
4638       char *stub_name;
4639 
4640       stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4641       if (stub_name == NULL)
4642 	return NULL;
4643 
4644       stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4645 					stub_name, FALSE, FALSE);
4646       if (h != NULL)
4647 	h->stub_cache = stub_entry;
4648 
4649       free (stub_name);
4650     }
4651 
4652   return stub_entry;
4653 }
4654 
4655 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4656    section.  */
4657 
4658 static bfd_boolean
4659 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4660 {
4661   if (stub_type >= max_stub_type)
4662     abort ();  /* Should be unreachable.  */
4663 
4664   switch (stub_type)
4665     {
4666     case arm_stub_cmse_branch_thumb_only:
4667       return TRUE;
4668 
4669     default:
4670       return FALSE;
4671     }
4672 
4673   abort ();  /* Should be unreachable.  */
4674 }
4675 
4676 /* Required alignment (as a power of 2) for the dedicated section holding
4677    veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4678    with input sections.  */
4679 
4680 static int
4681 arm_dedicated_stub_output_section_required_alignment
4682   (enum elf32_arm_stub_type stub_type)
4683 {
4684   if (stub_type >= max_stub_type)
4685     abort ();  /* Should be unreachable.  */
4686 
4687   switch (stub_type)
4688     {
4689     /* Vectors of Secure Gateway veneers must be aligned on a 32-byte
4690        boundary.  */
4691     case arm_stub_cmse_branch_thumb_only:
4692       return 5;
4693 
4694     default:
4695       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4696       return 0;
4697     }
4698 
4699   abort ();  /* Should be unreachable.  */
4700 }
4701 
4702 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4703    NULL if veneers of this type are interspersed with input sections.  */
4704 
4705 static const char *
4706 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4707 {
4708   if (stub_type >= max_stub_type)
4709     abort ();  /* Should be unreachable.  */
4710 
4711   switch (stub_type)
4712     {
4713     case arm_stub_cmse_branch_thumb_only:
4714       return CMSE_STUB_NAME;
4715 
4716     default:
4717       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4718       return NULL;
4719     }
4720 
4721   abort ();  /* Should be unreachable.  */
4722 }
4723 
4724 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4725    returns the address of the hash table field in HTAB holding a pointer to the
4726    corresponding input section.  Otherwise, returns NULL.  */
4727 
4728 static asection **
4729 arm_dedicated_stub_input_section_ptr (struct elf32_arm_link_hash_table *htab,
4730 				      enum elf32_arm_stub_type stub_type)
4731 {
4732   if (stub_type >= max_stub_type)
4733     abort ();  /* Should be unreachable.  */
4734 
4735   switch (stub_type)
4736     {
4737     case arm_stub_cmse_branch_thumb_only:
4738       return &htab->cmse_stub_sec;
4739 
4740     default:
4741       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4742       return NULL;
4743     }
4744 
4745   abort ();  /* Should be unreachable.  */
4746 }
4747 
4748 /* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
4749    is the section containing the branch into the veneer; it may be NULL if the
4750    stub should go in a dedicated output section.  Returns a pointer to the stub
4751    section, and the section to which the stub section will be attached (in
4752    *LINK_SEC_P).  LINK_SEC_P may be NULL.  */
4753 
4754 static asection *
4755 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4756 				   struct elf32_arm_link_hash_table *htab,
4757 				   enum elf32_arm_stub_type stub_type)
4758 {
4759   asection *link_sec, *out_sec, **stub_sec_p;
4760   const char *stub_sec_prefix;
4761   bfd_boolean dedicated_output_section =
4762     arm_dedicated_stub_output_section_required (stub_type);
4763   int align;
4764 
4765   if (dedicated_output_section)
4766     {
4767       bfd *output_bfd = htab->obfd;
4768       const char *out_sec_name =
4769 	arm_dedicated_stub_output_section_name (stub_type);
4770       link_sec = NULL;
4771       stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4772       stub_sec_prefix = out_sec_name;
4773       align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4774       out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4775       if (out_sec == NULL)
4776 	{
4777 	  _bfd_error_handler (_("no address assigned to the veneers output "
4778 				"section %s"), out_sec_name);
4779 	  return NULL;
4780 	}
4781     }
4782   else
4783     {
4784       BFD_ASSERT (section->id <= htab->top_id);
4785       link_sec = htab->stub_group[section->id].link_sec;
4786       BFD_ASSERT (link_sec != NULL);
4787       stub_sec_p = &htab->stub_group[section->id].stub_sec;
4788       if (*stub_sec_p == NULL)
4789 	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4790       stub_sec_prefix = link_sec->name;
4791       out_sec = link_sec->output_section;
4792       align = htab->nacl_p ? 4 : 3;
4793     }
4794 
4795   if (*stub_sec_p == NULL)
4796     {
4797       size_t namelen;
4798       bfd_size_type len;
4799       char *s_name;
4800 
4801       namelen = strlen (stub_sec_prefix);
4802       len = namelen + sizeof (STUB_SUFFIX);
4803       s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4804       if (s_name == NULL)
4805 	return NULL;
4806 
4807       memcpy (s_name, stub_sec_prefix, namelen);
4808       memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4809       *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4810 					       align);
4811       if (*stub_sec_p == NULL)
4812 	return NULL;
4813 
4814       out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4815 			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4816 			| SEC_KEEP;
4817     }
4818 
4819   if (!dedicated_output_section)
4820     htab->stub_group[section->id].stub_sec = *stub_sec_p;
4821 
4822   if (link_sec_p)
4823     *link_sec_p = link_sec;
4824 
4825   return *stub_sec_p;
4826 }
4827 
4828 /* Add a new stub entry to the stub hash.  Not all fields of the new
4829    stub entry are initialised.  */
4830 
4831 static struct elf32_arm_stub_hash_entry *
4832 elf32_arm_add_stub (const char *stub_name, asection *section,
4833 		    struct elf32_arm_link_hash_table *htab,
4834 		    enum elf32_arm_stub_type stub_type)
4835 {
4836   asection *link_sec;
4837   asection *stub_sec;
4838   struct elf32_arm_stub_hash_entry *stub_entry;
4839 
4840   stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4841 						stub_type);
4842   if (stub_sec == NULL)
4843     return NULL;
4844 
4845   /* Enter this entry into the linker stub hash table.  */
4846   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4847 				     TRUE, FALSE);
4848   if (stub_entry == NULL)
4849     {
4850       if (section == NULL)
4851 	section = stub_sec;
4852       _bfd_error_handler (_("%pB: cannot create stub entry %s"),
4853 			  section->owner, stub_name);
4854       return NULL;
4855     }
4856 
4857   stub_entry->stub_sec = stub_sec;
4858   stub_entry->stub_offset = (bfd_vma) -1;
4859   stub_entry->id_sec = link_sec;
4860 
4861   return stub_entry;
4862 }
4863 
4864 /* Store an Arm insn into an output section not processed by
4865    elf32_arm_write_section.  */
4866 
4867 static void
4868 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4869 	      bfd * output_bfd, bfd_vma val, void * ptr)
4870 {
4871   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4872     bfd_putl32 (val, ptr);
4873   else
4874     bfd_putb32 (val, ptr);
4875 }
4876 
4877 /* Store a 16-bit Thumb insn into an output section not processed by
4878    elf32_arm_write_section.  */
4879 
4880 static void
4881 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4882 		bfd * output_bfd, bfd_vma val, void * ptr)
4883 {
4884   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4885     bfd_putl16 (val, ptr);
4886   else
4887     bfd_putb16 (val, ptr);
4888 }
4889 
4890 /* Store a Thumb2 insn into an output section not processed by
4891    elf32_arm_write_section.  */
4892 
4893 static void
4894 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4895 		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4896 {
4897   /* Thumb-2 instructions are streamed as two 16-bit halfwords.  */
4898   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4899     {
4900       bfd_putl16 ((val >> 16) & 0xffff, ptr);
4901       bfd_putl16 ((val & 0xffff), ptr + 2);
4902     }
4903   else
4904     {
4905       bfd_putb16 ((val >> 16) & 0xffff, ptr);
4906       bfd_putb16 ((val & 0xffff), ptr + 2);
4907     }
4908 }
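/* Illustrative sketch (editor addition, not part of the original source):
   put_thumb2_insn above emits the high halfword of VAL first; when the code
   is little-endian, each halfword is written in little-endian byte order, so
   VAL = 0x12345678 produces the byte stream 0x34 0x12 0x78 0x56.  */
#if 0
{
  unsigned long val = 0x12345678;
  unsigned char out[4];

  out[0] = (val >> 16) & 0xff;	/* Low byte of the first halfword 0x1234.  */
  out[1] = (val >> 24) & 0xff;	/* High byte of the first halfword.  */
  out[2] = val & 0xff;		/* Low byte of the second halfword 0x5678.  */
  out[3] = (val >> 8) & 0xff;	/* High byte of the second halfword.  */
}
#endif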
4909 
4910 /* If it's possible to change R_TYPE to a more efficient access
4911    model, return the new reloc type.  */
4912 
4913 static unsigned
4914 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4915 			  struct elf_link_hash_entry *h)
4916 {
4917   int is_local = (h == NULL);
4918 
4919   if (bfd_link_dll (info)
4920       || (h && h->root.type == bfd_link_hash_undefweak))
4921     return r_type;
4922 
4923   /* We do not support relaxations for Old TLS models.  */
4924   switch (r_type)
4925     {
4926     case R_ARM_TLS_GOTDESC:
4927     case R_ARM_TLS_CALL:
4928     case R_ARM_THM_TLS_CALL:
4929     case R_ARM_TLS_DESCSEQ:
4930     case R_ARM_THM_TLS_DESCSEQ:
4931       return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4932     }
4933 
4934   return r_type;
4935 }
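/* Editor's note (summary of the switch above): when the output is not a
   shared object, the descriptor and call relocations listed here relax to
   the local-exec model (R_ARM_TLS_LE32) for local symbols and to the
   initial-exec model (R_ARM_TLS_IE32) for global symbols, except for
   undefined weak symbols, which are left unchanged.  */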
4936 
4937 static bfd_reloc_status_type elf32_arm_final_link_relocate
4938   (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4939    Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4940    const char *, unsigned char, enum arm_st_branch_type,
4941    struct elf_link_hash_entry *, bfd_boolean *, char **);
4942 
4943 static unsigned int
4944 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4945 {
4946   switch (stub_type)
4947     {
4948     case arm_stub_a8_veneer_b_cond:
4949     case arm_stub_a8_veneer_b:
4950     case arm_stub_a8_veneer_bl:
4951       return 2;
4952 
4953     case arm_stub_long_branch_any_any:
4954     case arm_stub_long_branch_v4t_arm_thumb:
4955     case arm_stub_long_branch_thumb_only:
4956     case arm_stub_long_branch_thumb2_only:
4957     case arm_stub_long_branch_thumb2_only_pure:
4958     case arm_stub_long_branch_v4t_thumb_thumb:
4959     case arm_stub_long_branch_v4t_thumb_arm:
4960     case arm_stub_short_branch_v4t_thumb_arm:
4961     case arm_stub_long_branch_any_arm_pic:
4962     case arm_stub_long_branch_any_thumb_pic:
4963     case arm_stub_long_branch_v4t_thumb_thumb_pic:
4964     case arm_stub_long_branch_v4t_arm_thumb_pic:
4965     case arm_stub_long_branch_v4t_thumb_arm_pic:
4966     case arm_stub_long_branch_thumb_only_pic:
4967     case arm_stub_long_branch_any_tls_pic:
4968     case arm_stub_long_branch_v4t_thumb_tls_pic:
4969     case arm_stub_cmse_branch_thumb_only:
4970     case arm_stub_a8_veneer_blx:
4971       return 4;
4972 
4973     case arm_stub_long_branch_arm_nacl:
4974     case arm_stub_long_branch_arm_nacl_pic:
4975       return 16;
4976 
4977     default:
4978       abort ();  /* Should be unreachable.  */
4979     }
4980 }
4981 
4982 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4983    veneering (TRUE) or have their own symbol (FALSE).  */
4984 
4985 static bfd_boolean
4986 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4987 {
4988   if (stub_type >= max_stub_type)
4989     abort ();  /* Should be unreachable.  */
4990 
4991   switch (stub_type)
4992     {
4993     case arm_stub_cmse_branch_thumb_only:
4994       return TRUE;
4995 
4996     default:
4997       return FALSE;
4998     }
4999 
5000   abort ();  /* Should be unreachable.  */
5001 }
5002 
5003 /* Returns the padding needed for the dedicated section used by stubs of type
5004    STUB_TYPE.  */
5005 
5006 static int
5007 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
5008 {
5009   if (stub_type >= max_stub_type)
5010     abort ();  /* Should be unreachable.  */
5011 
5012   switch (stub_type)
5013     {
5014     case arm_stub_cmse_branch_thumb_only:
5015       return 32;
5016 
5017     default:
5018       return 0;
5019     }
5020 
5021   abort ();  /* Should be unreachable.  */
5022 }
5023 
5024 /* If veneers of type STUB_TYPE should go in a dedicated output section,
5025    returns the address of the hash table field in HTAB holding the offset at
5026    which new veneers should be laid out in the stub section.  */
5027 
5028 static bfd_vma*
5029 arm_new_stubs_start_offset_ptr (struct elf32_arm_link_hash_table *htab,
5030 				enum elf32_arm_stub_type stub_type)
5031 {
5032   switch (stub_type)
5033     {
5034     case arm_stub_cmse_branch_thumb_only:
5035       return &htab->new_cmse_stub_offset;
5036 
5037     default:
5038       BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
5039       return NULL;
5040     }
5041 }
5042 
5043 static bfd_boolean
5044 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
5045 		    void * in_arg)
5046 {
5047 #define MAXRELOCS 3
5048   bfd_boolean removed_sg_veneer;
5049   struct elf32_arm_stub_hash_entry *stub_entry;
5050   struct elf32_arm_link_hash_table *globals;
5051   struct bfd_link_info *info;
5052   asection *stub_sec;
5053   bfd *stub_bfd;
5054   bfd_byte *loc;
5055   bfd_vma sym_value;
5056   int template_size;
5057   int size;
5058   const insn_sequence *template_sequence;
5059   int i;
5060   int stub_reloc_idx[MAXRELOCS] = {-1, -1};
5061   int stub_reloc_offset[MAXRELOCS] = {0, 0};
5062   int nrelocs = 0;
5063   int just_allocated = 0;
5064 
5065   /* Massage our args to the form they really have.  */
5066   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5067   info = (struct bfd_link_info *) in_arg;
5068 
5069   globals = elf32_arm_hash_table (info);
5070   if (globals == NULL)
5071     return FALSE;
5072 
5073   stub_sec = stub_entry->stub_sec;
5074 
5075   if ((globals->fix_cortex_a8 < 0)
5076       != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
5077     /* We have to do less-strictly-aligned fixes last.  */
5078     return TRUE;
5079 
5080   /* Assign a slot at the end of section if none assigned yet.  */
5081   if (stub_entry->stub_offset == (bfd_vma) -1)
5082     {
5083       stub_entry->stub_offset = stub_sec->size;
5084       just_allocated = 1;
5085     }
5086   loc = stub_sec->contents + stub_entry->stub_offset;
5087 
5088   stub_bfd = stub_sec->owner;
5089 
5090   /* This is the address of the stub destination.  */
5091   sym_value = (stub_entry->target_value
5092 	       + stub_entry->target_section->output_offset
5093 	       + stub_entry->target_section->output_section->vma);
5094 
5095   template_sequence = stub_entry->stub_template;
5096   template_size = stub_entry->stub_template_size;
5097 
5098   size = 0;
5099   for (i = 0; i < template_size; i++)
5100     {
5101       switch (template_sequence[i].type)
5102 	{
5103 	case THUMB16_TYPE:
5104 	  {
5105 	    bfd_vma data = (bfd_vma) template_sequence[i].data;
5106 	    if (template_sequence[i].reloc_addend != 0)
5107 	      {
5108 		/* We've borrowed the reloc_addend field to mean we should
5109 		   insert a condition code into this (Thumb-1 branch)
5110 		   instruction.  See THUMB16_BCOND_INSN.  */
5111 		BFD_ASSERT ((data & 0xff00) == 0xd000);
5112 		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
5113 	      }
5114 	    bfd_put_16 (stub_bfd, data, loc + size);
5115 	    size += 2;
5116 	  }
5117 	  break;
5118 
5119 	case THUMB32_TYPE:
5120 	  bfd_put_16 (stub_bfd,
5121 		      (template_sequence[i].data >> 16) & 0xffff,
5122 		      loc + size);
5123 	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
5124 		      loc + size + 2);
5125 	  if (template_sequence[i].r_type != R_ARM_NONE)
5126 	    {
5127 	      stub_reloc_idx[nrelocs] = i;
5128 	      stub_reloc_offset[nrelocs++] = size;
5129 	    }
5130 	  size += 4;
5131 	  break;
5132 
5133 	case ARM_TYPE:
5134 	  bfd_put_32 (stub_bfd, template_sequence[i].data,
5135 		      loc + size);
5136 	  /* Handle cases where the target is encoded within the
5137 	     instruction.  */
5138 	  if (template_sequence[i].r_type == R_ARM_JUMP24)
5139 	    {
5140 	      stub_reloc_idx[nrelocs] = i;
5141 	      stub_reloc_offset[nrelocs++] = size;
5142 	    }
5143 	  size += 4;
5144 	  break;
5145 
5146 	case DATA_TYPE:
5147 	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
5148 	  stub_reloc_idx[nrelocs] = i;
5149 	  stub_reloc_offset[nrelocs++] = size;
5150 	  size += 4;
5151 	  break;
5152 
5153 	default:
5154 	  BFD_FAIL ();
5155 	  return FALSE;
5156 	}
5157     }
5158 
5159   if (just_allocated)
5160     stub_sec->size += size;
5161 
5162   /* Stub size has already been computed in arm_size_one_stub. Check
5163      consistency.  */
5164   BFD_ASSERT (size == stub_entry->stub_size);
5165 
5166   /* Destination is Thumb. Force bit 0 to 1 to reflect this.  */
5167   if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
5168     sym_value |= 1;
5169 
5170   /* Assume non-empty slots have at least one and at most MAXRELOCS entries
5171      to relocate in each stub.  */
5172   removed_sg_veneer =
5173     (size == 0 && stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
5174   BFD_ASSERT (removed_sg_veneer || (nrelocs != 0 && nrelocs <= MAXRELOCS));
5175 
5176   for (i = 0; i < nrelocs; i++)
5177     {
5178       Elf_Internal_Rela rel;
5179       bfd_boolean unresolved_reloc;
5180       char *error_message;
5181       bfd_vma points_to =
5182 	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
5183 
5184       rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
5185       rel.r_info = ELF32_R_INFO (0,
5186 				 template_sequence[stub_reloc_idx[i]].r_type);
5187       rel.r_addend = 0;
5188 
5189       if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
5190 	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
5191 	   template should refer back to the instruction after the original
5192 	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
5193 	   are only generated when both source and target are in the same
5194 	   section.  */
5195 	points_to = stub_entry->target_section->output_section->vma
5196 		    + stub_entry->target_section->output_offset
5197 		    + stub_entry->source_value;
5198 
5199       elf32_arm_final_link_relocate (elf32_arm_howto_from_type
5200 	  (template_sequence[stub_reloc_idx[i]].r_type),
5201 	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
5202 	   points_to, info, stub_entry->target_section, "", STT_FUNC,
5203 	   stub_entry->branch_type,
5204 	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
5205 	   &error_message);
5206     }
5207 
5208   return TRUE;
5209 #undef MAXRELOCS
5210 }
5211 
5212 /* Calculate the template, template size and instruction size for a stub.
5213    Return value is the instruction size.  */
5214 
5215 static unsigned int
5216 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
5217 			     const insn_sequence **stub_template,
5218 			     int *stub_template_size)
5219 {
5220   const insn_sequence *template_sequence = NULL;
5221   int template_size = 0, i;
5222   unsigned int size;
5223 
5224   template_sequence = stub_definitions[stub_type].template_sequence;
5225   if (stub_template)
5226     *stub_template = template_sequence;
5227 
5228   template_size = stub_definitions[stub_type].template_size;
5229   if (stub_template_size)
5230     *stub_template_size = template_size;
5231 
5232   size = 0;
5233   for (i = 0; i < template_size; i++)
5234     {
5235       switch (template_sequence[i].type)
5236 	{
5237 	case THUMB16_TYPE:
5238 	  size += 2;
5239 	  break;
5240 
5241 	case ARM_TYPE:
5242 	case THUMB32_TYPE:
5243 	case DATA_TYPE:
5244 	  size += 4;
5245 	  break;
5246 
5247 	default:
5248 	  BFD_FAIL ();
5249 	  return 0;
5250 	}
5251     }
5252 
5253   return size;
5254 }
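/* Editor's note (hypothetical mix): a template consisting of two THUMB16
   entries and one DATA entry would yield an instruction size of
   2 + 2 + 4 = 8 bytes; the real mixes come from the templates recorded in
   stub_definitions.  */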
5255 
5256 /* As above, but don't actually build the stub.  Just bump offset so
5257    we know stub section sizes.  */
5258 
5259 static bfd_boolean
5260 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
5261 		   void *in_arg ATTRIBUTE_UNUSED)
5262 {
5263   struct elf32_arm_stub_hash_entry *stub_entry;
5264   const insn_sequence *template_sequence;
5265   int template_size, size;
5266 
5267   /* Massage our args to the form they really have.  */
5268   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
5269 
5270   BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
5271 	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
5272 
5273   size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
5274 				      &template_size);
5275 
5276   /* Initialized to -1.  Null size indicates an empty slot full of zeros.  */
5277   if (stub_entry->stub_template_size)
5278     {
5279       stub_entry->stub_size = size;
5280       stub_entry->stub_template = template_sequence;
5281       stub_entry->stub_template_size = template_size;
5282     }
5283 
5284   /* Already accounted for.  */
5285   if (stub_entry->stub_offset != (bfd_vma) -1)
5286     return TRUE;
5287 
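  /* Round the stub size up to a multiple of 8 bytes (e.g. a 6-byte stub
     reserves 8 bytes).  */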
5288   size = (size + 7) & ~7;
5289   stub_entry->stub_sec->size += size;
5290 
5291   return TRUE;
5292 }
5293 
5294 /* External entry points for sizing and building linker stubs.  */
5295 
5296 /* Set up various things so that we can make a list of input sections
5297    for each output section included in the link.  Returns -1 on error,
5298    0 when no stubs will be needed, and 1 on success.  */
5299 
5300 int
5301 elf32_arm_setup_section_lists (bfd *output_bfd,
5302 			       struct bfd_link_info *info)
5303 {
5304   bfd *input_bfd;
5305   unsigned int bfd_count;
5306   unsigned int top_id, top_index;
5307   asection *section;
5308   asection **input_list, **list;
5309   bfd_size_type amt;
5310   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5311 
5312   if (htab == NULL)
5313     return 0;
5314   if (! is_elf_hash_table (htab))
5315     return 0;
5316 
5317   /* Count the number of input BFDs and find the top input section id.  */
5318   for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
5319        input_bfd != NULL;
5320        input_bfd = input_bfd->link.next)
5321     {
5322       bfd_count += 1;
5323       for (section = input_bfd->sections;
5324 	   section != NULL;
5325 	   section = section->next)
5326 	{
5327 	  if (top_id < section->id)
5328 	    top_id = section->id;
5329 	}
5330     }
5331   htab->bfd_count = bfd_count;
5332 
5333   amt = sizeof (struct map_stub) * (top_id + 1);
5334   htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
5335   if (htab->stub_group == NULL)
5336     return -1;
5337   htab->top_id = top_id;
5338 
5339   /* We can't use output_bfd->section_count here to find the top output
5340      section index as some sections may have been removed, and
5341      _bfd_strip_section_from_output doesn't renumber the indices.  */
5342   for (section = output_bfd->sections, top_index = 0;
5343        section != NULL;
5344        section = section->next)
5345     {
5346       if (top_index < section->index)
5347 	top_index = section->index;
5348     }
5349 
5350   htab->top_index = top_index;
5351   amt = sizeof (asection *) * (top_index + 1);
5352   input_list = (asection **) bfd_malloc (amt);
5353   htab->input_list = input_list;
5354   if (input_list == NULL)
5355     return -1;
5356 
5357   /* For sections we aren't interested in, mark their entries with a
5358      value we can check later.  */
5359   list = input_list + top_index;
5360   do
5361     *list = bfd_abs_section_ptr;
5362   while (list-- != input_list);
5363 
5364   for (section = output_bfd->sections;
5365        section != NULL;
5366        section = section->next)
5367     {
5368       if ((section->flags & SEC_CODE) != 0)
5369 	input_list[section->index] = NULL;
5370     }
5371 
5372   return 1;
5373 }
5374 
5375 /* The linker repeatedly calls this function for each input section,
5376    in the order that input sections are linked into output sections.
5377    Build lists of input sections to determine groupings between which
5378    we may insert linker stubs.  */
5379 
5380 void
5381 elf32_arm_next_input_section (struct bfd_link_info *info,
5382 			      asection *isec)
5383 {
5384   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5385 
5386   if (htab == NULL)
5387     return;
5388 
5389   if (isec->output_section->index <= htab->top_index)
5390     {
5391       asection **list = htab->input_list + isec->output_section->index;
5392 
5393       if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
5394 	{
5395 	  /* Steal the link_sec pointer for our list.  */
5396 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
5397 	  /* This happens to make the list in reverse order,
5398 	     which we reverse later.  */
5399 	  PREV_SEC (isec) = *list;
5400 	  *list = isec;
5401 	}
5402     }
5403 }
5404 
5405 /* See whether we can group stub sections together.  Grouping stub
5406    sections may result in fewer stubs.  More importantly, we need to
5407    put all .init* and .fini* stubs at the end of the .init or
5408    .fini output sections respectively, because glibc splits the
5409    _init and _fini functions into multiple parts.  Putting a stub in
5410    the middle of a function is not a good idea.  */
5411 
5412 static void
5413 group_sections (struct elf32_arm_link_hash_table *htab,
5414 		bfd_size_type stub_group_size,
5415 		bfd_boolean stubs_always_after_branch)
5416 {
5417   asection **list = htab->input_list;
5418 
5419   do
5420     {
5421       asection *tail = *list;
5422       asection *head;
5423 
5424       if (tail == bfd_abs_section_ptr)
5425 	continue;
5426 
5427       /* Reverse the list: we must avoid placing stubs at the
5428 	 beginning of the section because the beginning of the text
5429 	 section may be required for an interrupt vector in bare metal
5430 	 code.  */
5431 #define NEXT_SEC PREV_SEC
5432       head = NULL;
5433       while (tail != NULL)
5434 	{
5435 	  /* Pop from tail.  */
5436 	  asection *item = tail;
5437 	  tail = PREV_SEC (item);
5438 
5439 	  /* Push on head.  */
5440 	  NEXT_SEC (item) = head;
5441 	  head = item;
5442 	}
5443 
5444       while (head != NULL)
5445 	{
5446 	  asection *curr;
5447 	  asection *next;
5448 	  bfd_vma stub_group_start = head->output_offset;
5449 	  bfd_vma end_of_next;
5450 
5451 	  curr = head;
5452 	  while (NEXT_SEC (curr) != NULL)
5453 	    {
5454 	      next = NEXT_SEC (curr);
5455 	      end_of_next = next->output_offset + next->size;
5456 	      if (end_of_next - stub_group_start >= stub_group_size)
5457 		/* End of NEXT is too far from start, so stop.  */
5458 		break;
5459 	      /* Add NEXT to the group.  */
5460 	      curr = next;
5461 	    }
5462 
5463 	  /* OK, the size from the start to the start of CURR is less
5464 	     than stub_group_size and thus can be handled by one stub
5465 	     section.  (Or the head section is itself larger than
5466 	     stub_group_size, in which case we may be toast.)
5467 	     We should really be keeping track of the total size of
5468 	     stubs added here, as stubs contribute to the final output
5469 	     section size.  */
5470 	  do
5471 	    {
5472 	      next = NEXT_SEC (head);
5473 	      /* Set up this stub group.  */
5474 	      htab->stub_group[head->id].link_sec = curr;
5475 	    }
5476 	  while (head != curr && (head = next) != NULL);
5477 
5478 	  /* But wait, there's more!  Input sections up to stub_group_size
5479 	     bytes after the stub section can be handled by it too.  */
5480 	  if (!stubs_always_after_branch)
5481 	    {
5482 	      stub_group_start = curr->output_offset + curr->size;
5483 
5484 	      while (next != NULL)
5485 		{
5486 		  end_of_next = next->output_offset + next->size;
5487 		  if (end_of_next - stub_group_start >= stub_group_size)
5488 		    /* End of NEXT is too far from stubs, so stop.  */
5489 		    break;
5490 		  /* Add NEXT to the stub group.  */
5491 		  head = next;
5492 		  next = NEXT_SEC (head);
5493 		  htab->stub_group[head->id].link_sec = curr;
5494 		}
5495 	    }
5496 	  head = next;
5497 	}
5498     }
5499   while (list++ != htab->input_list + htab->top_index);
5500 
5501   free (htab->input_list);
5502 #undef PREV_SEC
5503 #undef NEXT_SEC
5504 }
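/* Editor's note, worked example with hypothetical numbers: with
   stub_group_size = 0x1000 and code sections A (output offset 0, size 0x600),
   B (0x600, 0x600) and C (0xc00, 0x600), the first loop stops before C
   because C would end 0x1200 bytes after the group start, so A and B form a
   group whose stub section is associated with B; when stubs may also precede
   branches, C joins the same group because its end lies within 0x1000 bytes
   of the end of B.  */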
5505 
5506 /* Comparison function for sorting/searching relocations relating to Cortex-A8
5507    erratum fix.  */
5508 
5509 static int
5510 a8_reloc_compare (const void *a, const void *b)
5511 {
5512   const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
5513   const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
5514 
5515   if (ra->from < rb->from)
5516     return -1;
5517   else if (ra->from > rb->from)
5518     return 1;
5519   else
5520     return 0;
5521 }
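/* Editor's sketch (not part of the original source): a8_reloc_compare is
   used with bsearch in cortex_a8_erratum_scan below, which assumes the
   A8_RELOCS array was sorted beforehand with the same comparator,
   presumably along these lines:  */
#if 0
  qsort (a8_relocs, num_a8_relocs, sizeof (struct a8_erratum_reloc),
	 &a8_reloc_compare);
#endif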
5522 
5523 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
5524 						    const char *, char **);
5525 
5526 /* Helper function to scan code for sequences which might trigger the Cortex-A8
5527    branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
5528    NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
5529    otherwise.  */
5530 
5531 static bfd_boolean
5532 cortex_a8_erratum_scan (bfd *input_bfd,
5533 			struct bfd_link_info *info,
5534 			struct a8_erratum_fix **a8_fixes_p,
5535 			unsigned int *num_a8_fixes_p,
5536 			unsigned int *a8_fix_table_size_p,
5537 			struct a8_erratum_reloc *a8_relocs,
5538 			unsigned int num_a8_relocs,
5539 			unsigned prev_num_a8_fixes,
5540 			bfd_boolean *stub_changed_p)
5541 {
5542   asection *section;
5543   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5544   struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
5545   unsigned int num_a8_fixes = *num_a8_fixes_p;
5546   unsigned int a8_fix_table_size = *a8_fix_table_size_p;
5547 
5548   if (htab == NULL)
5549     return FALSE;
5550 
5551   for (section = input_bfd->sections;
5552        section != NULL;
5553        section = section->next)
5554     {
5555       bfd_byte *contents = NULL;
5556       struct _arm_elf_section_data *sec_data;
5557       unsigned int span;
5558       bfd_vma base_vma;
5559 
5560       if (elf_section_type (section) != SHT_PROGBITS
5561 	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
5562 	  || (section->flags & SEC_EXCLUDE) != 0
5563 	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
5564 	  || (section->output_section == bfd_abs_section_ptr))
5565 	continue;
5566 
5567       base_vma = section->output_section->vma + section->output_offset;
5568 
5569       if (elf_section_data (section)->this_hdr.contents != NULL)
5570 	contents = elf_section_data (section)->this_hdr.contents;
5571       else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
5572 	return TRUE;
5573 
5574       sec_data = elf32_arm_section_data (section);
5575 
5576       for (span = 0; span < sec_data->mapcount; span++)
5577 	{
5578 	  unsigned int span_start = sec_data->map[span].vma;
5579 	  unsigned int span_end = (span == sec_data->mapcount - 1)
5580 	    ? section->size : sec_data->map[span + 1].vma;
5581 	  unsigned int i;
5582 	  char span_type = sec_data->map[span].type;
5583 	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5584 
5585 	  if (span_type != 't')
5586 	    continue;
5587 
5588 	  /* Span is entirely within a single 4KB region: skip scanning.  */
5589 	  if (((base_vma + span_start) & ~0xfff)
5590 	      == ((base_vma + span_end) & ~0xfff))
5591 	    continue;
5592 
5593 	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5594 
5595 	       * The opcode is BLX.W, BL.W, B.W, Bcc.W
5596 	       * The branch target is in the same 4KB region as the
5597 		 first half of the branch.
5598 	       * The instruction before the branch is a 32-bit
5599 		 length non-branch instruction.  */
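	  /* For example, with 4KB regions a 32-bit branch whose first halfword
	     sits at page offset 0xffe has its second halfword at offset 0 of
	     the next page; the ((base_vma + i) & 0xfff) == 0xffe test below
	     catches exactly this placement.  */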
5600 	  for (i = span_start; i < span_end;)
5601 	    {
5602 	      unsigned int insn = bfd_getl16 (&contents[i]);
5603 	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5604 	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5605 
5606 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5607 		insn_32bit = TRUE;
5608 
5609 	      if (insn_32bit)
5610 		{
5611 		  /* Load the rest of the insn (in manual-friendly order).  */
5612 		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5613 
5614 		  /* Encoding T4: B<c>.W.  */
5615 		  is_b = (insn & 0xf800d000) == 0xf0009000;
5616 		  /* Encoding T1: BL<c>.W.  */
5617 		  is_bl = (insn & 0xf800d000) == 0xf000d000;
5618 		  /* Encoding T2: BLX<c>.W.  */
5619 		  is_blx = (insn & 0xf800d000) == 0xf000c000;
5620 		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
5621 		  is_bcc = (insn & 0xf800d000) == 0xf0008000
5622 			   && (insn & 0x07f00000) != 0x03800000;
5623 		}
5624 
5625 	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5626 
5627 	      if (((base_vma + i) & 0xfff) == 0xffe
5628 		  && insn_32bit
5629 		  && is_32bit_branch
5630 		  && last_was_32bit
5631 		  && ! last_was_branch)
5632 		{
5633 		  bfd_signed_vma offset = 0;
5634 		  bfd_boolean force_target_arm = FALSE;
5635 		  bfd_boolean force_target_thumb = FALSE;
5636 		  bfd_vma target;
5637 		  enum elf32_arm_stub_type stub_type = arm_stub_none;
5638 		  struct a8_erratum_reloc key, *found;
5639 		  bfd_boolean use_plt = FALSE;
5640 
5641 		  key.from = base_vma + i;
5642 		  found = (struct a8_erratum_reloc *)
5643 		      bsearch (&key, a8_relocs, num_a8_relocs,
5644 			       sizeof (struct a8_erratum_reloc),
5645 			       &a8_reloc_compare);
5646 
5647 		  if (found)
5648 		    {
5649 		      char *error_message = NULL;
5650 		      struct elf_link_hash_entry *entry;
5651 
5652 		      /* We don't care about the error returned from this
5653 			 function, only if there is glue or not.  */
5654 		      entry = find_thumb_glue (info, found->sym_name,
5655 					       &error_message);
5656 
5657 		      if (entry)
5658 			found->non_a8_stub = TRUE;
5659 
5660 		      /* Keep a simpler condition, for the sake of clarity.  */
5661 		      if (htab->root.splt != NULL && found->hash != NULL
5662 			  && found->hash->root.plt.offset != (bfd_vma) -1)
5663 			use_plt = TRUE;
5664 
5665 		      if (found->r_type == R_ARM_THM_CALL)
5666 			{
5667 			  if (found->branch_type == ST_BRANCH_TO_ARM
5668 			      || use_plt)
5669 			    force_target_arm = TRUE;
5670 			  else
5671 			    force_target_thumb = TRUE;
5672 			}
5673 		    }
5674 
5675 		  /* Check if we have an offending branch instruction.  */
5676 
5677 		  if (found && found->non_a8_stub)
5678 		    /* We've already made a stub for this instruction, e.g.
5679 		       it's a long branch or a Thumb->ARM stub.  Assume that
5680 		       stub will suffice to work around the A8 erratum (see
5681 		       setting of always_after_branch above).  */
5682 		    ;
5683 		  else if (is_bcc)
5684 		    {
5685 		      offset = (insn & 0x7ff) << 1;
5686 		      offset |= (insn & 0x3f0000) >> 4;
5687 		      offset |= (insn & 0x2000) ? 0x40000 : 0;
5688 		      offset |= (insn & 0x800) ? 0x80000 : 0;
5689 		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
5690 		      if (offset & 0x100000)
5691 			offset |= ~ ((bfd_signed_vma) 0xfffff);
5692 		      stub_type = arm_stub_a8_veneer_b_cond;
5693 		    }
5694 		  else if (is_b || is_bl || is_blx)
5695 		    {
5696 		      int s = (insn & 0x4000000) != 0;
5697 		      int j1 = (insn & 0x2000) != 0;
5698 		      int j2 = (insn & 0x800) != 0;
5699 		      int i1 = !(j1 ^ s);
5700 		      int i2 = !(j2 ^ s);
5701 
5702 		      offset = (insn & 0x7ff) << 1;
5703 		      offset |= (insn & 0x3ff0000) >> 4;
5704 		      offset |= i2 << 22;
5705 		      offset |= i1 << 23;
5706 		      offset |= s << 24;
5707 		      if (offset & 0x1000000)
5708 			offset |= ~ ((bfd_signed_vma) 0xffffff);
5709 
5710 		      if (is_blx)
5711 			offset &= ~ ((bfd_signed_vma) 3);
5712 
5713 		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
5714 			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5715 		    }
5716 
5717 		  if (stub_type != arm_stub_none)
5718 		    {
5719 		      bfd_vma pc_for_insn = base_vma + i + 4;
5720 
5721 		      /* The original instruction is a BL, but the target is
5722 			 an ARM instruction.  If we were not making a stub,
5723 			 the BL would have been converted to a BLX.  Use the
5724 			 BLX stub instead in that case.  */
5725 		      if (htab->use_blx && force_target_arm
5726 			  && stub_type == arm_stub_a8_veneer_bl)
5727 			{
5728 			  stub_type = arm_stub_a8_veneer_blx;
5729 			  is_blx = TRUE;
5730 			  is_bl = FALSE;
5731 			}
5732 		      /* Conversely, if the original instruction was
5733 			 BLX but the target is Thumb mode, use the BL
5734 			 stub.  */
5735 		      else if (force_target_thumb
5736 			       && stub_type == arm_stub_a8_veneer_blx)
5737 			{
5738 			  stub_type = arm_stub_a8_veneer_bl;
5739 			  is_blx = FALSE;
5740 			  is_bl = TRUE;
5741 			}
5742 
5743 		      if (is_blx)
5744 			pc_for_insn &= ~ ((bfd_vma) 3);
5745 
5746 		      /* If we found a relocation, use the proper destination,
5747 			 not the offset in the (unrelocated) instruction.
5748 			 Note this is always done if we switched the stub type
5749 			 above.  */
5750 		      if (found)
5751 			offset =
5752 			  (bfd_signed_vma) (found->destination - pc_for_insn);
5753 
5754 		      /* If the stub will use a Thumb-mode branch to a
5755 			 PLT target, redirect it to the preceding Thumb
5756 			 entry point.  */
5757 		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5758 			offset -= PLT_THUMB_STUB_SIZE;
5759 
5760 		      target = pc_for_insn + offset;
5761 
5762 		      /* The BLX stub is ARM-mode code.  Adjust the offset to
5763 			 take the different PC value (+8 instead of +4) into
5764 			 account.  */
5765 		      if (stub_type == arm_stub_a8_veneer_blx)
5766 			offset += 4;
5767 
5768 		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5769 			{
5770 			  char *stub_name = NULL;
5771 
5772 			  if (num_a8_fixes == a8_fix_table_size)
5773 			    {
5774 			      a8_fix_table_size *= 2;
5775 			      a8_fixes = (struct a8_erratum_fix *)
5776 				  bfd_realloc (a8_fixes,
5777 					       sizeof (struct a8_erratum_fix)
5778 					       * a8_fix_table_size);
5779 			    }
5780 
5781 			  if (num_a8_fixes < prev_num_a8_fixes)
5782 			    {
5783 			      /* If we're doing a subsequent scan,
5784 				 check if we've found the same fix as
5785 				 before, and try and reuse the stub
5786 				 name.  */
5787 			      stub_name = a8_fixes[num_a8_fixes].stub_name;
5788 			      if ((a8_fixes[num_a8_fixes].section != section)
5789 				  || (a8_fixes[num_a8_fixes].offset != i))
5790 				{
5791 				  free (stub_name);
5792 				  stub_name = NULL;
5793 				  *stub_changed_p = TRUE;
5794 				}
5795 			    }
5796 
5797 			  if (!stub_name)
5798 			    {
5799 			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5800 			      if (stub_name != NULL)
5801 				sprintf (stub_name, "%x:%x", section->id, i);
5802 			    }
5803 
5804 			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5805 			  a8_fixes[num_a8_fixes].section = section;
5806 			  a8_fixes[num_a8_fixes].offset = i;
5807 			  a8_fixes[num_a8_fixes].target_offset =
5808 			    target - base_vma;
5809 			  a8_fixes[num_a8_fixes].orig_insn = insn;
5810 			  a8_fixes[num_a8_fixes].stub_name = stub_name;
5811 			  a8_fixes[num_a8_fixes].stub_type = stub_type;
5812 			  a8_fixes[num_a8_fixes].branch_type =
5813 			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5814 
5815 			  num_a8_fixes++;
5816 			}
5817 		    }
5818 		}
5819 
5820 	      i += insn_32bit ? 4 : 2;
5821 	      last_was_32bit = insn_32bit;
5822 	      last_was_branch = is_32bit_branch;
5823 	    }
5824 	}
5825 
5826       if (elf_section_data (section)->this_hdr.contents == NULL)
5827 	free (contents);
5828     }
5829 
5830   *a8_fixes_p = a8_fixes;
5831   *num_a8_fixes_p = num_a8_fixes;
5832   *a8_fix_table_size_p = a8_fix_table_size;
5833 
5834   return FALSE;
5835 }
5836 
5837 /* Create or update a stub entry depending on whether the stub can already be
5838    found in HTAB.  The stub is identified by:
5839    - its type STUB_TYPE
5840    - its source branch (note that several can share the same stub) whose
5841      section and relocation (if any) are given by SECTION and IRELA
5842      respectively
5843    - its target symbol whose input section, hash, name, value and branch type
5844      are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5845      respectively
5846 
5847    If found, the value of the stub's target symbol is updated from SYM_VALUE
5848    and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
5849    TRUE and the stub entry is initialized.
5850 
5851    Returns the stub that was created or updated, or NULL if an error
5852    occurred.  */
5853 
5854 static struct elf32_arm_stub_hash_entry *
5855 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5856 		       enum elf32_arm_stub_type stub_type, asection *section,
5857 		       Elf_Internal_Rela *irela, asection *sym_sec,
5858 		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
5859 		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
5860 		       bfd_boolean *new_stub)
5861 {
5862   const asection *id_sec;
5863   char *stub_name;
5864   struct elf32_arm_stub_hash_entry *stub_entry;
5865   unsigned int r_type;
5866   bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5867 
5868   BFD_ASSERT (stub_type != arm_stub_none);
5869   *new_stub = FALSE;
5870 
5871   if (sym_claimed)
5872     stub_name = sym_name;
5873   else
5874     {
5875       BFD_ASSERT (irela);
5876       BFD_ASSERT (section);
5877       BFD_ASSERT (section->id <= htab->top_id);
5878 
5879       /* Support for grouping stub sections.  */
5880       id_sec = htab->stub_group[section->id].link_sec;
5881 
5882       /* Get the name of this stub.  */
5883       stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5884 				       stub_type);
5885       if (!stub_name)
5886 	return NULL;
5887     }
5888 
5889   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5890 				     FALSE);
5891   /* The proper stub has already been created, just update its value.  */
5892   if (stub_entry != NULL)
5893     {
5894       if (!sym_claimed)
5895 	free (stub_name);
5896       stub_entry->target_value = sym_value;
5897       return stub_entry;
5898     }
5899 
5900   stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5901   if (stub_entry == NULL)
5902     {
5903       if (!sym_claimed)
5904 	free (stub_name);
5905       return NULL;
5906     }
5907 
5908   stub_entry->target_value = sym_value;
5909   stub_entry->target_section = sym_sec;
5910   stub_entry->stub_type = stub_type;
5911   stub_entry->h = hash;
5912   stub_entry->branch_type = branch_type;
5913 
5914   if (sym_claimed)
5915     stub_entry->output_name = sym_name;
5916   else
5917     {
5918       if (sym_name == NULL)
5919 	sym_name = "unnamed";
5920       stub_entry->output_name = (char *)
5921 	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5922 				   + strlen (sym_name));
5923       if (stub_entry->output_name == NULL)
5924 	{
5925 	  free (stub_name);
5926 	  return NULL;
5927 	}
5928 
5929       /* For historical reasons, use the existing names for ARM-to-Thumb and
5930 	 Thumb-to-ARM stubs.  */
5931       r_type = ELF32_R_TYPE (irela->r_info);
5932       if ((r_type == (unsigned int) R_ARM_THM_CALL
5933 	   || r_type == (unsigned int) R_ARM_THM_JUMP24
5934 	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
5935 	  && branch_type == ST_BRANCH_TO_ARM)
5936 	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5937       else if ((r_type == (unsigned int) R_ARM_CALL
5938 		|| r_type == (unsigned int) R_ARM_JUMP24)
5939 	       && branch_type == ST_BRANCH_TO_THUMB)
5940 	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5941       else
5942 	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5943     }
5944 
5945   *new_stub = TRUE;
5946   return stub_entry;
5947 }
5948 
5949 /* Scan symbols in INPUT_BFD to identify secure entry functions needing a
5950    gateway veneer to transition from non secure to secure state and create them
5951    accordingly.
5952 
5953    "ARMv8-M Security Extensions: Requirements on Development Tools" document
5954    defines the conditions that govern Secure Gateway veneer creation for a
5955    given symbol <SYM> as follows:
5956    - it has function type
5957    - it has non local binding
5958    - a symbol named __acle_se_<SYM> (called special symbol) exists with the
5959      same type, binding and value as <SYM> (called normal symbol).
5960    An entry function can handle secure state transition itself in which case
5961    its special symbol would have a different value from the normal symbol.
5962 
5963    OUT_ATTR gives the output attributes, SYM_HASHES the symbol index to hash
5964    entry mapping while HTAB gives the name to hash entry mapping.
5965    *CMSE_STUB_CREATED is increased by the number of secure gateway veneer
5966    *CMSE_STUB_CREATED is increased by the number of secure gateway veneers
5967    created.
5968    The return value gives whether a stub failed to be allocated.  */
5969    Returns FALSE if an error occurred, TRUE otherwise.  */
5970 static bfd_boolean
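/* Editor's note: for example, a secure entry function `foo' is accompanied in
   its object file by a symbol `__acle_se_foo' with the same type, binding and
   value; cmse_scan below then records a secure gateway veneer under the
   name `foo'.  */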
5971 cmse_scan (bfd *input_bfd, struct elf32_arm_link_hash_table *htab,
5972 	   obj_attribute *out_attr, struct elf_link_hash_entry **sym_hashes,
5973 	   int *cmse_stub_created)
5974 {
5975   const struct elf_backend_data *bed;
5976   Elf_Internal_Shdr *symtab_hdr;
5977   unsigned i, j, sym_count, ext_start;
5978   Elf_Internal_Sym *cmse_sym, *local_syms;
5979   struct elf32_arm_link_hash_entry *hash, *cmse_hash = NULL;
5980   enum arm_st_branch_type branch_type;
5981   char *sym_name, *lsym_name;
5982   bfd_vma sym_value;
5983   asection *section;
5984   struct elf32_arm_stub_hash_entry *stub_entry;
5985   bfd_boolean is_v8m, new_stub, cmse_invalid, ret = TRUE;
5986 
5987   bed = get_elf_backend_data (input_bfd);
5988   symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5989   sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
5990   ext_start = symtab_hdr->sh_info;
5991   is_v8m = (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
5992 	    && out_attr[Tag_CPU_arch_profile].i == 'M');
5993 
5994   local_syms = (Elf_Internal_Sym *) symtab_hdr->contents;
5995   if (local_syms == NULL)
5996     local_syms = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5997 				       symtab_hdr->sh_info, 0, NULL, NULL,
5998 				       NULL);
5999   if (symtab_hdr->sh_info && local_syms == NULL)
6000     return FALSE;
6001 
6002   /* Scan symbols.  */
6003   for (i = 0; i < sym_count; i++)
6004     {
6005       cmse_invalid = FALSE;
6006 
6007       if (i < ext_start)
6008 	{
6009 	  cmse_sym = &local_syms[i];
6010 	  sym_name = bfd_elf_string_from_elf_section (input_bfd,
6011 						      symtab_hdr->sh_link,
6012 						      cmse_sym->st_name);
6013 	  if (!sym_name || !CONST_STRNEQ (sym_name, CMSE_PREFIX))
6014 	    continue;
6015 
6016 	  /* Special symbol with local binding.  */
6017 	  cmse_invalid = TRUE;
6018 	}
6019       else
6020 	{
6021 	  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
6022 	  sym_name = (char *) cmse_hash->root.root.root.string;
6023 	  if (!CONST_STRNEQ (sym_name, CMSE_PREFIX))
6024 	    continue;
6025 
6026 	  /* Special symbol has incorrect binding or type.  */
6027 	  if ((cmse_hash->root.root.type != bfd_link_hash_defined
6028 	       && cmse_hash->root.root.type != bfd_link_hash_defweak)
6029 	      || cmse_hash->root.type != STT_FUNC)
6030 	    cmse_invalid = TRUE;
6031 	}
6032 
6033       if (!is_v8m)
6034 	{
6035 	  _bfd_error_handler (_("%pB: special symbol `%s' only allowed for "
6036 				"ARMv8-M architecture or later"),
6037 			      input_bfd, sym_name);
6038 	  is_v8m = TRUE; /* Avoid multiple warnings.  */
6039 	  ret = FALSE;
6040 	}
6041 
6042       if (cmse_invalid)
6043 	{
6044 	  _bfd_error_handler (_("%pB: invalid special symbol `%s'; it must be"
6045 				" a global or weak function symbol"),
6046 			      input_bfd, sym_name);
6047 	  ret = FALSE;
6048 	  if (i < ext_start)
6049 	    continue;
6050 	}
6051 
6052       sym_name += strlen (CMSE_PREFIX);
6053       hash = (struct elf32_arm_link_hash_entry *)
6054 	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6055 
6056       /* No associated normal symbol or it is neither global nor weak.  */
6057       if (!hash
6058 	  || (hash->root.root.type != bfd_link_hash_defined
6059 	      && hash->root.root.type != bfd_link_hash_defweak)
6060 	  || hash->root.type != STT_FUNC)
6061 	{
6062 	  /* Initialize here to avoid warning about use of possibly
6063 	     uninitialized variable.  */
6064 	  j = 0;
6065 
6066 	  if (!hash)
6067 	    {
6068 	      /* Searching for a normal symbol with local binding.  */
6069 	      for (; j < ext_start; j++)
6070 		{
6071 		  lsym_name =
6072 		    bfd_elf_string_from_elf_section (input_bfd,
6073 						     symtab_hdr->sh_link,
6074 						     local_syms[j].st_name);
6075 		  if (!strcmp (sym_name, lsym_name))
6076 		    break;
6077 		}
6078 	    }
6079 
6080 	  if (hash || j < ext_start)
6081 	    {
6082 	      _bfd_error_handler
6083 		(_("%pB: invalid standard symbol `%s'; it must be "
6084 		   "a global or weak function symbol"),
6085 		 input_bfd, sym_name);
6086 	    }
6087 	  else
6088 	    _bfd_error_handler
6089 	      (_("%pB: absent standard symbol `%s'"), input_bfd, sym_name);
6090 	  ret = FALSE;
6091 	  if (!hash)
6092 	    continue;
6093 	}
6094 
6095       sym_value = hash->root.root.u.def.value;
6096       section = hash->root.root.u.def.section;
6097 
6098       if (cmse_hash->root.root.u.def.section != section)
6099 	{
6100 	  _bfd_error_handler
6101 	    (_("%pB: `%s' and its special symbol are in different sections"),
6102 	     input_bfd, sym_name);
6103 	  ret = FALSE;
6104 	}
6105       if (cmse_hash->root.root.u.def.value != sym_value)
6106 	continue; /* Ignore: could be an entry function starting with SG.  */
6107 
6108       /* If this section is a link-once section that will be discarded, then
6109 	 don't create any stubs.  */
6110       if (section->output_section == NULL)
6111 	{
6112 	  _bfd_error_handler
6113 	    (_("%pB: entry function `%s' not output"), input_bfd, sym_name);
6114 	  continue;
6115 	}
6116 
6117       if (hash->root.size == 0)
6118 	{
6119 	  _bfd_error_handler
6120 	    (_("%pB: entry function `%s' is empty"), input_bfd, sym_name);
6121 	  ret = FALSE;
6122 	}
6123 
6124       if (!ret)
6125 	continue;
6126       branch_type = ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6127       stub_entry
6128 	= elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6129 				 NULL, NULL, section, hash, sym_name,
6130 				 sym_value, branch_type, &new_stub);
6131 
6132       if (stub_entry == NULL)
6133 	 ret = FALSE;
6134       else
6135 	{
6136 	  BFD_ASSERT (new_stub);
6137 	  (*cmse_stub_created)++;
6138 	}
6139     }
6140 
6141   if (!symtab_hdr->contents)
6142     free (local_syms);
6143   return ret;
6144 }
6145 
6146 /* Return TRUE iff a symbol identified by its linker HASH entry is a secure
6147    code entry function, i.e. it can be called from non secure code without
6148    using a veneer.  */
6149 
6150 static bfd_boolean
6151 cmse_entry_fct_p (struct elf32_arm_link_hash_entry *hash)
6152 {
6153   bfd_byte contents[4];
6154   uint32_t first_insn;
6155   asection *section;
6156   file_ptr offset;
6157   bfd *abfd;
6158 
6159   /* Defined symbol of function type.  */
6160   if (hash->root.root.type != bfd_link_hash_defined
6161       && hash->root.root.type != bfd_link_hash_defweak)
6162     return FALSE;
6163   if (hash->root.type != STT_FUNC)
6164     return FALSE;
6165 
6166   /* Read first instruction.  */
6167   section = hash->root.root.u.def.section;
6168   abfd = section->owner;
6169   offset = hash->root.root.u.def.value - section->vma;
6170   if (!bfd_get_section_contents (abfd, section, contents, offset,
6171 				 sizeof (contents)))
6172     return FALSE;
6173 
6174   first_insn = bfd_get_32 (abfd, contents);
6175 
6176   /* The function starts with an SG instruction (encoded as 0xe97f 0xe97f).  */
6177   return first_insn == 0xe97fe97f;
6178 }
6179 
6180 /* Output the name (in the symbol table) of the veneer GEN_ENTRY if it is a new
6181    secure gateway veneer (i.e. the veneer was not in the input import library)
6182    and there is no output import library (GEN_INFO->out_implib_bfd is NULL).  */
6183 
6184 static bfd_boolean
6185 arm_list_new_cmse_stub (struct bfd_hash_entry *gen_entry, void *gen_info)
6186 {
6187   struct elf32_arm_stub_hash_entry *stub_entry;
6188   struct bfd_link_info *info;
6189 
6190   /* Massage our args to the form they really have.  */
6191   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
6192   info = (struct bfd_link_info *) gen_info;
6193 
6194   if (info->out_implib_bfd)
6195     return TRUE;
6196 
6197   if (stub_entry->stub_type != arm_stub_cmse_branch_thumb_only)
6198     return TRUE;
6199 
6200   if (stub_entry->stub_offset == (bfd_vma) -1)
6201     _bfd_error_handler ("  %s", stub_entry->output_name);
6202 
6203   return TRUE;
6204 }
6205 
6206 /* Set the offset of each secure gateway veneer so that its address remains
6207    identical to the one in the input import library referred to by
6208    HTAB->in_implib_bfd.  A warning is issued for veneers that disappeared
6209    (present in the input import library but absent from the executable being
6210    linked) or if new veneers appeared and there is no output import library
6211    (INFO->out_implib_bfd is NULL and *CMSE_STUB_CREATED is bigger than the
6212    number of secure gateway veneers found in the input import library).
6213 
6214    Returns FALSE if an error occurred, TRUE otherwise.  If no error occurred,
6215    *CMSE_STUB_CREATED gives the number of SG veneers created by both cmse_scan
6216    and this function, and HTAB->new_cmse_stub_offset is set just past the
6217    highest veneer observed, so that new veneers are laid out after it.  */
6218 
6219 static bfd_boolean
6220 set_cmse_veneer_addr_from_implib (struct bfd_link_info *info,
6221 				  struct elf32_arm_link_hash_table *htab,
6222 				  int *cmse_stub_created)
6223 {
6224   long symsize;
6225   char *sym_name;
6226   flagword flags;
6227   long i, symcount;
6228   bfd *in_implib_bfd;
6229   asection *stub_out_sec;
6230   bfd_boolean ret = TRUE;
6231   Elf_Internal_Sym *intsym;
6232   const char *out_sec_name;
6233   bfd_size_type cmse_stub_size;
6234   asymbol **sympp = NULL, *sym;
6235   struct elf32_arm_link_hash_entry *hash;
6236   const insn_sequence *cmse_stub_template;
6237   struct elf32_arm_stub_hash_entry *stub_entry;
6238   int cmse_stub_template_size, new_cmse_stubs_created = *cmse_stub_created;
6239   bfd_vma veneer_value, stub_offset, next_cmse_stub_offset;
6240   bfd_vma cmse_stub_array_start = (bfd_vma) -1, cmse_stub_sec_vma = 0;
6241 
6242   /* No input secure gateway import library.  */
6243   if (!htab->in_implib_bfd)
6244     return TRUE;
6245 
6246   in_implib_bfd = htab->in_implib_bfd;
6247   if (!htab->cmse_implib)
6248     {
6249       _bfd_error_handler (_("%pB: --in-implib only supported for Secure "
6250 			    "Gateway import libraries"), in_implib_bfd);
6251       return FALSE;
6252     }
6253 
6254   /* Get symbol table size.  */
6255   symsize = bfd_get_symtab_upper_bound (in_implib_bfd);
6256   if (symsize < 0)
6257     return FALSE;
6258 
6259   /* Read in the input secure gateway import library's symbol table.  */
6260   sympp = (asymbol **) bfd_malloc (symsize);
6261   if (sympp == NULL)
6262     return FALSE;
6263 
6264   symcount = bfd_canonicalize_symtab (in_implib_bfd, sympp);
6265   if (symcount < 0)
6266     {
6267       ret = FALSE;
6268       goto free_sym_buf;
6269     }
6270 
6271   htab->new_cmse_stub_offset = 0;
6272   cmse_stub_size =
6273     find_stub_size_and_template (arm_stub_cmse_branch_thumb_only,
6274 				 &cmse_stub_template,
6275 				 &cmse_stub_template_size);
6276   out_sec_name =
6277     arm_dedicated_stub_output_section_name (arm_stub_cmse_branch_thumb_only);
6278   stub_out_sec =
6279     bfd_get_section_by_name (htab->obfd, out_sec_name);
6280   if (stub_out_sec != NULL)
6281     cmse_stub_sec_vma = stub_out_sec->vma;
6282 
6283   /* Set addresses of veneers mentioned in the input secure gateway import
6284      library's symbol table.  */
6285   for (i = 0; i < symcount; i++)
6286     {
6287       sym = sympp[i];
6288       flags = sym->flags;
6289       sym_name = (char *) bfd_asymbol_name (sym);
6290       intsym = &((elf_symbol_type *) sym)->internal_elf_sym;
6291 
6292       if (sym->section != bfd_abs_section_ptr
6293 	  || !(flags & (BSF_GLOBAL | BSF_WEAK))
6294 	  || (flags & BSF_FUNCTION) != BSF_FUNCTION
6295 	  || (ARM_GET_SYM_BRANCH_TYPE (intsym->st_target_internal)
6296 	      != ST_BRANCH_TO_THUMB))
6297 	{
6298 	  _bfd_error_handler (_("%pB: invalid import library entry: `%s'; "
6299 				"symbol should be absolute, global and "
6300 				"refer to Thumb functions"),
6301 			      in_implib_bfd, sym_name);
6302 	  ret = FALSE;
6303 	  continue;
6304 	}
6305 
6306       veneer_value = bfd_asymbol_value (sym);
6307       stub_offset = veneer_value - cmse_stub_sec_vma;
6308       stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, sym_name,
6309 					 FALSE, FALSE);
6310       hash = (struct elf32_arm_link_hash_entry *)
6311 	elf_link_hash_lookup (&(htab)->root, sym_name, FALSE, FALSE, TRUE);
6312 
6313       /* The stub entry should have been created by cmse_scan, or the symbol
6314 	 should be that of a secure function callable from non secure code.  */
6315       if (!stub_entry && !hash)
6316 	{
6317 	  bfd_boolean new_stub;
6318 
6319 	  _bfd_error_handler
6320 	    (_("entry function `%s' disappeared from secure code"), sym_name);
6321 	  hash = (struct elf32_arm_link_hash_entry *)
6322 	    elf_link_hash_lookup (&(htab)->root, sym_name, TRUE, TRUE, TRUE);
6323 	  stub_entry
6324 	    = elf32_arm_create_stub (htab, arm_stub_cmse_branch_thumb_only,
6325 				     NULL, NULL, bfd_abs_section_ptr, hash,
6326 				     sym_name, veneer_value,
6327 				     ST_BRANCH_TO_THUMB, &new_stub);
6328 	  if (stub_entry == NULL)
6329 	    ret = FALSE;
6330 	  else
6331 	  {
6332 	    BFD_ASSERT (new_stub);
6333 	    new_cmse_stubs_created++;
6334 	    (*cmse_stub_created)++;
6335 	  }
6336 	  stub_entry->stub_template_size = stub_entry->stub_size = 0;
6337 	  stub_entry->stub_offset = stub_offset;
6338 	}
6339       /* Symbol found is not callable from non secure code.  */
6340       else if (!stub_entry)
6341 	{
6342 	  if (!cmse_entry_fct_p (hash))
6343 	    {
6344 	      _bfd_error_handler (_("`%s' refers to a non entry function"),
6345 				  sym_name);
6346 	      ret = FALSE;
6347 	    }
6348 	  continue;
6349 	}
6350       else
6351 	{
6352 	  /* Only stubs for SG veneers should have been created.  */
6353 	  BFD_ASSERT (stub_entry->stub_type == arm_stub_cmse_branch_thumb_only);
6354 
6355 	  /* Check visibility hasn't changed.  */
6356 	  if (!!(flags & BSF_GLOBAL)
6357 	      != (hash->root.root.type == bfd_link_hash_defined))
6358 	    _bfd_error_handler
6359 	      (_("%pB: visibility of symbol `%s' has changed"), in_implib_bfd,
6360 	       sym_name);
6361 
6362 	  stub_entry->stub_offset = stub_offset;
6363 	}
6364 
6365       /* Size should match that of a SG veneer.  */
6366       if (intsym->st_size != cmse_stub_size)
6367 	{
6368 	  _bfd_error_handler (_("%pB: incorrect size for symbol `%s'"),
6369 			      in_implib_bfd, sym_name);
6370 	  ret = FALSE;
6371 	}
6372 
6373       /* Previous veneer address is before current SG veneer section.  */
6374       if (veneer_value < cmse_stub_sec_vma)
6375 	{
6376 	  /* Avoid offset underflow.  */
6377 	  if (stub_entry)
6378 	    stub_entry->stub_offset = 0;
6379 	  stub_offset = 0;
6380 	  ret = FALSE;
6381 	}
6382 
6383       /* Complain if stub offset not a multiple of stub size.  */
6384       if (stub_offset % cmse_stub_size)
6385 	{
6386 	  _bfd_error_handler
6387 	    (_("offset of veneer for entry function `%s' not a multiple of "
6388 	       "its size"), sym_name);
6389 	  ret = FALSE;
6390 	}
6391 
6392       if (!ret)
6393 	continue;
6394 
6395       new_cmse_stubs_created--;
6396       if (veneer_value < cmse_stub_array_start)
6397 	cmse_stub_array_start = veneer_value;
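      /* (cmse_stub_size + 7) & ~7 below rounds the stub size up to the next
	 multiple of 8: for example, a size of 8 stays 8, while a size of 12
	 would advance the next offset by 16.  */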
6398       next_cmse_stub_offset = stub_offset + ((cmse_stub_size + 7) & ~7);
6399       if (next_cmse_stub_offset > htab->new_cmse_stub_offset)
6400 	htab->new_cmse_stub_offset = next_cmse_stub_offset;
6401     }
6402 
6403   if (!info->out_implib_bfd && new_cmse_stubs_created != 0)
6404     {
6405       BFD_ASSERT (new_cmse_stubs_created > 0);
6406       _bfd_error_handler
6407 	(_("new entry function(s) introduced but no output import library "
6408 	   "specified:"));
6409       bfd_hash_traverse (&htab->stub_hash_table, arm_list_new_cmse_stub, info);
6410     }
6411 
6412   if (cmse_stub_array_start != cmse_stub_sec_vma)
6413     {
6414       _bfd_error_handler
6415 	(_("start address of `%s' is different from previous link"),
6416 	 out_sec_name);
6417       ret = FALSE;
6418     }
6419 
6420 free_sym_buf:
6421   free (sympp);
6422   return ret;
6423 }
6424 
6425 /* Determine and set the size of the stub section for a final link.
6426 
6427    The basic idea here is to examine all the relocations looking for
6428    PC-relative calls to a target that is unreachable with a "bl"
6429    instruction.  */
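
/* In outline, the sizing loop below repeats until it converges: scan every
   relocation of every input BFD, create any missing stubs (on the first pass
   also scanning for CMSE veneers, and recording Cortex-A8 erratum candidates
   when that fix is enabled), recompute the stub section sizes, ask the linker
   to lay the sections out again, and stop once no stub was added or
   resized.  */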
6430 
6431 bfd_boolean
6432 elf32_arm_size_stubs (bfd *output_bfd,
6433 		      bfd *stub_bfd,
6434 		      struct bfd_link_info *info,
6435 		      bfd_signed_vma group_size,
6436 		      asection * (*add_stub_section) (const char *, asection *,
6437 						      asection *,
6438 						      unsigned int),
6439 		      void (*layout_sections_again) (void))
6440 {
6441   bfd_boolean ret = TRUE;
6442   obj_attribute *out_attr;
6443   int cmse_stub_created = 0;
6444   bfd_size_type stub_group_size;
6445   bfd_boolean m_profile, stubs_always_after_branch, first_veneer_scan = TRUE;
6446   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
6447   struct a8_erratum_fix *a8_fixes = NULL;
6448   unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
6449   struct a8_erratum_reloc *a8_relocs = NULL;
6450   unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
6451 
6452   if (htab == NULL)
6453     return FALSE;
6454 
6455   if (htab->fix_cortex_a8)
6456     {
6457       a8_fixes = (struct a8_erratum_fix *)
6458 	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
6459       a8_relocs = (struct a8_erratum_reloc *)
6460 	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
6461     }
6462 
6463   /* Propagate mach to stub bfd, because it may not have been
6464      finalized when we created stub_bfd.  */
6465   bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
6466 		     bfd_get_mach (output_bfd));
6467 
6468   /* Stash our params away.  */
6469   htab->stub_bfd = stub_bfd;
6470   htab->add_stub_section = add_stub_section;
6471   htab->layout_sections_again = layout_sections_again;
6472   stubs_always_after_branch = group_size < 0;
6473 
6474   out_attr = elf_known_obj_attributes_proc (output_bfd);
6475   m_profile = out_attr[Tag_CPU_arch_profile].i == 'M';
6476 
6477   /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
6478      as the first half of a 32-bit branch straddling two 4K pages.  This is a
6479      crude way of enforcing that.  */
6480   if (htab->fix_cortex_a8)
6481     stubs_always_after_branch = 1;
6482 
6483   if (group_size < 0)
6484     stub_group_size = -group_size;
6485   else
6486     stub_group_size = group_size;
6487 
6488   if (stub_group_size == 1)
6489     {
6490       /* Default values.  */
6491       /* The Thumb branch range of +-4MB has to be used as the default
6492 	 maximum size (a given section can contain both ARM and Thumb
6493 	 code, so the worst case has to be taken into account).
6494 
6495 	 This value is 24K less than that, which allows for 2025
6496 	 12-byte stubs.  If we exceed that, then we will fail to link.
6497 	 The user will have to relink with an explicit group size
6498 	 option.  */
6499       stub_group_size = 4170000;
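      /* For reference: 4MB is 4194304 bytes, and 4194304 - 4170000 = 24304,
	 which indeed leaves room for the 2025 twelve-byte stubs (24300 bytes)
	 mentioned above.  */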
6500     }
6501 
6502   group_sections (htab, stub_group_size, stubs_always_after_branch);
6503 
6504   /* If we're applying the cortex A8 fix, we need to determine the
6505      program header size now, because we cannot change it later --
6506      that could alter section placements.  Notice the A8 erratum fix
6507      ends up requiring the section addresses to remain unchanged
6508      modulo the page size.  That's something we cannot represent
6509      inside BFD, and we don't want to force the section alignment to
6510      be the page size.  */
6511   if (htab->fix_cortex_a8)
6512     (*htab->layout_sections_again) ();
6513 
6514   while (1)
6515     {
6516       bfd *input_bfd;
6517       unsigned int bfd_indx;
6518       asection *stub_sec;
6519       enum elf32_arm_stub_type stub_type;
6520       bfd_boolean stub_changed = FALSE;
6521       unsigned prev_num_a8_fixes = num_a8_fixes;
6522 
6523       num_a8_fixes = 0;
6524       for (input_bfd = info->input_bfds, bfd_indx = 0;
6525 	   input_bfd != NULL;
6526 	   input_bfd = input_bfd->link.next, bfd_indx++)
6527 	{
6528 	  Elf_Internal_Shdr *symtab_hdr;
6529 	  asection *section;
6530 	  Elf_Internal_Sym *local_syms = NULL;
6531 
6532 	  if (!is_arm_elf (input_bfd))
6533 	    continue;
6534 	  if ((input_bfd->flags & DYNAMIC) != 0
6535 	      && (elf_sym_hashes (input_bfd) == NULL
6536 		  || (elf_dyn_lib_class (input_bfd) & DYN_AS_NEEDED) != 0))
6537 	    continue;
6538 
6539 	  num_a8_relocs = 0;
6540 
6541 	  /* We'll need the symbol table in a second.  */
6542 	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
6543 	  if (symtab_hdr->sh_info == 0)
6544 	    continue;
6545 
6546 	  /* Limit the scan of symbols to object files whose profile is
6547	     Microcontroller so as not to hinder performance in the general case.  */
6548 	  if (m_profile && first_veneer_scan)
6549 	    {
6550 	      struct elf_link_hash_entry **sym_hashes;
6551 
6552 	      sym_hashes = elf_sym_hashes (input_bfd);
6553 	      if (!cmse_scan (input_bfd, htab, out_attr, sym_hashes,
6554 			      &cmse_stub_created))
6555 		goto error_ret_free_local;
6556 
6557 	      if (cmse_stub_created != 0)
6558 		stub_changed = TRUE;
6559 	    }
6560 
6561 	  /* Walk over each section attached to the input bfd.  */
6562 	  for (section = input_bfd->sections;
6563 	       section != NULL;
6564 	       section = section->next)
6565 	    {
6566 	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
6567 
6568 	      /* If there aren't any relocs, then there's nothing more
6569 		 to do.  */
6570 	      if ((section->flags & SEC_RELOC) == 0
6571 		  || section->reloc_count == 0
6572 		  || (section->flags & SEC_CODE) == 0)
6573 		continue;
6574 
6575 	      /* If this section is a link-once section that will be
6576 		 discarded, then don't create any stubs.  */
6577 	      if (section->output_section == NULL
6578 		  || section->output_section->owner != output_bfd)
6579 		continue;
6580 
6581 	      /* Get the relocs.  */
6582 	      internal_relocs
6583 		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
6584 					     NULL, info->keep_memory);
6585 	      if (internal_relocs == NULL)
6586 		goto error_ret_free_local;
6587 
6588 	      /* Now examine each relocation.  */
6589 	      irela = internal_relocs;
6590 	      irelaend = irela + section->reloc_count;
6591 	      for (; irela < irelaend; irela++)
6592 		{
6593 		  unsigned int r_type, r_indx;
6594 		  asection *sym_sec;
6595 		  bfd_vma sym_value;
6596 		  bfd_vma destination;
6597 		  struct elf32_arm_link_hash_entry *hash;
6598 		  const char *sym_name;
6599 		  unsigned char st_type;
6600 		  enum arm_st_branch_type branch_type;
6601 		  bfd_boolean created_stub = FALSE;
6602 
6603 		  r_type = ELF32_R_TYPE (irela->r_info);
6604 		  r_indx = ELF32_R_SYM (irela->r_info);
6605 
6606 		  if (r_type >= (unsigned int) R_ARM_max)
6607 		    {
6608 		      bfd_set_error (bfd_error_bad_value);
6609 		    error_ret_free_internal:
6610 		      if (elf_section_data (section)->relocs == NULL)
6611 			free (internal_relocs);
6612 		    /* Fall through.  */
6613 		    error_ret_free_local:
6614 		      if (local_syms != NULL
6615 			  && (symtab_hdr->contents
6616 			      != (unsigned char *) local_syms))
6617 			free (local_syms);
6618 		      return FALSE;
6619 		    }
6620 
6621 		  hash = NULL;
6622 		  if (r_indx >= symtab_hdr->sh_info)
6623 		    hash = elf32_arm_hash_entry
6624 		      (elf_sym_hashes (input_bfd)
6625 		       [r_indx - symtab_hdr->sh_info]);
6626 
6627 		  /* Only look for stubs on branch instructions, or
6628		     non-relaxed TLSCALL.  */
6629 		  if ((r_type != (unsigned int) R_ARM_CALL)
6630 		      && (r_type != (unsigned int) R_ARM_THM_CALL)
6631 		      && (r_type != (unsigned int) R_ARM_JUMP24)
6632 		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
6633 		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
6634 		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
6635 		      && (r_type != (unsigned int) R_ARM_PLT32)
6636 		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
6637 			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6638 			   && r_type == elf32_arm_tls_transition
6639 			       (info, r_type, &hash->root)
6640 			   && ((hash ? hash->tls_type
6641 				: (elf32_arm_local_got_tls_type
6642 				   (input_bfd)[r_indx]))
6643 			       & GOT_TLS_GDESC) != 0))
6644 		    continue;
6645 
6646 		  /* Now determine the call target, its name, value,
6647 		     section.  */
6648 		  sym_sec = NULL;
6649 		  sym_value = 0;
6650 		  destination = 0;
6651 		  sym_name = NULL;
6652 
6653 		  if (r_type == (unsigned int) R_ARM_TLS_CALL
6654 		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
6655 		    {
6656 		      /* A non-relaxed TLS call.  The target is the
6657 			 PLT-resident trampoline and has nothing to do
6658 			 with the symbol.  */
6659 		      BFD_ASSERT (htab->tls_trampoline > 0);
6660 		      sym_sec = htab->root.splt;
6661 		      sym_value = htab->tls_trampoline;
6662 		      hash = 0;
6663 		      st_type = STT_FUNC;
6664 		      branch_type = ST_BRANCH_TO_ARM;
6665 		    }
6666 		  else if (!hash)
6667 		    {
6668 		      /* It's a local symbol.  */
6669 		      Elf_Internal_Sym *sym;
6670 
6671 		      if (local_syms == NULL)
6672 			{
6673 			  local_syms
6674 			    = (Elf_Internal_Sym *) symtab_hdr->contents;
6675 			  if (local_syms == NULL)
6676 			    local_syms
6677 			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
6678 						      symtab_hdr->sh_info, 0,
6679 						      NULL, NULL, NULL);
6680 			  if (local_syms == NULL)
6681 			    goto error_ret_free_internal;
6682 			}
6683 
6684 		      sym = local_syms + r_indx;
6685 		      if (sym->st_shndx == SHN_UNDEF)
6686 			sym_sec = bfd_und_section_ptr;
6687 		      else if (sym->st_shndx == SHN_ABS)
6688 			sym_sec = bfd_abs_section_ptr;
6689 		      else if (sym->st_shndx == SHN_COMMON)
6690 			sym_sec = bfd_com_section_ptr;
6691 		      else
6692 			sym_sec =
6693 			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);
6694 
6695 		      if (!sym_sec)
6696 			/* This is an undefined symbol.  It can never
6697 			   be resolved.  */
6698 			continue;
6699 
6700 		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
6701 			sym_value = sym->st_value;
6702 		      destination = (sym_value + irela->r_addend
6703 				     + sym_sec->output_offset
6704 				     + sym_sec->output_section->vma);
6705 		      st_type = ELF_ST_TYPE (sym->st_info);
6706 		      branch_type =
6707 			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
6708 		      sym_name
6709 			= bfd_elf_string_from_elf_section (input_bfd,
6710 							   symtab_hdr->sh_link,
6711 							   sym->st_name);
6712 		    }
6713 		  else
6714 		    {
6715 		      /* It's an external symbol.  */
6716 		      while (hash->root.root.type == bfd_link_hash_indirect
6717 			     || hash->root.root.type == bfd_link_hash_warning)
6718 			hash = ((struct elf32_arm_link_hash_entry *)
6719 				hash->root.root.u.i.link);
6720 
6721 		      if (hash->root.root.type == bfd_link_hash_defined
6722 			  || hash->root.root.type == bfd_link_hash_defweak)
6723 			{
6724 			  sym_sec = hash->root.root.u.def.section;
6725 			  sym_value = hash->root.root.u.def.value;
6726 
6727 			  struct elf32_arm_link_hash_table *globals =
6728 						  elf32_arm_hash_table (info);
6729 
6730 			  /* For a destination in a shared library,
6731 			     use the PLT stub as target address to
6732 			     decide whether a branch stub is
6733 			     needed.  */
6734 			  if (globals != NULL
6735 			      && globals->root.splt != NULL
6736 			      && hash != NULL
6737 			      && hash->root.plt.offset != (bfd_vma) -1)
6738 			    {
6739 			      sym_sec = globals->root.splt;
6740 			      sym_value = hash->root.plt.offset;
6741 			      if (sym_sec->output_section != NULL)
6742 				destination = (sym_value
6743 					       + sym_sec->output_offset
6744 					       + sym_sec->output_section->vma);
6745 			    }
6746 			  else if (sym_sec->output_section != NULL)
6747 			    destination = (sym_value + irela->r_addend
6748 					   + sym_sec->output_offset
6749 					   + sym_sec->output_section->vma);
6750 			}
6751 		      else if ((hash->root.root.type == bfd_link_hash_undefined)
6752 			       || (hash->root.root.type == bfd_link_hash_undefweak))
6753 			{
6754 			  /* For a shared library, use the PLT stub as
6755 			     target address to decide whether a long
6756 			     branch stub is needed.
6757 			     For absolute code, they cannot be handled.  */
6758 			  struct elf32_arm_link_hash_table *globals =
6759 			    elf32_arm_hash_table (info);
6760 
6761 			  if (globals != NULL
6762 			      && globals->root.splt != NULL
6763 			      && hash != NULL
6764 			      && hash->root.plt.offset != (bfd_vma) -1)
6765 			    {
6766 			      sym_sec = globals->root.splt;
6767 			      sym_value = hash->root.plt.offset;
6768 			      if (sym_sec->output_section != NULL)
6769 				destination = (sym_value
6770 					       + sym_sec->output_offset
6771 					       + sym_sec->output_section->vma);
6772 			    }
6773 			  else
6774 			    continue;
6775 			}
6776 		      else
6777 			{
6778 			  bfd_set_error (bfd_error_bad_value);
6779 			  goto error_ret_free_internal;
6780 			}
6781 		      st_type = hash->root.type;
6782 		      branch_type =
6783 			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
6784 		      sym_name = hash->root.root.root.string;
6785 		    }
6786 
6787 		  do
6788 		    {
6789 		      bfd_boolean new_stub;
6790 		      struct elf32_arm_stub_hash_entry *stub_entry;
6791 
6792 		      /* Determine what (if any) linker stub is needed.  */
6793 		      stub_type = arm_type_of_stub (info, section, irela,
6794 						    st_type, &branch_type,
6795 						    hash, destination, sym_sec,
6796 						    input_bfd, sym_name);
6797 		      if (stub_type == arm_stub_none)
6798 			break;
6799 
6800 		      /* We've either created a stub for this reloc already,
6801 			 or we are about to.  */
6802 		      stub_entry =
6803 			elf32_arm_create_stub (htab, stub_type, section, irela,
6804 					       sym_sec, hash,
6805 					       (char *) sym_name, sym_value,
6806 					       branch_type, &new_stub);
6807 
6808 		      created_stub = stub_entry != NULL;
6809 		      if (!created_stub)
6810 			goto error_ret_free_internal;
6811 		      else if (!new_stub)
6812 			break;
6813 		      else
6814 			stub_changed = TRUE;
6815 		    }
6816 		  while (0);
6817 
6818 		  /* Look for relocations which might trigger Cortex-A8
6819 		     erratum.  */
6820 		  if (htab->fix_cortex_a8
6821 		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
6822 			  || r_type == (unsigned int) R_ARM_THM_JUMP19
6823 			  || r_type == (unsigned int) R_ARM_THM_CALL
6824 			  || r_type == (unsigned int) R_ARM_THM_XPC22))
6825 		    {
6826 		      bfd_vma from = section->output_section->vma
6827 				     + section->output_offset
6828 				     + irela->r_offset;
6829 
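		      /* An offset of 0xffe within a 4K page means the 32-bit
			 Thumb branch at FROM has its first halfword in one
			 page and its second halfword in the next, which is
			 the layout the Cortex-A8 erratum workaround looks
			 for.  */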
6830 		      if ((from & 0xfff) == 0xffe)
6831 			{
6832 			  /* Found a candidate.  Note we haven't checked the
6833 			     destination is within 4K here: if we do so (and
6834 			     don't create an entry in a8_relocs) we can't tell
6835 			     that a branch should have been relocated when
6836 			     scanning later.  */
6837 			  if (num_a8_relocs == a8_reloc_table_size)
6838 			    {
6839 			      a8_reloc_table_size *= 2;
6840 			      a8_relocs = (struct a8_erratum_reloc *)
6841 				  bfd_realloc (a8_relocs,
6842 					       sizeof (struct a8_erratum_reloc)
6843 					       * a8_reloc_table_size);
6844 			    }
6845 
6846 			  a8_relocs[num_a8_relocs].from = from;
6847 			  a8_relocs[num_a8_relocs].destination = destination;
6848 			  a8_relocs[num_a8_relocs].r_type = r_type;
6849 			  a8_relocs[num_a8_relocs].branch_type = branch_type;
6850 			  a8_relocs[num_a8_relocs].sym_name = sym_name;
6851 			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
6852 			  a8_relocs[num_a8_relocs].hash = hash;
6853 
6854 			  num_a8_relocs++;
6855 			}
6856 		    }
6857 		}
6858 
6859 	      /* We're done with the internal relocs, free them.  */
6860 	      if (elf_section_data (section)->relocs == NULL)
6861 		free (internal_relocs);
6862 	    }
6863 
6864 	  if (htab->fix_cortex_a8)
6865 	    {
6866 	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
6867 	      qsort (a8_relocs, num_a8_relocs,
6868 		     sizeof (struct a8_erratum_reloc),
6869 		     &a8_reloc_compare);
6870 
6871 	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
6872 	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
6873 					  &num_a8_fixes, &a8_fix_table_size,
6874 					  a8_relocs, num_a8_relocs,
6875 					  prev_num_a8_fixes, &stub_changed)
6876 		  != 0)
6877 		goto error_ret_free_local;
6878 	    }
6879 
6880 	  if (local_syms != NULL
6881 	      && symtab_hdr->contents != (unsigned char *) local_syms)
6882 	    {
6883 	      if (!info->keep_memory)
6884 		free (local_syms);
6885 	      else
6886 		symtab_hdr->contents = (unsigned char *) local_syms;
6887 	    }
6888 	}
6889 
6890       if (first_veneer_scan
6891 	  && !set_cmse_veneer_addr_from_implib (info, htab,
6892 						&cmse_stub_created))
6893 	ret = FALSE;
6894 
6895       if (prev_num_a8_fixes != num_a8_fixes)
6896 	stub_changed = TRUE;
6897 
6898       if (!stub_changed)
6899 	break;
6900 
6901       /* OK, we've added some stubs.  Find out the new size of the
6902 	 stub sections.  */
6903       for (stub_sec = htab->stub_bfd->sections;
6904 	   stub_sec != NULL;
6905 	   stub_sec = stub_sec->next)
6906 	{
6907 	  /* Ignore non-stub sections.  */
6908 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
6909 	    continue;
6910 
6911 	  stub_sec->size = 0;
6912 	}
6913 
6914       /* Add new SG veneers after those already in the input import
6915 	 library.  */
6916       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6917 	   stub_type++)
6918 	{
6919 	  bfd_vma *start_offset_p;
6920 	  asection **stub_sec_p;
6921 
6922 	  start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
6923 	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6924 	  if (start_offset_p == NULL)
6925 	    continue;
6926 
6927 	  BFD_ASSERT (stub_sec_p != NULL);
6928 	  if (*stub_sec_p != NULL)
6929 	    (*stub_sec_p)->size = *start_offset_p;
6930 	}
6931 
6932       /* Compute stub section size, considering padding.  */
6933       bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
6934       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
6935 	   stub_type++)
6936 	{
6937 	  int size, padding;
6938 	  asection **stub_sec_p;
6939 
6940 	  padding = arm_dedicated_stub_section_padding (stub_type);
6941 	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
6942 	  /* Skip if no stub input section or no stub section padding
6943 	     required.  */
6944 	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
6945 	    continue;
6946 	  /* Stub section padding required but no dedicated section.  */
6947 	  BFD_ASSERT (stub_sec_p);
6948 
6949 	  size = (*stub_sec_p)->size;
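	  /* Round SIZE up to the next multiple of PADDING; the mask trick
	     below relies on PADDING being a power of two.  */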
6950 	  size = (size + padding - 1) & ~(padding - 1);
6951 	  (*stub_sec_p)->size = size;
6952 	}
6953 
6954       /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
6955       if (htab->fix_cortex_a8)
6956 	for (i = 0; i < num_a8_fixes; i++)
6957 	  {
6958 	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
6959 			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
6960 
6961 	    if (stub_sec == NULL)
6962 	      return FALSE;
6963 
6964 	    stub_sec->size
6965 	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
6966 					      NULL);
6967 	  }
6968 
6969 
6970       /* Ask the linker to do its stuff.  */
6971       (*htab->layout_sections_again) ();
6972       first_veneer_scan = FALSE;
6973     }
6974 
6975   /* Add stubs for Cortex-A8 erratum fixes now.  */
6976   if (htab->fix_cortex_a8)
6977     {
6978       for (i = 0; i < num_a8_fixes; i++)
6979 	{
6980 	  struct elf32_arm_stub_hash_entry *stub_entry;
6981 	  char *stub_name = a8_fixes[i].stub_name;
6982 	  asection *section = a8_fixes[i].section;
6983 	  unsigned int section_id = a8_fixes[i].section->id;
6984 	  asection *link_sec = htab->stub_group[section_id].link_sec;
6985 	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
6986 	  const insn_sequence *template_sequence;
6987 	  int template_size, size = 0;
6988 
6989 	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
6990 					     TRUE, FALSE);
6991 	  if (stub_entry == NULL)
6992 	    {
6993 	      _bfd_error_handler (_("%pB: cannot create stub entry %s"),
6994 				  section->owner, stub_name);
6995 	      return FALSE;
6996 	    }
6997 
6998 	  stub_entry->stub_sec = stub_sec;
6999 	  stub_entry->stub_offset = (bfd_vma) -1;
7000 	  stub_entry->id_sec = link_sec;
7001 	  stub_entry->stub_type = a8_fixes[i].stub_type;
7002 	  stub_entry->source_value = a8_fixes[i].offset;
7003 	  stub_entry->target_section = a8_fixes[i].section;
7004 	  stub_entry->target_value = a8_fixes[i].target_offset;
7005 	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
7006 	  stub_entry->branch_type = a8_fixes[i].branch_type;
7007 
7008 	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
7009 					      &template_sequence,
7010 					      &template_size);
7011 
7012 	  stub_entry->stub_size = size;
7013 	  stub_entry->stub_template = template_sequence;
7014 	  stub_entry->stub_template_size = template_size;
7015 	}
7016 
7017       /* Stash the Cortex-A8 erratum fix array for use later in
7018 	 elf32_arm_write_section().  */
7019       htab->a8_erratum_fixes = a8_fixes;
7020       htab->num_a8_erratum_fixes = num_a8_fixes;
7021     }
7022   else
7023     {
7024       htab->a8_erratum_fixes = NULL;
7025       htab->num_a8_erratum_fixes = 0;
7026     }
7027   return ret;
7028 }
7029 
7030 /* Build all the stubs associated with the current output file.  The
7031    stubs are kept in a hash table attached to the main linker hash
7032    table.  We also set up the .plt entries for statically linked PIC
7033    functions here.  This function is called via arm_elf_finish in the
7034    linker.  */
7035 
7036 bfd_boolean
7037 elf32_arm_build_stubs (struct bfd_link_info *info)
7038 {
7039   asection *stub_sec;
7040   struct bfd_hash_table *table;
7041   enum elf32_arm_stub_type stub_type;
7042   struct elf32_arm_link_hash_table *htab;
7043 
7044   htab = elf32_arm_hash_table (info);
7045   if (htab == NULL)
7046     return FALSE;
7047 
7048   for (stub_sec = htab->stub_bfd->sections;
7049        stub_sec != NULL;
7050        stub_sec = stub_sec->next)
7051     {
7052       bfd_size_type size;
7053 
7054       /* Ignore non-stub sections.  */
7055       if (!strstr (stub_sec->name, STUB_SUFFIX))
7056 	continue;
7057 
7058       /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
7059	 must at least be done for stub sections requiring padding and for SG
7060	 veneers, to ensure that non-secure code branching to a removed SG
7061 	 veneer causes an error.  */
7062       size = stub_sec->size;
7063       stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
7064       if (stub_sec->contents == NULL && size != 0)
7065 	return FALSE;
7066 
7067       stub_sec->size = 0;
7068     }
7069 
7070   /* Add new SG veneers after those already in the input import library.  */
7071   for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7072     {
7073       bfd_vma *start_offset_p;
7074       asection **stub_sec_p;
7075 
7076       start_offset_p = arm_new_stubs_start_offset_ptr (htab, stub_type);
7077       stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
7078       if (start_offset_p == NULL)
7079 	continue;
7080 
7081       BFD_ASSERT (stub_sec_p != NULL);
7082       if (*stub_sec_p != NULL)
7083 	(*stub_sec_p)->size = *start_offset_p;
7084     }
7085 
7086   /* Build the stubs as directed by the stub hash table.  */
7087   table = &htab->stub_hash_table;
7088   bfd_hash_traverse (table, arm_build_one_stub, info);
7089   if (htab->fix_cortex_a8)
7090     {
7091       /* Place the Cortex-A8 stubs last.  */
7092       htab->fix_cortex_a8 = -1;
7093       bfd_hash_traverse (table, arm_build_one_stub, info);
7094     }
7095 
7096   return TRUE;
7097 }
7098 
7099 /* Locate the Thumb encoded calling stub for NAME.  */
7100 
7101 static struct elf_link_hash_entry *
7102 find_thumb_glue (struct bfd_link_info *link_info,
7103 		 const char *name,
7104 		 char **error_message)
7105 {
7106   char *tmp_name;
7107   struct elf_link_hash_entry *hash;
7108   struct elf32_arm_link_hash_table *hash_table;
7109 
7110   /* We need a pointer to the armelf specific hash table.  */
7111   hash_table = elf32_arm_hash_table (link_info);
7112   if (hash_table == NULL)
7113     return NULL;
7114 
7115   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7116 				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
7117 
7118   BFD_ASSERT (tmp_name);
7119 
7120   sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
7121 
7122   hash = elf_link_hash_lookup
7123     (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7124 
7125   if (hash == NULL
7126       && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7127 		   "Thumb", tmp_name, name) == -1)
7128     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7129 
7130   free (tmp_name);
7131 
7132   return hash;
7133 }
7134 
7135 /* Locate the ARM encoded calling stub for NAME.  */
7136 
7137 static struct elf_link_hash_entry *
7138 find_arm_glue (struct bfd_link_info *link_info,
7139 	       const char *name,
7140 	       char **error_message)
7141 {
7142   char *tmp_name;
7143   struct elf_link_hash_entry *myh;
7144   struct elf32_arm_link_hash_table *hash_table;
7145 
7146   /* We need a pointer to the elfarm specific hash table.  */
7147   hash_table = elf32_arm_hash_table (link_info);
7148   if (hash_table == NULL)
7149     return NULL;
7150 
7151   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7152 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7153   BFD_ASSERT (tmp_name);
7154 
7155   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7156 
7157   myh = elf_link_hash_lookup
7158     (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
7159 
7160   if (myh == NULL
7161       && asprintf (error_message, _("unable to find %s glue '%s' for '%s'"),
7162 		   "ARM", tmp_name, name) == -1)
7163     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
7164 
7165   free (tmp_name);
7166 
7167   return myh;
7168 }
7169 
7170 /* ARM->Thumb glue (static images):
7171 
7172    .arm
7173    __func_from_arm:
7174    ldr r12, __func_addr
7175    bx  r12
7176    __func_addr:
7177    .word func    @ behave as if you saw an ARM_32 reloc.
7178 
7179    (v5t static images)
7180    .arm
7181    __func_from_arm:
7182    ldr pc, __func_addr
7183    __func_addr:
7184    .word func    @ behave as if you saw an ARM_32 reloc.
7185 
7186    (relocatable images)
7187    .arm
7188    __func_from_arm:
7189    ldr r12, __func_offset
7190    add r12, r12, pc
7191    bx  r12
7192    __func_offset:
7193    .word func - .   */
7194 
7195 #define ARM2THUMB_STATIC_GLUE_SIZE 12
7196 static const insn32 a2t1_ldr_insn = 0xe59fc000;
7197 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
7198 static const insn32 a2t3_func_addr_insn = 0x00000001;
7199 
7200 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
7201 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
7202 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
7203 
7204 #define ARM2THUMB_PIC_GLUE_SIZE 16
7205 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
7206 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
7207 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
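
/* For reference, the ARM->Thumb glue encodings above decode as follows
   (in ARM state PC reads as the address of the current instruction plus 8):
     0xe59fc000	ldr r12, [pc, #0]	@ load __func_addr
     0xe12fff1c	bx  r12
     0xe51ff004	ldr pc, [pc, #-4]	@ v5: load __func_addr straight into pc
     0xe59fc004	ldr r12, [pc, #4]	@ PIC: load __func_offset
     0xe08cc00f	add r12, r12, pc
   with the trailing .word in each sequence holding the address or offset
   shown in the comments above.  */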
7208 
7209 /* Thumb->ARM:				Thumb->(non-interworking aware) ARM
7210 
7211      .thumb				.thumb
7212      .align 2				.align 2
7213  __func_from_thumb:		    __func_from_thumb:
7214      bx pc				push {r6, lr}
7215      nop				ldr  r6, __func_addr
7216      .arm				mov  lr, pc
7217      b func				bx   r6
7218 					.arm
7219 				    ;; back_to_thumb
7220 					ldmia r13! {r6, lr}
7221 					bx    lr
7222 				    __func_addr:
7223 					.word	     func  */
7224 
7225 #define THUMB2ARM_GLUE_SIZE 8
7226 static const insn16 t2a1_bx_pc_insn = 0x4778;
7227 static const insn16 t2a2_noop_insn = 0x46c0;
7228 static const insn32 t2a3_b_insn = 0xea000000;
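
/* For reference: 0x4778 is the Thumb "bx pc", 0x46c0 is the Thumb nop
   encoding "mov r8, r8", and 0xea000000 is an ARM "b" whose 24-bit offset is
   filled in with the branch to the target function when the glue is
   emitted.  */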
7229 
7230 #define VFP11_ERRATUM_VENEER_SIZE 8
7231 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
7232 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
7233 
7234 #define ARM_BX_VENEER_SIZE 12
7235 static const insn32 armbx1_tst_insn = 0xe3100001;
7236 static const insn32 armbx2_moveq_insn = 0x01a0f000;
7237 static const insn32 armbx3_bx_insn = 0xe12fff10;
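
/* For reference, with r0 as the placeholder register: 0xe3100001 is
   "tst r0, #1", 0x01a0f000 is "moveq pc, r0" and 0xe12fff10 is "bx r0".
   One veneer is allocated per register (see record_arm_bx_glue below), the
   register field presumably being patched in when each veneer is emitted.  */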
7238 
7239 #ifndef ELFARM_NABI_C_INCLUDED
7240 static void
7241 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
7242 {
7243   asection * s;
7244   bfd_byte * contents;
7245 
7246   if (size == 0)
7247     {
7248       /* Do not include empty glue sections in the output.  */
7249       if (abfd != NULL)
7250 	{
7251 	  s = bfd_get_linker_section (abfd, name);
7252 	  if (s != NULL)
7253 	    s->flags |= SEC_EXCLUDE;
7254 	}
7255       return;
7256     }
7257 
7258   BFD_ASSERT (abfd != NULL);
7259 
7260   s = bfd_get_linker_section (abfd, name);
7261   BFD_ASSERT (s != NULL);
7262 
7263   contents = (bfd_byte *) bfd_zalloc (abfd, size);
7264 
7265   BFD_ASSERT (s->size == size);
7266   s->contents = contents;
7267 }
7268 
7269 bfd_boolean
7270 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
7271 {
7272   struct elf32_arm_link_hash_table * globals;
7273 
7274   globals = elf32_arm_hash_table (info);
7275   BFD_ASSERT (globals != NULL);
7276 
7277   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7278 				   globals->arm_glue_size,
7279 				   ARM2THUMB_GLUE_SECTION_NAME);
7280 
7281   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7282 				   globals->thumb_glue_size,
7283 				   THUMB2ARM_GLUE_SECTION_NAME);
7284 
7285   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7286 				   globals->vfp11_erratum_glue_size,
7287 				   VFP11_ERRATUM_VENEER_SECTION_NAME);
7288 
7289   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7290 				   globals->stm32l4xx_erratum_glue_size,
7291 				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7292 
7293   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
7294 				   globals->bx_glue_size,
7295 				   ARM_BX_GLUE_SECTION_NAME);
7296 
7297   return TRUE;
7298 }
7299 
7300 /* Allocate space and symbols for calling a Thumb function from ARM mode.
7301    Returns the symbol identifying the stub.  */
7302 
7303 static struct elf_link_hash_entry *
7304 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
7305 			  struct elf_link_hash_entry * h)
7306 {
7307   const char * name = h->root.root.string;
7308   asection * s;
7309   char * tmp_name;
7310   struct elf_link_hash_entry * myh;
7311   struct bfd_link_hash_entry * bh;
7312   struct elf32_arm_link_hash_table * globals;
7313   bfd_vma val;
7314   bfd_size_type size;
7315 
7316   globals = elf32_arm_hash_table (link_info);
7317   BFD_ASSERT (globals != NULL);
7318   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7319 
7320   s = bfd_get_linker_section
7321     (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
7322 
7323   BFD_ASSERT (s != NULL);
7324 
7325   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
7326 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
7327   BFD_ASSERT (tmp_name);
7328 
7329   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
7330 
7331   myh = elf_link_hash_lookup
7332     (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7333 
7334   if (myh != NULL)
7335     {
7336       /* We've already seen this guy.  */
7337       free (tmp_name);
7338       return myh;
7339     }
7340 
7341   /* The only trick here is using hash_table->arm_glue_size as the value.
7342      Even though the section isn't allocated yet, this is where we will be
7343      putting it.  The +1 on the value marks that the stub has not been
7344      output yet - not that it is a Thumb function.  */
7345   bh = NULL;
7346   val = globals->arm_glue_size + 1;
7347   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7348 				    tmp_name, BSF_GLOBAL, s, val,
7349 				    NULL, TRUE, FALSE, &bh);
7350 
7351   myh = (struct elf_link_hash_entry *) bh;
7352   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7353   myh->forced_local = 1;
7354 
7355   free (tmp_name);
7356 
7357   if (bfd_link_pic (link_info)
7358       || globals->root.is_relocatable_executable
7359       || globals->pic_veneer)
7360     size = ARM2THUMB_PIC_GLUE_SIZE;
7361   else if (globals->use_blx)
7362     size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
7363   else
7364     size = ARM2THUMB_STATIC_GLUE_SIZE;
7365 
7366   s->size += size;
7367   globals->arm_glue_size += size;
7368 
7369   return myh;
7370 }
7371 
7372 /* Allocate space for ARMv4 BX veneers.  */
7373 
7374 static void
7375 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
7376 {
7377   asection * s;
7378   struct elf32_arm_link_hash_table *globals;
7379   char *tmp_name;
7380   struct elf_link_hash_entry *myh;
7381   struct bfd_link_hash_entry *bh;
7382   bfd_vma val;
7383 
7384   /* BX PC does not need a veneer.  */
7385   if (reg == 15)
7386     return;
7387 
7388   globals = elf32_arm_hash_table (link_info);
7389   BFD_ASSERT (globals != NULL);
7390   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
7391 
7392   /* Check if this veneer has already been allocated.  */
7393   if (globals->bx_glue_offset[reg])
7394     return;
7395 
7396   s = bfd_get_linker_section
7397     (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
7398 
7399   BFD_ASSERT (s != NULL);
7400 
7401   /* Add symbol for veneer.  */
7402   tmp_name = (char *)
7403       bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
7404   BFD_ASSERT (tmp_name);
7405 
7406   sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
7407 
7408   myh = elf_link_hash_lookup
7409     (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
7410 
7411   BFD_ASSERT (myh == NULL);
7412 
7413   bh = NULL;
7414   val = globals->bx_glue_size;
7415   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
7416 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7417 				    NULL, TRUE, FALSE, &bh);
7418 
7419   myh = (struct elf_link_hash_entry *) bh;
7420   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7421   myh->forced_local = 1;
7422 
7423   s->size += ARM_BX_VENEER_SIZE;
7424   globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
7425   globals->bx_glue_size += ARM_BX_VENEER_SIZE;
7426 }
7427 
7428 
7429 /* Add an entry to the code/data map for section SEC.  */
7430 
7431 static void
7432 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
7433 {
7434   struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7435   unsigned int newidx;
7436 
7437   if (sec_data->map == NULL)
7438     {
7439       sec_data->map = (elf32_arm_section_map *)
7440 	  bfd_malloc (sizeof (elf32_arm_section_map));
7441       sec_data->mapcount = 0;
7442       sec_data->mapsize = 1;
7443     }
7444 
7445   newidx = sec_data->mapcount++;
7446 
7447   if (sec_data->mapcount > sec_data->mapsize)
7448     {
7449       sec_data->mapsize *= 2;
7450       sec_data->map = (elf32_arm_section_map *)
7451 	  bfd_realloc_or_free (sec_data->map, sec_data->mapsize
7452 			       * sizeof (elf32_arm_section_map));
7453     }
7454 
7455   if (sec_data->map)
7456     {
7457       sec_data->map[newidx].vma = vma;
7458       sec_data->map[newidx].type = type;
7459     }
7460 }
7461 
7462 
7463 /* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
7464    veneers are handled for now.  */
7465 
7466 static bfd_vma
7467 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
7468 			     elf32_vfp11_erratum_list *branch,
7469 			     bfd *branch_bfd,
7470 			     asection *branch_sec,
7471 			     unsigned int offset)
7472 {
7473   asection *s;
7474   struct elf32_arm_link_hash_table *hash_table;
7475   char *tmp_name;
7476   struct elf_link_hash_entry *myh;
7477   struct bfd_link_hash_entry *bh;
7478   bfd_vma val;
7479   struct _arm_elf_section_data *sec_data;
7480   elf32_vfp11_erratum_list *newerr;
7481 
7482   hash_table = elf32_arm_hash_table (link_info);
7483   BFD_ASSERT (hash_table != NULL);
7484   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7485 
7486   s = bfd_get_linker_section
7487     (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
7488 
7489   sec_data = elf32_arm_section_data (s);
7490 
7491   BFD_ASSERT (s != NULL);
7492 
7493   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7494 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7495   BFD_ASSERT (tmp_name);
7496 
7497   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7498 	   hash_table->num_vfp11_fixes);
7499 
7500   myh = elf_link_hash_lookup
7501     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7502 
7503   BFD_ASSERT (myh == NULL);
7504 
7505   bh = NULL;
7506   val = hash_table->vfp11_erratum_glue_size;
7507   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7508 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7509 				    NULL, TRUE, FALSE, &bh);
7510 
7511   myh = (struct elf_link_hash_entry *) bh;
7512   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7513   myh->forced_local = 1;
7514 
7515   /* Link veneer back to calling location.  */
7516   sec_data->erratumcount += 1;
7517   newerr = (elf32_vfp11_erratum_list *)
7518       bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7519 
7520   newerr->type = VFP11_ERRATUM_ARM_VENEER;
7521   newerr->vma = -1;
7522   newerr->u.v.branch = branch;
7523   newerr->u.v.id = hash_table->num_vfp11_fixes;
7524   branch->u.b.veneer = newerr;
7525 
7526   newerr->next = sec_data->erratumlist;
7527   sec_data->erratumlist = newerr;
7528 
7529   /* A symbol for the return from the veneer.  */
7530   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7531 	   hash_table->num_vfp11_fixes);
7532 
7533   myh = elf_link_hash_lookup
7534     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7535 
7536   if (myh != NULL)
7537     abort ();
7538 
7539   bh = NULL;
7540   val = offset + 4;
7541   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7542 				    branch_sec, val, NULL, TRUE, FALSE, &bh);
7543 
7544   myh = (struct elf_link_hash_entry *) bh;
7545   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7546   myh->forced_local = 1;
7547 
7548   free (tmp_name);
7549 
7550   /* Generate a mapping symbol for the veneer section, and explicitly add an
7551      entry for that symbol to the code/data map for the section.  */
7552   if (hash_table->vfp11_erratum_glue_size == 0)
7553     {
7554       bh = NULL;
7555       /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
7556 	 ever requires this erratum fix.  */
7557       _bfd_generic_link_add_one_symbol (link_info,
7558 					hash_table->bfd_of_glue_owner, "$a",
7559 					BSF_LOCAL, s, 0, NULL,
7560 					TRUE, FALSE, &bh);
7561 
7562       myh = (struct elf_link_hash_entry *) bh;
7563       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7564       myh->forced_local = 1;
7565 
7566       /* The elf32_arm_init_maps function only cares about symbols from input
7567 	 BFDs.  We must make a note of this generated mapping symbol
7568 	 ourselves so that code byteswapping works properly in
7569 	 elf32_arm_write_section.  */
7570       elf32_arm_section_map_add (s, 'a', 0);
7571     }
7572 
7573   s->size += VFP11_ERRATUM_VENEER_SIZE;
7574   hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
7575   hash_table->num_vfp11_fixes++;
7576 
7577   /* The offset of the veneer.  */
7578   return val;
7579 }
7580 
7581 /* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
7582    veneers need to be handled because they are only used on Cortex-M.  */
7583 
7584 static bfd_vma
7585 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
7586 				 elf32_stm32l4xx_erratum_list *branch,
7587 				 bfd *branch_bfd,
7588 				 asection *branch_sec,
7589 				 unsigned int offset,
7590 				 bfd_size_type veneer_size)
7591 {
7592   asection *s;
7593   struct elf32_arm_link_hash_table *hash_table;
7594   char *tmp_name;
7595   struct elf_link_hash_entry *myh;
7596   struct bfd_link_hash_entry *bh;
7597   bfd_vma val;
7598   struct _arm_elf_section_data *sec_data;
7599   elf32_stm32l4xx_erratum_list *newerr;
7600 
7601   hash_table = elf32_arm_hash_table (link_info);
7602   BFD_ASSERT (hash_table != NULL);
7603   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
7604 
7605   s = bfd_get_linker_section
7606     (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7607 
7608   BFD_ASSERT (s != NULL);
7609 
7610   sec_data = elf32_arm_section_data (s);
7611 
7612   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7613 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7614   BFD_ASSERT (tmp_name);
7615 
7616   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7617 	   hash_table->num_stm32l4xx_fixes);
7618 
7619   myh = elf_link_hash_lookup
7620     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7621 
7622   BFD_ASSERT (myh == NULL);
7623 
7624   bh = NULL;
7625   val = hash_table->stm32l4xx_erratum_glue_size;
7626   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
7627 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
7628 				    NULL, TRUE, FALSE, &bh);
7629 
7630   myh = (struct elf_link_hash_entry *) bh;
7631   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7632   myh->forced_local = 1;
7633 
7634   /* Link veneer back to calling location.  */
7635   sec_data->stm32l4xx_erratumcount += 1;
7636   newerr = (elf32_stm32l4xx_erratum_list *)
7637       bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
7638 
7639   newerr->type = STM32L4XX_ERRATUM_VENEER;
7640   newerr->vma = -1;
7641   newerr->u.v.branch = branch;
7642   newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
7643   branch->u.b.veneer = newerr;
7644 
7645   newerr->next = sec_data->stm32l4xx_erratumlist;
7646   sec_data->stm32l4xx_erratumlist = newerr;
7647 
7648   /* A symbol for the return from the veneer.  */
7649   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7650 	   hash_table->num_stm32l4xx_fixes);
7651 
7652   myh = elf_link_hash_lookup
7653     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
7654 
7655   if (myh != NULL)
7656     abort ();
7657 
7658   bh = NULL;
7659   val = offset + 4;
7660   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
7661 				    branch_sec, val, NULL, TRUE, FALSE, &bh);
7662 
7663   myh = (struct elf_link_hash_entry *) bh;
7664   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
7665   myh->forced_local = 1;
7666 
7667   free (tmp_name);
7668 
7669   /* Generate a mapping symbol for the veneer section, and explicitly add an
7670      entry for that symbol to the code/data map for the section.  */
7671   if (hash_table->stm32l4xx_erratum_glue_size == 0)
7672     {
7673       bh = NULL;
7674       /* Creates a THUMB symbol since there is no other choice.  */
7675       _bfd_generic_link_add_one_symbol (link_info,
7676 					hash_table->bfd_of_glue_owner, "$t",
7677 					BSF_LOCAL, s, 0, NULL,
7678 					TRUE, FALSE, &bh);
7679 
7680       myh = (struct elf_link_hash_entry *) bh;
7681       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
7682       myh->forced_local = 1;
7683 
7684       /* The elf32_arm_init_maps function only cares about symbols from input
7685 	 BFDs.  We must make a note of this generated mapping symbol
7686 	 ourselves so that code byteswapping works properly in
7687 	 elf32_arm_write_section.  */
7688       elf32_arm_section_map_add (s, 't', 0);
7689     }
7690 
7691   s->size += veneer_size;
7692   hash_table->stm32l4xx_erratum_glue_size += veneer_size;
7693   hash_table->num_stm32l4xx_fixes++;
7694 
7695   /* The offset of the veneer.  */
7696   return val;
7697 }
7698 
7699 #define ARM_GLUE_SECTION_FLAGS \
7700   (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
7701    | SEC_READONLY | SEC_LINKER_CREATED)
7702 
7703 /* Create a fake section for use by the ARM backend of the linker.  */
7704 
7705 static bfd_boolean
7706 arm_make_glue_section (bfd * abfd, const char * name)
7707 {
7708   asection * sec;
7709 
7710   sec = bfd_get_linker_section (abfd, name);
7711   if (sec != NULL)
7712     /* Already made.  */
7713     return TRUE;
7714 
7715   sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
7716 
7717   if (sec == NULL
7718       || !bfd_set_section_alignment (sec, 2))
7719     return FALSE;
7720 
7721   /* Set the gc mark to prevent the section from being removed by garbage
7722      collection, despite the fact that no relocs refer to this section.  */
7723   sec->gc_mark = 1;
7724 
7725   return TRUE;
7726 }
7727 
7728 /* Set size of .plt entries.  This function is called from the
7729    linker scripts in ld/emultempl/{armelf}.em.  */
7730 
7731 void
7732 bfd_elf32_arm_use_long_plt (void)
7733 {
7734   elf32_arm_use_long_plt_entry = TRUE;
7735 }
7736 
7737 /* Add the glue sections to ABFD.  This function is called from the
7738    linker scripts in ld/emultempl/{armelf}.em.  */
7739 
7740 bfd_boolean
7741 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
7742 					struct bfd_link_info *info)
7743 {
7744   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
7745   bfd_boolean dostm32l4xx = globals
7746     && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
7747   bfd_boolean addglue;
7748 
7749   /* If we are only performing a partial
7750      link do not bother adding the glue.  */
7751   if (bfd_link_relocatable (info))
7752     return TRUE;
7753 
7754   addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
7755     && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
7756     && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
7757     && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
7758 
7759   if (!dostm32l4xx)
7760     return addglue;
7761 
7762   return addglue
7763     && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
7764 }
7765 
7766 /* Mark the output sections of veneers that need a dedicated output section
7767    with SEC_KEEP.  This ensures they are not marked for deletion by
7768    strip_excluded_output_sections () when the veneers are going to be created
7769    later.  Not doing so would trigger an assert on empty section size in
7770    lang_size_sections_1 ().  */
7771 
7772 void
7773 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
7774 {
7775   enum elf32_arm_stub_type stub_type;
7776 
7777   /* If we are only performing a partial
7778      link do not bother adding the glue.  */
7779   if (bfd_link_relocatable (info))
7780     return;
7781 
7782   for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
7783     {
7784       asection *out_sec;
7785       const char *out_sec_name;
7786 
7787       if (!arm_dedicated_stub_output_section_required (stub_type))
7788 	continue;
7789 
7790      out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
7791      out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
7792      if (out_sec != NULL)
7793 	out_sec->flags |= SEC_KEEP;
7794     }
7795 }
7796 
7797 /* Select a BFD to be used to hold the sections used by the glue code.
7798    This function is called from the linker scripts in ld/emultempl/
7799    {armelf/pe}.em.  */
7800 
7801 bfd_boolean
7802 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
7803 {
7804   struct elf32_arm_link_hash_table *globals;
7805 
7806   /* If we are only performing a partial link
7807      do not bother getting a bfd to hold the glue.  */
7808   if (bfd_link_relocatable (info))
7809     return TRUE;
7810 
7811   /* Make sure we don't attach the glue sections to a dynamic object.  */
7812   BFD_ASSERT (!(abfd->flags & DYNAMIC));
7813 
7814   globals = elf32_arm_hash_table (info);
7815   BFD_ASSERT (globals != NULL);
7816 
7817   if (globals->bfd_of_glue_owner != NULL)
7818     return TRUE;
7819 
7820   /* Save the bfd for later use.  */
7821   globals->bfd_of_glue_owner = abfd;
7822 
7823   return TRUE;
7824 }
7825 
7826 static void
7827 check_use_blx (struct elf32_arm_link_hash_table *globals)
7828 {
7829   int cpu_arch;
7830 
7831   cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
7832 				       Tag_CPU_arch);
7833 
7834   if (globals->fix_arm1176)
7835     {
7836       if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
7837 	globals->use_blx = 1;
7838     }
7839   else
7840     {
7841       if (cpu_arch > TAG_CPU_ARCH_V4T)
7842 	globals->use_blx = 1;
7843     }
7844 }
7845 
7846 bfd_boolean
7847 bfd_elf32_arm_process_before_allocation (bfd *abfd,
7848 					 struct bfd_link_info *link_info)
7849 {
7850   Elf_Internal_Shdr *symtab_hdr;
7851   Elf_Internal_Rela *internal_relocs = NULL;
7852   Elf_Internal_Rela *irel, *irelend;
7853   bfd_byte *contents = NULL;
7854 
7855   asection *sec;
7856   struct elf32_arm_link_hash_table *globals;
7857 
7858   /* If we are only performing a partial link do not bother
7859      to construct any glue.  */
7860   if (bfd_link_relocatable (link_info))
7861     return TRUE;
7862 
7863   /* Here we have a bfd that is to be included on the link.  We have a
7864      hook to do reloc rummaging, before section sizes are nailed down.  */
7865   globals = elf32_arm_hash_table (link_info);
7866   BFD_ASSERT (globals != NULL);
7867 
7868   check_use_blx (globals);
7869 
7870   if (globals->byteswap_code && !bfd_big_endian (abfd))
7871     {
7872       _bfd_error_handler (_("%pB: BE8 images only valid in big-endian mode"),
7873 			  abfd);
7874       return FALSE;
7875     }
7876 
7877   /* PR 5398: If we have not decided to include any loadable sections in
7878      the output then we will not have a glue owner bfd.  This is OK, it
7879      just means that there is nothing else for us to do here.  */
7880   if (globals->bfd_of_glue_owner == NULL)
7881     return TRUE;
7882 
7883   /* Rummage around all the relocs and map the glue vectors.  */
7884   sec = abfd->sections;
7885 
7886   if (sec == NULL)
7887     return TRUE;
7888 
7889   for (; sec != NULL; sec = sec->next)
7890     {
7891       if (sec->reloc_count == 0)
7892 	continue;
7893 
7894       if ((sec->flags & SEC_EXCLUDE) != 0)
7895 	continue;
7896 
7897       symtab_hdr = & elf_symtab_hdr (abfd);
7898 
7899       /* Load the relocs.  */
7900       internal_relocs
7901 	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
7902 
7903       if (internal_relocs == NULL)
7904 	goto error_return;
7905 
7906       irelend = internal_relocs + sec->reloc_count;
7907       for (irel = internal_relocs; irel < irelend; irel++)
7908 	{
7909 	  long r_type;
7910 	  unsigned long r_index;
7911 
7912 	  struct elf_link_hash_entry *h;
7913 
7914 	  r_type = ELF32_R_TYPE (irel->r_info);
7915 	  r_index = ELF32_R_SYM (irel->r_info);
7916 
7917 	  /* These are the only relocation types we care about.  */
7918 	  if (   r_type != R_ARM_PC24
7919 	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
7920 	    continue;
7921 
7922 	  /* Get the section contents if we haven't done so already.  */
7923 	  if (contents == NULL)
7924 	    {
7925 	      /* Get cached copy if it exists.  */
7926 	      if (elf_section_data (sec)->this_hdr.contents != NULL)
7927 		contents = elf_section_data (sec)->this_hdr.contents;
7928 	      else
7929 		{
7930 		  /* Go get them off disk.  */
7931 		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7932 		    goto error_return;
7933 		}
7934 	    }
7935 
7936 	  if (r_type == R_ARM_V4BX)
7937 	    {
7938 	      int reg;
7939 
7940 	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
7941 	      record_arm_bx_glue (link_info, reg);
7942 	      continue;
7943 	    }
7944 
7945 	  /* If the relocation is not against a symbol it cannot concern us.  */
7946 	  h = NULL;
7947 
7948 	  /* We don't care about local symbols.  */
7949 	  if (r_index < symtab_hdr->sh_info)
7950 	    continue;
7951 
7952 	  /* This is an external symbol.  */
7953 	  r_index -= symtab_hdr->sh_info;
7954 	  h = (struct elf_link_hash_entry *)
7955 	    elf_sym_hashes (abfd)[r_index];
7956 
7957 	  /* If the relocation is against a static symbol it must be within
7958 	     the current section and so cannot be a cross ARM/Thumb relocation.  */
7959 	  if (h == NULL)
7960 	    continue;
7961 
7962 	  /* If the call will go through a PLT entry then we do not need
7963 	     glue.  */
7964 	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
7965 	    continue;
7966 
7967 	  switch (r_type)
7968 	    {
7969 	    case R_ARM_PC24:
7970 	      /* This one is a call from arm code.  We need to look up
7971 		 the target of the call.  If it is a thumb target, we
7972 		 insert glue.  */
7973 	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
7974 		  == ST_BRANCH_TO_THUMB)
7975 		record_arm_to_thumb_glue (link_info, h);
7976 	      break;
7977 
7978 	    default:
7979 	      abort ();
7980 	    }
7981 	}
7982 
7983       if (contents != NULL
7984 	  && elf_section_data (sec)->this_hdr.contents != contents)
7985 	free (contents);
7986       contents = NULL;
7987 
7988       if (internal_relocs != NULL
7989 	  && elf_section_data (sec)->relocs != internal_relocs)
7990 	free (internal_relocs);
7991       internal_relocs = NULL;
7992     }
7993 
7994   return TRUE;
7995 
7996 error_return:
7997   if (contents != NULL
7998       && elf_section_data (sec)->this_hdr.contents != contents)
7999     free (contents);
8000   if (internal_relocs != NULL
8001       && elf_section_data (sec)->relocs != internal_relocs)
8002     free (internal_relocs);
8003 
8004   return FALSE;
8005 }
8006 #endif
8007 
8008 
8009 /* Initialise maps of ARM/Thumb/data for input BFDs.  */
8010 
8011 void
8012 bfd_elf32_arm_init_maps (bfd *abfd)
8013 {
8014   Elf_Internal_Sym *isymbuf;
8015   Elf_Internal_Shdr *hdr;
8016   unsigned int i, localsyms;
8017 
8018   /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
8019   if (! is_arm_elf (abfd))
8020     return;
8021 
8022   if ((abfd->flags & DYNAMIC) != 0)
8023     return;
8024 
8025   hdr = & elf_symtab_hdr (abfd);
8026   localsyms = hdr->sh_info;
8027 
8028   /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
8029      should contain the number of local symbols, which should come before any
8030      global symbols.  Mapping symbols are always local.  */
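  /* The ARM EABI mapping symbols are named $a, $t and $d (optionally
     followed by a dot and a suffix), marking ARM code, Thumb code and data
     respectively; this is why name[1] is passed as the map type below.  */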
8031   isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
8032 				  NULL);
8033 
8034   /* No internal symbols read?  Skip this BFD.  */
8035   if (isymbuf == NULL)
8036     return;
8037 
8038   for (i = 0; i < localsyms; i++)
8039     {
8040       Elf_Internal_Sym *isym = &isymbuf[i];
8041       asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
8042       const char *name;
8043 
8044       if (sec != NULL
8045 	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
8046 	{
8047 	  name = bfd_elf_string_from_elf_section (abfd,
8048 	    hdr->sh_link, isym->st_name);
8049 
8050 	  if (bfd_is_arm_special_symbol_name (name,
8051 					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
8052 	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
8053 	}
8054     }
8055 }
8056 
8057 
8058 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
8059    say what they wanted.  */
8060 
8061 void
8062 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
8063 {
8064   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8065   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8066 
8067   if (globals == NULL)
8068     return;
8069 
8070   if (globals->fix_cortex_a8 == -1)
8071     {
8072       /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
8073       if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
8074 	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
8075 	      || out_attr[Tag_CPU_arch_profile].i == 0))
8076 	globals->fix_cortex_a8 = 1;
8077       else
8078 	globals->fix_cortex_a8 = 0;
8079     }
8080 }
8081 
8082 
8083 void
8084 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
8085 {
8086   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8087   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8088 
8089   if (globals == NULL)
8090     return;
8091   /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
8092   if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
8093     {
8094       switch (globals->vfp11_fix)
8095 	{
8096 	case BFD_ARM_VFP11_FIX_DEFAULT:
8097 	case BFD_ARM_VFP11_FIX_NONE:
8098 	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8099 	  break;
8100 
8101 	default:
8102 	  /* Give a warning, but do as the user requests anyway.  */
8103 	  _bfd_error_handler (_("%pB: warning: selected VFP11 erratum "
8104 	    "workaround is not necessary for target architecture"), obfd);
8105 	}
8106     }
8107   else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
8108     /* For earlier architectures, we might need the workaround, but do not
8109        enable it by default.  If a user is running with broken hardware, they
8110        must enable the erratum fix explicitly.  */
8111     globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
8112 }
8113 
8114 void
8115 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
8116 {
8117   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8118   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
8119 
8120   if (globals == NULL)
8121     return;
8122 
8123   /* We assume only Cortex-M4 may require the fix.  */
8124   if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
8125       || out_attr[Tag_CPU_arch_profile].i != 'M')
8126     {
8127       if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
8128 	/* Give a warning, but do as the user requests anyway.  */
8129 	_bfd_error_handler
8130 	  (_("%pB: warning: selected STM32L4XX erratum "
8131 	     "workaround is not necessary for target architecture"), obfd);
8132     }
8133 }
8134 
8135 enum bfd_arm_vfp11_pipe
8136 {
8137   VFP11_FMAC,
8138   VFP11_LS,
8139   VFP11_DS,
8140   VFP11_BAD
8141 };
8142 
8143 /* Return a VFP register number.  This is encoded as RX:X for single-precision
8144    registers, or X:RX for double-precision registers, where RX is the group of
8145    four bits in the instruction encoding and X is the single extension bit.
8146    RX and X fields are specified using their lowest (starting) bit.  The return
8147    value is:
8148 
8149      0...31: single-precision registers s0...s31
8150      32...63: double-precision registers d0...d31.
8151 
8152    Although X should be zero for VFP11 (encoding d0...d15 only), we might
8153    encounter VFP3 instructions, so we allow the full range for DP registers.  */
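/* For example, a single-precision operand with RX = 0b0011 and X = 1 maps
   to (3 << 1) | 1 = 7, i.e. s7, while the same fields for a double-precision
   operand map to (3 | (1 << 4)) + 32 = 51, i.e. d19 (a VFP3-only register,
   hence the relaxed range mentioned above).  */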
8154 
8155 static unsigned int
8156 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
8157 		     unsigned int x)
8158 {
8159   if (is_double)
8160     return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
8161   else
8162     return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
8163 }
8164 
8165 /* Set bits in *WMASK according to a register number REG as encoded by
8166    bfd_arm_vfp11_regno().  Ignore d16-d31.  */
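/* For example, REG = 7 (s7) sets bit 7, while REG = 33 (d1) sets bits 2
   and 3, i.e. the two SP registers (s2/s3) that overlay d1.  */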
8167 
8168 static void
8169 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
8170 {
8171   if (reg < 32)
8172     *wmask |= 1 << reg;
8173   else if (reg < 48)
8174     *wmask |= 3 << ((reg - 32) * 2);
8175 }
8176 
8177 /* Return TRUE if WMASK overwrites anything in REGS.  */
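/* For example, if WMASK has bit 7 set (s7 was written), then a REGS entry
   of either 7 (s7) or 35 (d3, overlaying s6/s7) makes this return TRUE.  */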
8178 
8179 static bfd_boolean
8180 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
8181 {
8182   int i;
8183 
8184   for (i = 0; i < numregs; i++)
8185     {
8186       unsigned int reg = regs[i];
8187 
8188       if (reg < 32 && (wmask & (1 << reg)) != 0)
8189 	return TRUE;
8190 
8191       reg -= 32;
8192 
8193       if (reg >= 16)
8194 	continue;
8195 
8196       if ((wmask & (3 << (reg * 2))) != 0)
8197 	return TRUE;
8198     }
8199 
8200   return FALSE;
8201 }
8202 
8203 /* In this function, we're interested in two things: finding input registers
8204    for VFP data-processing instructions, and finding the set of registers which
8205    arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
8206    hold the written set, so FLDM etc. are easy to deal with (we're only
8207    interested in 32 SP registers or 16 DP registers, due to the VFP version
8208    implemented by the chip in question).  DP registers are marked by setting
8209    both of their SP registers in the write mask.  */
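/* For example, "fadds s4, s5, s6" is a data-processing instruction with
   pqrs == 6: it is classed as VFP11_FMAC, DESTMASK gets bit 4 (s4) set,
   and REGS[] = { 5, 6 } (s5, s6) with *NUMREGS == 2.  */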
8210 
8211 static enum bfd_arm_vfp11_pipe
8212 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
8213 			   int *numregs)
8214 {
8215   enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
8216   bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
8217 
8218   if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
8219     {
8220       unsigned int pqrs;
8221       unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8222       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8223 
8224       pqrs = ((insn & 0x00800000) >> 20)
8225 	   | ((insn & 0x00300000) >> 19)
8226 	   | ((insn & 0x00000040) >> 6);
8227 
8228       switch (pqrs)
8229 	{
8230 	case 0: /* fmac[sd].  */
8231 	case 1: /* fnmac[sd].  */
8232 	case 2: /* fmsc[sd].  */
8233 	case 3: /* fnmsc[sd].  */
8234 	  vpipe = VFP11_FMAC;
8235 	  bfd_arm_vfp11_write_mask (destmask, fd);
8236 	  regs[0] = fd;
8237 	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
8238 	  regs[2] = fm;
8239 	  *numregs = 3;
8240 	  break;
8241 
8242 	case 4: /* fmul[sd].  */
8243 	case 5: /* fnmul[sd].  */
8244 	case 6: /* fadd[sd].  */
8245 	case 7: /* fsub[sd].  */
8246 	  vpipe = VFP11_FMAC;
8247 	  goto vfp_binop;
8248 
8249 	case 8: /* fdiv[sd].  */
8250 	  vpipe = VFP11_DS;
8251 	  vfp_binop:
8252 	  bfd_arm_vfp11_write_mask (destmask, fd);
8253 	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
8254 	  regs[1] = fm;
8255 	  *numregs = 2;
8256 	  break;
8257 
8258 	case 15: /* extended opcode.  */
8259 	  {
8260 	    unsigned int extn = ((insn >> 15) & 0x1e)
8261 			      | ((insn >> 7) & 1);
8262 
8263 	    switch (extn)
8264 	      {
8265 	      case 0: /* fcpy[sd].  */
8266 	      case 1: /* fabs[sd].  */
8267 	      case 2: /* fneg[sd].  */
8268 	      case 8: /* fcmp[sd].  */
8269 	      case 9: /* fcmpe[sd].  */
8270 	      case 10: /* fcmpz[sd].  */
8271 	      case 11: /* fcmpez[sd].  */
8272 	      case 16: /* fuito[sd].  */
8273 	      case 17: /* fsito[sd].  */
8274 	      case 24: /* ftoui[sd].  */
8275 	      case 25: /* ftouiz[sd].  */
8276 	      case 26: /* ftosi[sd].  */
8277 	      case 27: /* ftosiz[sd].  */
8278 		/* These instructions will not bounce due to underflow.  */
8279 		*numregs = 0;
8280 		vpipe = VFP11_FMAC;
8281 		break;
8282 
8283 	      case 3: /* fsqrt[sd].  */
8284 		/* fsqrt cannot underflow, but it can (perhaps) overwrite
8285 		   registers to cause the erratum in previous instructions.  */
8286 		bfd_arm_vfp11_write_mask (destmask, fd);
8287 		vpipe = VFP11_DS;
8288 		break;
8289 
8290 	      case 15: /* fcvt{ds,sd}.  */
8291 		{
8292 		  int rnum = 0;
8293 
8294 		  bfd_arm_vfp11_write_mask (destmask, fd);
8295 
8296 		  /* Only FCVTSD can underflow.  */
8297 		  if ((insn & 0x100) != 0)
8298 		    regs[rnum++] = fm;
8299 
8300 		  *numregs = rnum;
8301 
8302 		  vpipe = VFP11_FMAC;
8303 		}
8304 		break;
8305 
8306 	      default:
8307 		return VFP11_BAD;
8308 	      }
8309 	  }
8310 	  break;
8311 
8312 	default:
8313 	  return VFP11_BAD;
8314 	}
8315     }
8316   /* Two-register transfer.  */
8317   else if ((insn & 0x0fe00ed0) == 0x0c400a10)
8318     {
8319       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
8320 
8321       if ((insn & 0x100000) == 0)
8322 	{
8323 	  if (is_double)
8324 	    bfd_arm_vfp11_write_mask (destmask, fm);
8325 	  else
8326 	    {
8327 	      bfd_arm_vfp11_write_mask (destmask, fm);
8328 	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
8329 	    }
8330 	}
8331 
8332       vpipe = VFP11_LS;
8333     }
8334   else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
8335     {
8336       int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
8337       unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
8338 
8339       switch (puw)
8340 	{
8341 	case 0: /* Two-reg transfer.  We should catch these above.  */
8342 	  abort ();
8343 
8344 	case 2: /* fldm[sdx].  */
8345 	case 3:
8346 	case 5:
8347 	  {
8348 	    unsigned int i, offset = insn & 0xff;
8349 
8350 	    if (is_double)
8351 	      offset >>= 1;
8352 
8353 	    for (i = fd; i < fd + offset; i++)
8354 	      bfd_arm_vfp11_write_mask (destmask, i);
8355 	  }
8356 	  break;
8357 
8358 	case 4: /* fld[sd].  */
8359 	case 6:
8360 	  bfd_arm_vfp11_write_mask (destmask, fd);
8361 	  break;
8362 
8363 	default:
8364 	  return VFP11_BAD;
8365 	}
8366 
8367       vpipe = VFP11_LS;
8368     }
8369   /* Single-register transfer. Note L==0.  */
8370   else if ((insn & 0x0f100e10) == 0x0e000a10)
8371     {
8372       unsigned int opcode = (insn >> 21) & 7;
8373       unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
8374 
8375       switch (opcode)
8376 	{
8377 	case 0: /* fmsr/fmdlr.  */
8378 	case 1: /* fmdhr.  */
8379 	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
8380 	     destination register.  I don't know if this is exactly right,
8381 	     but it is the conservative choice.  */
8382 	  bfd_arm_vfp11_write_mask (destmask, fn);
8383 	  break;
8384 
8385 	case 7: /* fmxr.  */
8386 	  break;
8387 	}
8388 
8389       vpipe = VFP11_LS;
8390     }
8391 
8392   return vpipe;
8393 }
8394 
8395 
8396 static int elf32_arm_compare_mapping (const void * a, const void * b);
8397 
8398 
8399 /* Look for potentially-troublesome code sequences which might trigger the
8400    VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
8401    (available from ARM) for details of the erratum.  A short version is
8402    described in ld.texinfo.  */
8403 
8404 bfd_boolean
8405 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
8406 {
8407   asection *sec;
8408   bfd_byte *contents = NULL;
8409   int state = 0;
8410   int regs[3], numregs = 0;
8411   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8412   int use_vector;
8413 
8414   if (globals == NULL)
8415     return FALSE;
8416   use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
8417   /* We use a simple FSM to match troublesome VFP11 instruction sequences.
8418      The states transition as follows:
8419 
8420        0 -> 1 (vector) or 0 -> 2 (scalar)
8421 	   A VFP FMAC-pipeline instruction has been seen. Fill
8422 	   regs[0]..regs[numregs-1] with its input operands. Remember this
8423 	   instruction in 'first_fmac'.
8424 
8425        1 -> 2
8426 	   Any instruction, except for a VFP instruction which overwrites
8427 	   regs[*].
8428 
8429        1 -> 3 [ -> 0 ]  or
8430        2 -> 3 [ -> 0 ]
8431 	   A VFP instruction has been seen which overwrites any of regs[*].
8432 	   We must make a veneer!  Reset state to 0 before examining next
8433 	   instruction.
8434 
8435        2 -> 0
8436 	   If we fail to match anything in state 2, reset to state 0 and reset
8437 	   the instruction pointer to the instruction after 'first_fmac'.
8438 
8439      If the VFP11 vector mode is in use, there must be at least two unrelated
8440      instructions between anti-dependent VFP11 instructions to properly avoid
8441      triggering the erratum, hence the use of the extra state 1.  */
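  /* A hypothetical scalar-mode trace, for illustration:

	 fmacs s0, s1, s2    FMAC seen: state 0 -> 2, regs[] = {s0, s1, s2}
	 fmuls s1, s8, s9    overwrites input s1: state -> 3, veneer recorded

     Had the second instruction not written any of s0/s1/s2, the state
     would have been reset to 0 and the scan restarted at that
     instruction.  */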
8442 
8443   /* If we are only performing a partial link do not bother
8444      to construct any glue.  */
8445   if (bfd_link_relocatable (link_info))
8446     return TRUE;
8447 
8448   /* Skip if this bfd does not correspond to an ELF image.  */
8449   if (! is_arm_elf (abfd))
8450     return TRUE;
8451 
8452   /* We should have chosen a fix type by the time we get here.  */
8453   BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
8454 
8455   if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
8456     return TRUE;
8457 
8458   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
8459   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8460     return TRUE;
8461 
8462   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8463     {
8464       unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
8465       struct _arm_elf_section_data *sec_data;
8466 
8467       /* If we don't have executable progbits, we're not interested in this
8468 	 section.  Also skip if section is to be excluded.  */
8469       if (elf_section_type (sec) != SHT_PROGBITS
8470 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8471 	  || (sec->flags & SEC_EXCLUDE) != 0
8472 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8473 	  || sec->output_section == bfd_abs_section_ptr
8474 	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
8475 	continue;
8476 
8477       sec_data = elf32_arm_section_data (sec);
8478 
8479       if (sec_data->mapcount == 0)
8480 	continue;
8481 
8482       if (elf_section_data (sec)->this_hdr.contents != NULL)
8483 	contents = elf_section_data (sec)->this_hdr.contents;
8484       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8485 	goto error_return;
8486 
8487       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8488 	     elf32_arm_compare_mapping);
8489 
8490       for (span = 0; span < sec_data->mapcount; span++)
8491 	{
8492 	  unsigned int span_start = sec_data->map[span].vma;
8493 	  unsigned int span_end = (span == sec_data->mapcount - 1)
8494 				  ? sec->size : sec_data->map[span + 1].vma;
8495 	  char span_type = sec_data->map[span].type;
8496 
8497 	  /* FIXME: Only ARM mode is supported at present.  We may need to
8498 	     support Thumb-2 mode also at some point.  */
8499 	  if (span_type != 'a')
8500 	    continue;
8501 
8502 	  for (i = span_start; i < span_end;)
8503 	    {
8504 	      unsigned int next_i = i + 4;
8505 	      unsigned int insn = bfd_big_endian (abfd)
8506 		? (((unsigned) contents[i] << 24)
8507 		   | (contents[i + 1] << 16)
8508 		   | (contents[i + 2] << 8)
8509 		   | contents[i + 3])
8510 		: (((unsigned) contents[i + 3] << 24)
8511 		   | (contents[i + 2] << 16)
8512 		   | (contents[i + 1] << 8)
8513 		   | contents[i]);
8514 	      unsigned int writemask = 0;
8515 	      enum bfd_arm_vfp11_pipe vpipe;
8516 
8517 	      switch (state)
8518 		{
8519 		case 0:
8520 		  vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
8521 						    &numregs);
8522 		  /* I'm assuming the VFP11 erratum can trigger with denorm
8523 		     operands on either the FMAC or the DS pipeline. This might
8524 		     lead to slightly overenthusiastic veneer insertion.  */
8525 		  if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
8526 		    {
8527 		      state = use_vector ? 1 : 2;
8528 		      first_fmac = i;
8529 		      veneer_of_insn = insn;
8530 		    }
8531 		  break;
8532 
8533 		case 1:
8534 		  {
8535 		    int other_regs[3], other_numregs;
8536 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8537 						      other_regs,
8538 						      &other_numregs);
8539 		    if (vpipe != VFP11_BAD
8540 			&& bfd_arm_vfp11_antidependency (writemask, regs,
8541 							 numregs))
8542 		      state = 3;
8543 		    else
8544 		      state = 2;
8545 		  }
8546 		  break;
8547 
8548 		case 2:
8549 		  {
8550 		    int other_regs[3], other_numregs;
8551 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
8552 						      other_regs,
8553 						      &other_numregs);
8554 		    if (vpipe != VFP11_BAD
8555 			&& bfd_arm_vfp11_antidependency (writemask, regs,
8556 							 numregs))
8557 		      state = 3;
8558 		    else
8559 		      {
8560 			state = 0;
8561 			next_i = first_fmac + 4;
8562 		      }
8563 		  }
8564 		  break;
8565 
8566 		case 3:
8567 		  abort ();  /* Should be unreachable.  */
8568 		}
8569 
8570 	      if (state == 3)
8571 		{
8572 		  elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
8573 		      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
8574 
8575 		  elf32_arm_section_data (sec)->erratumcount += 1;
8576 
8577 		  newerr->u.b.vfp_insn = veneer_of_insn;
8578 
8579 		  switch (span_type)
8580 		    {
8581 		    case 'a':
8582 		      newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
8583 		      break;
8584 
8585 		    default:
8586 		      abort ();
8587 		    }
8588 
8589 		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
8590 					       first_fmac);
8591 
8592 		  newerr->vma = -1;
8593 
8594 		  newerr->next = sec_data->erratumlist;
8595 		  sec_data->erratumlist = newerr;
8596 
8597 		  state = 0;
8598 		}
8599 
8600 	      i = next_i;
8601 	    }
8602 	}
8603 
8604       if (contents != NULL
8605 	  && elf_section_data (sec)->this_hdr.contents != contents)
8606 	free (contents);
8607       contents = NULL;
8608     }
8609 
8610   return TRUE;
8611 
8612 error_return:
8613   if (contents != NULL
8614       && elf_section_data (sec)->this_hdr.contents != contents)
8615     free (contents);
8616 
8617   return FALSE;
8618 }
8619 
8620 /* Find virtual-memory addresses for VFP11 erratum veneers and their return
8621    locations after sections have been laid out, via specially-named symbols.  */
8622 
8623 void
8624 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
8625 					  struct bfd_link_info *link_info)
8626 {
8627   asection *sec;
8628   struct elf32_arm_link_hash_table *globals;
8629   char *tmp_name;
8630 
8631   if (bfd_link_relocatable (link_info))
8632     return;
8633 
8634   /* Skip if this bfd does not correspond to an ELF image.  */
8635   if (! is_arm_elf (abfd))
8636     return;
8637 
8638   globals = elf32_arm_hash_table (link_info);
8639   if (globals == NULL)
8640     return;
8641 
8642   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8643 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
8644   BFD_ASSERT (tmp_name);
8645 
8646   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8647     {
8648       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8649       elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
8650 
8651       for (; errnode != NULL; errnode = errnode->next)
8652 	{
8653 	  struct elf_link_hash_entry *myh;
8654 	  bfd_vma vma;
8655 
8656 	  switch (errnode->type)
8657 	    {
8658 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
8659 	    case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
8660 	      /* Find veneer symbol.  */
8661 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
8662 		       errnode->u.b.veneer->u.v.id);
8663 
8664 	      myh = elf_link_hash_lookup
8665 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8666 
8667 	      if (myh == NULL)
8668 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8669 				    abfd, "VFP11", tmp_name);
8670 
8671 	      vma = myh->root.u.def.section->output_section->vma
8672 		    + myh->root.u.def.section->output_offset
8673 		    + myh->root.u.def.value;
8674 
8675 	      errnode->u.b.veneer->vma = vma;
8676 	      break;
8677 
8678 	    case VFP11_ERRATUM_ARM_VENEER:
8679 	    case VFP11_ERRATUM_THUMB_VENEER:
8680 	      /* Find return location.  */
8681 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
8682 		       errnode->u.v.id);
8683 
8684 	      myh = elf_link_hash_lookup
8685 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8686 
8687 	      if (myh == NULL)
8688 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8689 				    abfd, "VFP11", tmp_name);
8690 
8691 	      vma = myh->root.u.def.section->output_section->vma
8692 		    + myh->root.u.def.section->output_offset
8693 		    + myh->root.u.def.value;
8694 
8695 	      errnode->u.v.branch->vma = vma;
8696 	      break;
8697 
8698 	    default:
8699 	      abort ();
8700 	    }
8701 	}
8702     }
8703 
8704   free (tmp_name);
8705 }
8706 
8707 /* Find virtual-memory addresses for STM32L4XX erratum veneers and their
8708    return locations after sections have been laid out, using
8709    specially-named symbols.  */
8710 
8711 void
8712 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
8713 					      struct bfd_link_info *link_info)
8714 {
8715   asection *sec;
8716   struct elf32_arm_link_hash_table *globals;
8717   char *tmp_name;
8718 
8719   if (bfd_link_relocatable (link_info))
8720     return;
8721 
8722   /* Skip if this bfd does not correspond to an ELF image.  */
8723   if (! is_arm_elf (abfd))
8724     return;
8725 
8726   globals = elf32_arm_hash_table (link_info);
8727   if (globals == NULL)
8728     return;
8729 
8730   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
8731 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
8732   BFD_ASSERT (tmp_name);
8733 
8734   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8735     {
8736       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
8737       elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
8738 
8739       for (; errnode != NULL; errnode = errnode->next)
8740 	{
8741 	  struct elf_link_hash_entry *myh;
8742 	  bfd_vma vma;
8743 
8744 	  switch (errnode->type)
8745 	    {
8746 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
8747 	      /* Find veneer symbol.  */
8748 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
8749 		       errnode->u.b.veneer->u.v.id);
8750 
8751 	      myh = elf_link_hash_lookup
8752 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8753 
8754 	      if (myh == NULL)
8755 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8756 				    abfd, "STM32L4XX", tmp_name);
8757 
8758 	      vma = myh->root.u.def.section->output_section->vma
8759 		+ myh->root.u.def.section->output_offset
8760 		+ myh->root.u.def.value;
8761 
8762 	      errnode->u.b.veneer->vma = vma;
8763 	      break;
8764 
8765 	    case STM32L4XX_ERRATUM_VENEER:
8766 	      /* Find return location.  */
8767 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
8768 		       errnode->u.v.id);
8769 
8770 	      myh = elf_link_hash_lookup
8771 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
8772 
8773 	      if (myh == NULL)
8774 		_bfd_error_handler (_("%pB: unable to find %s veneer `%s'"),
8775 				    abfd, "STM32L4XX", tmp_name);
8776 
8777 	      vma = myh->root.u.def.section->output_section->vma
8778 		+ myh->root.u.def.section->output_offset
8779 		+ myh->root.u.def.value;
8780 
8781 	      errnode->u.v.branch->vma = vma;
8782 	      break;
8783 
8784 	    default:
8785 	      abort ();
8786 	    }
8787 	}
8788     }
8789 
8790   free (tmp_name);
8791 }
8792 
8793 static inline bfd_boolean
8794 is_thumb2_ldmia (const insn32 insn)
8795 {
8796   /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
8797      1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
8798   return (insn & 0xffd02000) == 0xe8900000;
8799 }
8800 
8801 static inline bfd_boolean
8802 is_thumb2_ldmdb (const insn32 insn)
8803 {
8804   /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
8805      1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
8806   return (insn & 0xffd02000) == 0xe9100000;
8807 }
8808 
8809 static inline bfd_boolean
8810 is_thumb2_vldm (const insn32 insn)
8811 {
8812   /* A6.5 Extension register load or store instruction
8813      A7.7.229
8814      We look for SP 32-bit and DP 64-bit registers.
8815      Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
8816      <list> is consecutive 64-bit registers
8817      1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
8818      Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
8819      <list> is consecutive 32-bit registers
8820      1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
8821      if P==0 && U==1 && W==1 && Rn=1101 VPOP
8822      if PUW=010 || PUW=011 || PUW=101 VLDM.  */
8823   return
8824     (((insn & 0xfe100f00) == 0xec100b00) ||
8825      ((insn & 0xfe100f00) == 0xec100a00))
8826     && /* (IA without !).  */
8827     (((((insn << 7) >> 28) & 0xd) == 0x4)
8828      /* (IA with !), includes VPOP (when reg number is SP).  */
8829      || ((((insn << 7) >> 28) & 0xd) == 0x5)
8830      /* (DB with !).  */
8831      || ((((insn << 7) >> 28) & 0xd) == 0x9));
8832 }
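/* Example encodings, for illustration: 0xe8bd4ff0 ("ldmia.w sp!,
   {r4-r11, lr}") matches is_thumb2_ldmia, 0xe91103fc ("ldmdb r1, {r2-r9}")
   matches is_thumb2_ldmdb, and 0xecb08b10 ("vldmia r0!, {d8-d15}") matches
   is_thumb2_vldm.  */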
8833 
8834 /* STM STM32L4XX erratum: This function assumes that it receives an LDM or
8835    VLDM opcode and:
8836    - computes the number and the mode of memory accesses
8837    - decides if the replacement should be done:
8838      . replaces only if more than 8 words are accessed
8839      . or (testing purposes only) replaces all accesses.  */
8840 
8841 static bfd_boolean
8842 stm32l4xx_need_create_replacing_stub (const insn32 insn,
8843 				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
8844 {
8845   int nb_words = 0;
8846 
8847   /* The field encoding the register list is the same for both LDMIA
8848      and LDMDB encodings.  */
8849   if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
8850     nb_words = elf32_arm_popcount (insn & 0x0000ffff);
8851   else if (is_thumb2_vldm (insn))
8852    nb_words = (insn & 0xff);
8853     nb_words = (insn & 0xff);
8854   /* DEFAULT mode covers the real erratum condition; ALL mode inserts
8855      stubs for every LDM/VLDM instruction (testing purposes only).  */
8856   return
8857     (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
8858     (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
8859 }
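/* For instance, with BFD_ARM_STM32L4XX_FIX_DEFAULT an "ldmdb r1, {r2-r9}"
   (8 words) is left alone, while a "vldmia r0!, {d8-d15}" (16 words) gets
   a replacing stub.  */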
8860 
8861 /* Look for potentially-troublesome code sequences which might trigger
8862    the STM STM32L4XX erratum.  */
8863 
8864 bfd_boolean
8865 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
8866 				      struct bfd_link_info *link_info)
8867 {
8868   asection *sec;
8869   bfd_byte *contents = NULL;
8870   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
8871 
8872   if (globals == NULL)
8873     return FALSE;
8874 
8875   /* If we are only performing a partial link do not bother
8876      to construct any glue.  */
8877   if (bfd_link_relocatable (link_info))
8878     return TRUE;
8879 
8880   /* Skip if this bfd does not correspond to an ELF image.  */
8881   if (! is_arm_elf (abfd))
8882     return TRUE;
8883 
8884   if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
8885     return TRUE;
8886 
8887   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
8888   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
8889     return TRUE;
8890 
8891   for (sec = abfd->sections; sec != NULL; sec = sec->next)
8892     {
8893       unsigned int i, span;
8894       struct _arm_elf_section_data *sec_data;
8895 
8896       /* If we don't have executable progbits, we're not interested in this
8897 	 section.  Also skip if section is to be excluded.  */
8898       if (elf_section_type (sec) != SHT_PROGBITS
8899 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
8900 	  || (sec->flags & SEC_EXCLUDE) != 0
8901 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
8902 	  || sec->output_section == bfd_abs_section_ptr
8903 	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
8904 	continue;
8905 
8906       sec_data = elf32_arm_section_data (sec);
8907 
8908       if (sec_data->mapcount == 0)
8909 	continue;
8910 
8911       if (elf_section_data (sec)->this_hdr.contents != NULL)
8912 	contents = elf_section_data (sec)->this_hdr.contents;
8913       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
8914 	goto error_return;
8915 
8916       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
8917 	     elf32_arm_compare_mapping);
8918 
8919       for (span = 0; span < sec_data->mapcount; span++)
8920 	{
8921 	  unsigned int span_start = sec_data->map[span].vma;
8922 	  unsigned int span_end = (span == sec_data->mapcount - 1)
8923 	    ? sec->size : sec_data->map[span + 1].vma;
8924 	  char span_type = sec_data->map[span].type;
8925 	  int itblock_current_pos = 0;
8926 
8927 	  /* Only Thumb-2 mode needs to be supported with this CM4-specific
8928 	     code; we should not encounter any ARM-mode code here
8929 	     (i.e. span_type == 'a').  */
8930 	  if (span_type != 't')
8931 	    continue;
8932 
8933 	  for (i = span_start; i < span_end;)
8934 	    {
8935 	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
8936 	      bfd_boolean insn_32bit = FALSE;
8937 	      bfd_boolean is_ldm = FALSE;
8938 	      bfd_boolean is_vldm = FALSE;
8939 	      bfd_boolean is_not_last_in_it_block = FALSE;
8940 
8941 	      /* The first 16 bits of all 32-bit Thumb-2 instructions have
8942 		 opcode[15..13] = 0b111 with opcode[12..11] != 0b00; any
8943 		 other encoding is a 16-bit instruction.
8944 		 See 32-bit Thumb instruction encoding.  */
8945 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
8946 		insn_32bit = TRUE;
8947 
8948 	      /* Compute the predicate that tells whether the instruction
8949 		 is affected by the IT block:
8950 		 - an LDM that is not last in its IT block cannot be
8951 		   replaced, so we flag that case as an error;
8952 		 - otherwise we can create a branch at the end of the
8953 		   IT block; it will be controlled naturally by IT
8954 		   with the proper pseudo-predicate;
8955 		 - so the only interesting predicate is the one that
8956 		   tells that we are not on the last entry of an IT
8957 		   block.  */
8958 	      if (itblock_current_pos != 0)
8959 		is_not_last_in_it_block = !!--itblock_current_pos;
8960 
8961 	      if (insn_32bit)
8962 		{
8963 		  /* Load the rest of the insn (in manual-friendly order).  */
8964 		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
8965 		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
8966 		  is_vldm = is_thumb2_vldm (insn);
8967 
8968 		  /* Veneers are created for (v)ldm depending on
8969 		     option flags and memory access conditions; but
8970 		     if the instruction is not the last instruction of
8971 		     an IT block, we cannot create a jump there, so we
8972 		     bail out.  */
8973 		    if ((is_ldm || is_vldm)
8974 			&& stm32l4xx_need_create_replacing_stub
8975 			(insn, globals->stm32l4xx_fix))
8976 		      {
8977 			if (is_not_last_in_it_block)
8978 			  {
8979 			    _bfd_error_handler
8980 			      /* xgettext:c-format */
8981 			      (_("%pB(%pA+%#x): error: multiple load detected"
8982 				 " in non-last IT block instruction:"
8983 				 " STM32L4XX veneer cannot be generated; "
8984 				 "use gcc option -mrestrict-it to generate"
8985 				 " only one instruction per IT block"),
8986 			       abfd, sec, i);
8987 			  }
8988 			else
8989 			  {
8990 			    elf32_stm32l4xx_erratum_list *newerr =
8991 			      (elf32_stm32l4xx_erratum_list *)
8992 			      bfd_zmalloc
8993 			      (sizeof (elf32_stm32l4xx_erratum_list));
8994 
8995 			    elf32_arm_section_data (sec)
8996 			      ->stm32l4xx_erratumcount += 1;
8997 			    newerr->u.b.insn = insn;
8998 			    /* We create only thumb branches.  */
8999 			    newerr->type =
9000 			      STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
9001 			    record_stm32l4xx_erratum_veneer
9002 			      (link_info, newerr, abfd, sec,
9003 			       i,
9004 			       is_ldm ?
9005 			       STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
9006 			       STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
9007 			    newerr->vma = -1;
9008 			    newerr->next = sec_data->stm32l4xx_erratumlist;
9009 			    sec_data->stm32l4xx_erratumlist = newerr;
9010 			  }
9011 		      }
9012 		}
9013 	      else
9014 		{
9015 		  /* A7.7.37 IT p208
9016 		     IT blocks are only encoded in T1
9017 		     Encoding T1: IT{x{y{z}}} <firstcond>
9018 		     1 0 1 1 - 1 1 1 1 - firstcond - mask
9019 		     if mask = '0000' then see 'related encodings'
9020 		     We don't deal with UNPREDICTABLE, just ignore these.
9021 		     There can be no nested IT blocks so an IT block
9022 		     is naturally a new one for which it is worth
9023 		     computing its size.  */
9024 		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00)
9025 		    && ((insn & 0x000f) != 0x0000);
9026 		  /* If we have a new IT block we compute its size.  */
9027 		  if (is_newitblock)
9028 		    {
9029 		      /* Compute the number of instructions controlled
9030 			 by the IT block, it will be used to decide
9031 			 whether we are inside an IT block or not.  */
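		      /* For example, a plain IT (one conditional
			 instruction) always has mask == 0b1000, giving
			 4 - ctz (mask) == 1, while any four-instruction
			 block (ITTT, ITTE, ...) has bit 0 of the mask set,
			 giving 4 - ctz (mask) == 4.  */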
9032 		      unsigned int mask = insn & 0x000f;
9033 		      itblock_current_pos = 4 - ctz (mask);
9034 		    }
9035 		}
9036 
9037 	      i += insn_32bit ? 4 : 2;
9038 	    }
9039 	}
9040 
9041       if (contents != NULL
9042 	  && elf_section_data (sec)->this_hdr.contents != contents)
9043 	free (contents);
9044       contents = NULL;
9045     }
9046 
9047   return TRUE;
9048 
9049 error_return:
9050   if (contents != NULL
9051       && elf_section_data (sec)->this_hdr.contents != contents)
9052     free (contents);
9053 
9054   return FALSE;
9055 }
9056 
9057 /* Set target relocation values needed during linking.  */
9058 
9059 void
9060 bfd_elf32_arm_set_target_params (struct bfd *output_bfd,
9061 				 struct bfd_link_info *link_info,
9062 				 struct elf32_arm_params *params)
9063 {
9064   struct elf32_arm_link_hash_table *globals;
9065 
9066   globals = elf32_arm_hash_table (link_info);
9067   if (globals == NULL)
9068     return;
9069 
9070   globals->target1_is_rel = params->target1_is_rel;
9071   if (globals->fdpic_p)
9072     globals->target2_reloc = R_ARM_GOT32;
9073   else if (strcmp (params->target2_type, "rel") == 0)
9074     globals->target2_reloc = R_ARM_REL32;
9075   else if (strcmp (params->target2_type, "abs") == 0)
9076     globals->target2_reloc = R_ARM_ABS32;
9077   else if (strcmp (params->target2_type, "got-rel") == 0)
9078     globals->target2_reloc = R_ARM_GOT_PREL;
9079   else
9080     {
9081       _bfd_error_handler (_("invalid TARGET2 relocation type '%s'"),
9082 			  params->target2_type);
9083     }
9084   globals->fix_v4bx = params->fix_v4bx;
9085   globals->use_blx |= params->use_blx;
9086   globals->vfp11_fix = params->vfp11_denorm_fix;
9087   globals->stm32l4xx_fix = params->stm32l4xx_fix;
9088   if (globals->fdpic_p)
9089     globals->pic_veneer = 1;
9090   else
9091     globals->pic_veneer = params->pic_veneer;
9092   globals->fix_cortex_a8 = params->fix_cortex_a8;
9093   globals->fix_arm1176 = params->fix_arm1176;
9094   globals->cmse_implib = params->cmse_implib;
9095   globals->in_implib_bfd = params->in_implib_bfd;
9096 
9097   BFD_ASSERT (is_arm_elf (output_bfd));
9098   elf_arm_tdata (output_bfd)->no_enum_size_warning
9099     = params->no_enum_size_warning;
9100   elf_arm_tdata (output_bfd)->no_wchar_size_warning
9101     = params->no_wchar_size_warning;
9102 }
9103 
9104 /* Replace the target offset of a Thumb bl or b.w instruction.  */
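/* For example, rewriting a BL whose halfwords are 0xf000 0xf800 (offset 0)
   with OFFSET = -4 gives reloc_sign = 1, upper = 0xf7ff and lower = 0xfffe,
   i.e. a BL back to itself; the bits written at lower[13] and lower[11] are
   the Thumb-2 J1/J2 bits, computed as J = NOT(I) XOR S.  */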
9105 
9106 static void
9107 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
9108 {
9109   bfd_vma upper;
9110   bfd_vma lower;
9111   int reloc_sign;
9112 
9113   BFD_ASSERT ((offset & 1) == 0);
9114 
9115   upper = bfd_get_16 (abfd, insn);
9116   lower = bfd_get_16 (abfd, insn + 2);
9117   reloc_sign = (offset < 0) ? 1 : 0;
9118   upper = (upper & ~(bfd_vma) 0x7ff)
9119 	  | ((offset >> 12) & 0x3ff)
9120 	  | (reloc_sign << 10);
9121   lower = (lower & ~(bfd_vma) 0x2fff)
9122 	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
9123 	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
9124 	  | ((offset >> 1) & 0x7ff);
9125   bfd_put_16 (abfd, upper, insn);
9126   bfd_put_16 (abfd, lower, insn + 2);
9127 }
9128 
9129 /* Thumb code calling an ARM function.  */
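/* The glue built below is, in effect (a sketch; the exact instruction
   words come from the t2a*_insn constants):

	.thumb
	bx	pc		@ switch to ARM state; PC reads 4 bytes ahead
	nop
	.arm
	b	<target>	@ ARM-mode branch to the real destination  */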
9130 
9131 static int
9132 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
9133 			 const char *		name,
9134 			 bfd *			input_bfd,
9135 			 bfd *			output_bfd,
9136 			 asection *		input_section,
9137 			 bfd_byte *		hit_data,
9138 			 asection *		sym_sec,
9139 			 bfd_vma		offset,
9140 			 bfd_signed_vma		addend,
9141 			 bfd_vma		val,
9142 			 char **error_message)
9143 {
9144   asection * s = 0;
9145   bfd_vma my_offset;
9146   long int ret_offset;
9147   struct elf_link_hash_entry * myh;
9148   struct elf32_arm_link_hash_table * globals;
9149 
9150   myh = find_thumb_glue (info, name, error_message);
9151   if (myh == NULL)
9152     return FALSE;
9153 
9154   globals = elf32_arm_hash_table (info);
9155   BFD_ASSERT (globals != NULL);
9156   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9157 
9158   my_offset = myh->root.u.def.value;
9159 
9160   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9161 			      THUMB2ARM_GLUE_SECTION_NAME);
9162 
9163   BFD_ASSERT (s != NULL);
9164   BFD_ASSERT (s->contents != NULL);
9165   BFD_ASSERT (s->output_section != NULL);
9166 
9167   if ((my_offset & 0x01) == 0x01)
9168     {
9169       if (sym_sec != NULL
9170 	  && sym_sec->owner != NULL
9171 	  && !INTERWORK_FLAG (sym_sec->owner))
9172 	{
9173 	  _bfd_error_handler
9174 	    (_("%pB(%s): warning: interworking not enabled;"
9175 	       " first occurrence: %pB: %s call to %s"),
9176 	     sym_sec->owner, name, input_bfd, "Thumb", "ARM");
9177 
9178 	  return FALSE;
9179 	}
9180 
9181       --my_offset;
9182       myh->root.u.def.value = my_offset;
9183 
9184       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
9185 		      s->contents + my_offset);
9186 
9187       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
9188 		      s->contents + my_offset + 2);
9189 
9190       ret_offset =
9191 	/* Address of destination of the stub.  */
9192 	((bfd_signed_vma) val)
9193 	- ((bfd_signed_vma)
9194 	   /* Offset from the start of the current section
9195 	      to the start of the stubs.  */
9196 	   (s->output_offset
9197 	    /* Offset of the start of this stub from the start of the stubs.  */
9198 	    + my_offset
9199 	    /* Address of the start of the current section.  */
9200 	    + s->output_section->vma)
9201 	   /* The branch instruction is 4 bytes into the stub.  */
9202 	   + 4
9203 	   /* ARM branches work from the pc of the instruction + 8.  */
9204 	   + 8);
9205 
9206       put_arm_insn (globals, output_bfd,
9207 		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
9208 		    s->contents + my_offset + 4);
9209     }
9210 
9211   BFD_ASSERT (my_offset <= globals->thumb_glue_size);
9212 
9213   /* Now go back and fix up the original BL insn to point to here.  */
9214   ret_offset =
9215     /* Address of where the stub is located.  */
9216     (s->output_section->vma + s->output_offset + my_offset)
9217      /* Address of where the BL is located.  */
9218     - (input_section->output_section->vma + input_section->output_offset
9219        + offset)
9220     /* Addend in the relocation.  */
9221     - addend
9222     /* Biassing for PC-relative addressing.  */
9223     - 8;
9224 
9225   insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
9226 
9227   return TRUE;
9228 }
9229 
9230 /* Populate an Arm to Thumb stub.  Returns the stub symbol.  */
9231 
9232 static struct elf_link_hash_entry *
9233 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
9234 			     const char *	    name,
9235 			     bfd *		    input_bfd,
9236 			     bfd *		    output_bfd,
9237 			     asection *		    sym_sec,
9238 			     bfd_vma		    val,
9239 			     asection *		    s,
9240 			     char **		    error_message)
9241 {
9242   bfd_vma my_offset;
9243   long int ret_offset;
9244   struct elf_link_hash_entry * myh;
9245   struct elf32_arm_link_hash_table * globals;
9246 
9247   myh = find_arm_glue (info, name, error_message);
9248   if (myh == NULL)
9249     return NULL;
9250 
9251   globals = elf32_arm_hash_table (info);
9252   BFD_ASSERT (globals != NULL);
9253   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9254 
9255   my_offset = myh->root.u.def.value;
9256 
9257   if ((my_offset & 0x01) == 0x01)
9258     {
9259       if (sym_sec != NULL
9260 	  && sym_sec->owner != NULL
9261 	  && !INTERWORK_FLAG (sym_sec->owner))
9262 	{
9263 	  _bfd_error_handler
9264 	    (_("%pB(%s): warning: interworking not enabled;"
9265 	       " first occurrence: %pB: %s call to %s"),
9266 	     sym_sec->owner, name, input_bfd, "ARM", "Thumb");
9267 	}
9268 
9269       --my_offset;
9270       myh->root.u.def.value = my_offset;
9271 
9272       if (bfd_link_pic (info)
9273 	  || globals->root.is_relocatable_executable
9274 	  || globals->pic_veneer)
9275 	{
9276 	  /* For relocatable objects we can't use absolute addresses,
9277 	     so construct the address from a relative offset.  */
9278 	  /* TODO: If the offset is small it's probably worth
9279 	     constructing the address with adds.  */
9280 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
9281 			s->contents + my_offset);
9282 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
9283 			s->contents + my_offset + 4);
9284 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
9285 			s->contents + my_offset + 8);
9286 	  /* Adjust the offset by 4 for the position of the add,
9287 	     and 8 for the pipeline offset.  */
9288 	  ret_offset = (val - (s->output_offset
9289 			       + s->output_section->vma
9290 			       + my_offset + 12))
9291 		       | 1;
9292 	  bfd_put_32 (output_bfd, ret_offset,
9293 		      s->contents + my_offset + 12);
9294 	}
9295       else if (globals->use_blx)
9296 	{
9297 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
9298 			s->contents + my_offset);
9299 
9300 	  /* It's a thumb address.  Add the low order bit.  */
9301 	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
9302 		      s->contents + my_offset + 4);
9303 	}
9304       else
9305 	{
9306 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
9307 			s->contents + my_offset);
9308 
9309 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
9310 			s->contents + my_offset + 4);
9311 
9312 	  /* It's a thumb address.  Add the low order bit.  */
9313 	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
9314 		      s->contents + my_offset + 8);
9315 
9316 	  my_offset += 12;
9317 	}
9318     }
9319 
9320   BFD_ASSERT (my_offset <= globals->arm_glue_size);
9321 
9322   return myh;
9323 }
9324 
9325 /* Arm code calling a Thumb function.  */
9326 
9327 static int
9328 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
9329 			 const char *		name,
9330 			 bfd *			input_bfd,
9331 			 bfd *			output_bfd,
9332 			 asection *		input_section,
9333 			 bfd_byte *		hit_data,
9334 			 asection *		sym_sec,
9335 			 bfd_vma		offset,
9336 			 bfd_signed_vma		addend,
9337 			 bfd_vma		val,
9338 			 char **error_message)
9339 {
9340   unsigned long int tmp;
9341   bfd_vma my_offset;
9342   asection * s;
9343   long int ret_offset;
9344   struct elf_link_hash_entry * myh;
9345   struct elf32_arm_link_hash_table * globals;
9346 
9347   globals = elf32_arm_hash_table (info);
9348   BFD_ASSERT (globals != NULL);
9349   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9350 
9351   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9352 			      ARM2THUMB_GLUE_SECTION_NAME);
9353   BFD_ASSERT (s != NULL);
9354   BFD_ASSERT (s->contents != NULL);
9355   BFD_ASSERT (s->output_section != NULL);
9356 
9357   myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
9358 				     sym_sec, val, s, error_message);
9359   if (!myh)
9360     return FALSE;
9361 
9362   my_offset = myh->root.u.def.value;
9363   tmp = bfd_get_32 (input_bfd, hit_data);
9364   tmp = tmp & 0xFF000000;
9365 
9366   /* Somehow these are both 4 too far, so subtract 8.  */
9367   ret_offset = (s->output_offset
9368 		+ my_offset
9369 		+ s->output_section->vma
9370 		- (input_section->output_offset
9371 		   + input_section->output_section->vma
9372 		   + offset + addend)
9373 		- 8);
9374 
9375   tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
9376 
9377   bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
9378 
9379   return TRUE;
9380 }
9381 
9382 /* Populate Arm stub for an exported Thumb function.  */
9383 
9384 static bfd_boolean
9385 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
9386 {
9387   struct bfd_link_info * info = (struct bfd_link_info *) inf;
9388   asection * s;
9389   struct elf_link_hash_entry * myh;
9390   struct elf32_arm_link_hash_entry *eh;
9391   struct elf32_arm_link_hash_table * globals;
9392   asection *sec;
9393   bfd_vma val;
9394   char *error_message;
9395 
9396   eh = elf32_arm_hash_entry (h);
9397   /* Allocate stubs for exported Thumb functions on v4t.  */
9398   if (eh->export_glue == NULL)
9399     return TRUE;
9400 
9401   globals = elf32_arm_hash_table (info);
9402   BFD_ASSERT (globals != NULL);
9403   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9404 
9405   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9406 			      ARM2THUMB_GLUE_SECTION_NAME);
9407   BFD_ASSERT (s != NULL);
9408   BFD_ASSERT (s->contents != NULL);
9409   BFD_ASSERT (s->output_section != NULL);
9410 
9411   sec = eh->export_glue->root.u.def.section;
9412 
9413   BFD_ASSERT (sec->output_section != NULL);
9414 
9415   val = eh->export_glue->root.u.def.value + sec->output_offset
9416 	+ sec->output_section->vma;
9417 
9418   myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
9419 				     h->root.u.def.section->owner,
9420 				     globals->obfd, sec, val, s,
9421 				     &error_message);
9422   BFD_ASSERT (myh);
9423   return TRUE;
9424 }
9425 
9426 /* Populate ARMv4 BX veneers.  Returns the absolute address of the veneer.  */
9427 
9428 static bfd_vma
9429 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
9430 {
9431   bfd_byte *p;
9432   bfd_vma glue_addr;
9433   asection *s;
9434   struct elf32_arm_link_hash_table *globals;
9435 
9436   globals = elf32_arm_hash_table (info);
9437   BFD_ASSERT (globals != NULL);
9438   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
9439 
9440   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
9441 			      ARM_BX_GLUE_SECTION_NAME);
9442   BFD_ASSERT (s != NULL);
9443   BFD_ASSERT (s->contents != NULL);
9444   BFD_ASSERT (s->output_section != NULL);
9445 
9446   BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
9447 
9448   glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
9449 
9450   if ((globals->bx_glue_offset[reg] & 1) == 0)
9451     {
9452       p = s->contents + glue_addr;
9453       bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
9454       bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
9455       bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
9456       globals->bx_glue_offset[reg] |= 1;
9457     }
9458 
9459   return glue_addr + s->output_section->vma + s->output_offset;
9460 }
9461 
9462 /* Generate Arm stubs for exported Thumb symbols.  */
9463 static void
9464 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
9465 				  struct bfd_link_info *link_info)
9466 {
9467   struct elf32_arm_link_hash_table * globals;
9468 
9469   if (link_info == NULL)
9470     /* Ignore this if we are not called by the ELF backend linker.  */
9471     return;
9472 
9473   globals = elf32_arm_hash_table (link_info);
9474   if (globals == NULL)
9475     return;
9476 
9477   /* If blx is available then exported Thumb symbols are OK and there is
9478      nothing to do.  */
9479   if (globals->use_blx)
9480     return;
9481 
9482   elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
9483 			  link_info);
9484 }
9485 
9486 /* Reserve space for COUNT dynamic relocations in relocation section
9487    SRELOC.  */
9488 
9489 static void
9490 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
9491 			      bfd_size_type count)
9492 {
9493   struct elf32_arm_link_hash_table *htab;
9494 
9495   htab = elf32_arm_hash_table (info);
9496   BFD_ASSERT (htab->root.dynamic_sections_created);
9497   if (sreloc == NULL)
9498     abort ();
9499   sreloc->size += RELOC_SIZE (htab) * count;
9500 }
9501 
9502 /* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
9503    dynamic, the relocations should go in SRELOC, otherwise they should
9504    go in the special .rel.iplt section.  */
9505 
9506 static void
9507 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
9508 			    bfd_size_type count)
9509 {
9510   struct elf32_arm_link_hash_table *htab;
9511 
9512   htab = elf32_arm_hash_table (info);
9513   if (!htab->root.dynamic_sections_created)
9514     htab->root.irelplt->size += RELOC_SIZE (htab) * count;
9515   else
9516     {
9517       BFD_ASSERT (sreloc != NULL);
9518       sreloc->size += RELOC_SIZE (htab) * count;
9519     }
9520 }
9521 
9522 /* Add relocation REL to the end of relocation section SRELOC.  */
9523 
9524 static void
9525 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
9526 			asection *sreloc, Elf_Internal_Rela *rel)
9527 {
9528   bfd_byte *loc;
9529   struct elf32_arm_link_hash_table *htab;
9530 
9531   htab = elf32_arm_hash_table (info);
9532   if (!htab->root.dynamic_sections_created
9533       && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
9534     sreloc = htab->root.irelplt;
9535   if (sreloc == NULL)
9536     abort ();
9537   loc = sreloc->contents;
9538   loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
9539   if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
9540     abort ();
9541   SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
9542 }
9543 
9544 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
9545    IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
9546    to .plt.  */
9547 
9548 static void
9549 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
9550 			      bfd_boolean is_iplt_entry,
9551 			      union gotplt_union *root_plt,
9552 			      struct arm_plt_info *arm_plt)
9553 {
9554   struct elf32_arm_link_hash_table *htab;
9555   asection *splt;
9556   asection *sgotplt;
9557 
9558   htab = elf32_arm_hash_table (info);
9559 
9560   if (is_iplt_entry)
9561     {
9562       splt = htab->root.iplt;
9563       sgotplt = htab->root.igotplt;
9564 
9565       /* NaCl uses a special first entry in .iplt too.  */
9566       if (htab->nacl_p && splt->size == 0)
9567 	splt->size += htab->plt_header_size;
9568 
9569       /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
9570       elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
9571     }
9572   else
9573     {
9574       splt = htab->root.splt;
9575       sgotplt = htab->root.sgotplt;
9576 
9577       if (htab->fdpic_p)
9578 	{
9579 	  /* Allocate room for R_ARM_FUNCDESC_VALUE.  */
9580 	  /* For lazy binding, relocations will be put into .rel.plt;
9581 	     otherwise they go into .rel.got.  */
9582 	  /* FIXME: lazy binding is not supported yet, so use .rel.got.  */
9583 	  if (info->flags & DF_BIND_NOW)
9584 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
9585 	  else
9586 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9587 	}
9588       else
9589 	{
9590 	  /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
9591 	  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
9592 	}
9593 
9594       /* If this is the first .plt entry, make room for the special
9595 	 first entry.  */
9596       if (splt->size == 0)
9597 	splt->size += htab->plt_header_size;
9598 
9599       htab->next_tls_desc_index++;
9600     }
9601 
9602   /* Allocate the PLT entry itself, including any leading Thumb stub.  */
9603   if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9604     splt->size += PLT_THUMB_STUB_SIZE;
9605   root_plt->offset = splt->size;
9606   splt->size += htab->plt_entry_size;
9607 
9608   if (!htab->symbian_p)
9609     {
9610       /* We also need to make an entry in the .got.plt section, which
9611 	 will be placed in the .got section by the linker script.  */
9612       if (is_iplt_entry)
9613 	arm_plt->got_offset = sgotplt->size;
9614       else
9615 	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
9616       if (htab->fdpic_p)
9617 	/* Function descriptor takes 64 bits in GOT.  */
9618 	sgotplt->size += 8;
9619       else
9620 	sgotplt->size += 4;
9621     }
9622 }
9623 
9624 static bfd_vma
9625 arm_movw_immediate (bfd_vma value)
9626 {
9627   return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
9628 }
9629 
9630 static bfd_vma
9631 arm_movt_immediate (bfd_vma value)
9632 {
9633   return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
9634 }
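/* For example, materialising 0x12345678 with a MOVW/MOVT pair:
   arm_movw_immediate (0x12345678) == 0x50678 (imm4 = 5, imm12 = 0x678 for
   the low half 0x5678), and arm_movt_immediate (0x12345678) == 0x10234
   (imm4 = 1, imm12 = 0x234 for the high half 0x1234), matching the
   imm4:imm12 split used by the MOVW/MOVT encodings.  */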
9635 
9636 /* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
9637    the entry lives in .iplt and resolves to (*SYM_VALUE)().
9638    Otherwise, DYNINDX is the index of the symbol in the dynamic
9639    symbol table and SYM_VALUE is undefined.
9640 
9641    ROOT_PLT points to the offset of the PLT entry from the start of its
9642    section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
9643    bookkeeping information.
9644 
9645    Returns FALSE if there was a problem.  */
9646 
9647 static bfd_boolean
9648 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
9649 			      union gotplt_union *root_plt,
9650 			      struct arm_plt_info *arm_plt,
9651 			      int dynindx, bfd_vma sym_value)
9652 {
9653   struct elf32_arm_link_hash_table *htab;
9654   asection *sgot;
9655   asection *splt;
9656   asection *srel;
9657   bfd_byte *loc;
9658   bfd_vma plt_index;
9659   Elf_Internal_Rela rel;
9660   bfd_vma plt_header_size;
9661   bfd_vma got_header_size;
9662 
9663   htab = elf32_arm_hash_table (info);
9664 
9665   /* Pick the appropriate sections and sizes.  */
9666   if (dynindx == -1)
9667     {
9668       splt = htab->root.iplt;
9669       sgot = htab->root.igotplt;
9670       srel = htab->root.irelplt;
9671 
9672       /* There are no reserved entries in .igot.plt, and no special
9673 	 first entry in .iplt.  */
9674       got_header_size = 0;
9675       plt_header_size = 0;
9676     }
9677   else
9678     {
9679       splt = htab->root.splt;
9680       sgot = htab->root.sgotplt;
9681       srel = htab->root.srelplt;
9682 
9683       got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
9684       plt_header_size = htab->plt_header_size;
9685     }
9686   BFD_ASSERT (splt != NULL && srel != NULL);
9687 
9688   /* Fill in the entry in the procedure linkage table.  */
9689   if (htab->symbian_p)
9690     {
9691       BFD_ASSERT (dynindx >= 0);
9692       put_arm_insn (htab, output_bfd,
9693 		    elf32_arm_symbian_plt_entry[0],
9694 		    splt->contents + root_plt->offset);
9695       bfd_put_32 (output_bfd,
9696 		  elf32_arm_symbian_plt_entry[1],
9697 		  splt->contents + root_plt->offset + 4);
9698 
9699       /* Fill in the entry in the .rel.plt section.  */
9700       rel.r_offset = (splt->output_section->vma
9701 		      + splt->output_offset
9702 		      + root_plt->offset + 4);
9703       rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
9704 
9705       /* Get the index in the procedure linkage table which
9706 	 corresponds to this symbol.  This is the index of this symbol
9707 	 in all the symbols for which we are making plt entries.  The
9708 	 first entry in the procedure linkage table is reserved.  */
9709       plt_index = ((root_plt->offset - plt_header_size)
9710 		   / htab->plt_entry_size);
9711     }
9712   else
9713     {
9714       bfd_vma got_offset, got_address, plt_address;
9715       bfd_vma got_displacement, initial_got_entry;
9716       bfd_byte * ptr;
9717 
9718       BFD_ASSERT (sgot != NULL);
9719 
9720       /* Get the offset into the .(i)got.plt table of the entry that
9721 	 corresponds to this function.  */
9722       got_offset = (arm_plt->got_offset & -2);
9723 
9724       /* Get the index in the procedure linkage table which
9725 	 corresponds to this symbol.  This is the index of this symbol
9726 	 in all the symbols for which we are making plt entries.
9727 	 After the reserved .got.plt entries, all symbols appear in
9728 	 the same order as in .plt.  */
9729       if (htab->fdpic_p)
9730 	/* Function descriptor takes 8 bytes.  */
9731 	plt_index = (got_offset - got_header_size) / 8;
9732       else
9733 	plt_index = (got_offset - got_header_size) / 4;
9734 
9735       /* Calculate the address of the GOT entry.  */
9736       got_address = (sgot->output_section->vma
9737 		     + sgot->output_offset
9738 		     + got_offset);
9739 
9740       /* ...and the address of the PLT entry.  */
9741       plt_address = (splt->output_section->vma
9742 		     + splt->output_offset
9743 		     + root_plt->offset);
9744 
9745       ptr = splt->contents + root_plt->offset;
9746       if (htab->vxworks_p && bfd_link_pic (info))
9747 	{
9748 	  unsigned int i;
9749 	  bfd_vma val;
9750 
9751 	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9752 	    {
9753 	      val = elf32_arm_vxworks_shared_plt_entry[i];
9754 	      if (i == 2)
9755 		val |= got_address - sgot->output_section->vma;
9756 	      if (i == 5)
9757 		val |= plt_index * RELOC_SIZE (htab);
9758 	      if (i == 2 || i == 5)
9759 		bfd_put_32 (output_bfd, val, ptr);
9760 	      else
9761 		put_arm_insn (htab, output_bfd, val, ptr);
9762 	    }
9763 	}
9764       else if (htab->vxworks_p)
9765 	{
9766 	  unsigned int i;
9767 	  bfd_vma val;
9768 
9769 	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
9770 	    {
9771 	      val = elf32_arm_vxworks_exec_plt_entry[i];
9772 	      if (i == 2)
9773 		val |= got_address;
9774 	      if (i == 4)
9775 		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
9776 	      if (i == 5)
9777 		val |= plt_index * RELOC_SIZE (htab);
9778 	      if (i == 2 || i == 5)
9779 		bfd_put_32 (output_bfd, val, ptr);
9780 	      else
9781 		put_arm_insn (htab, output_bfd, val, ptr);
9782 	    }
9783 
9784 	  loc = (htab->srelplt2->contents
9785 		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
9786 
9787 	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
9788 	     referencing the GOT for this PLT entry.  */
9789 	  rel.r_offset = plt_address + 8;
9790 	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
9791 	  rel.r_addend = got_offset;
9792 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9793 	  loc += RELOC_SIZE (htab);
9794 
9795 	  /* Create the R_ARM_ABS32 relocation referencing the
9796 	     beginning of the PLT for this GOT entry.  */
9797 	  rel.r_offset = got_address;
9798 	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
9799 	  rel.r_addend = 0;
9800 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
9801 	}
9802       else if (htab->nacl_p)
9803 	{
9804 	  /* Calculate the displacement between the PLT slot and the
9805 	     common tail that's part of the special initial PLT slot.  */
9806 	  int32_t tail_displacement
9807 	    = ((splt->output_section->vma + splt->output_offset
9808 		+ ARM_NACL_PLT_TAIL_OFFSET)
9809 	       - (plt_address + htab->plt_entry_size + 4));
9810 	  BFD_ASSERT ((tail_displacement & 3) == 0);
9811 	  tail_displacement >>= 2;
9812 
9813 	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
9814 		      || (-tail_displacement & 0xff000000) == 0);
9815 
9816 	  /* Calculate the displacement between the PLT slot and the entry
9817 	     in the GOT.  The offset accounts for the value produced by
9818 	     adding to pc in the penultimate instruction of the PLT stub.  */
9819 	  got_displacement = (got_address
9820 			      - (plt_address + htab->plt_entry_size));
9821 
9822 	  /* NaCl does not support interworking at all.  */
9823 	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
9824 
9825 	  put_arm_insn (htab, output_bfd,
9826 			elf32_arm_nacl_plt_entry[0]
9827 			| arm_movw_immediate (got_displacement),
9828 			ptr + 0);
9829 	  put_arm_insn (htab, output_bfd,
9830 			elf32_arm_nacl_plt_entry[1]
9831 			| arm_movt_immediate (got_displacement),
9832 			ptr + 4);
9833 	  put_arm_insn (htab, output_bfd,
9834 			elf32_arm_nacl_plt_entry[2],
9835 			ptr + 8);
9836 	  put_arm_insn (htab, output_bfd,
9837 			elf32_arm_nacl_plt_entry[3]
9838 			| (tail_displacement & 0x00ffffff),
9839 			ptr + 12);
9840 	}
9841       else if (htab->fdpic_p)
9842 	{
9843 	  const bfd_vma *plt_entry = using_thumb_only (htab)
9844 	    ? elf32_arm_fdpic_thumb_plt_entry
9845 	    : elf32_arm_fdpic_plt_entry;
9846 
9847 	  /* Fill in the Thumb stub if needed.  */
9848 	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9849 	    {
9850 	      put_thumb_insn (htab, output_bfd,
9851 			      elf32_arm_plt_thumb_stub[0], ptr - 4);
9852 	      put_thumb_insn (htab, output_bfd,
9853 			      elf32_arm_plt_thumb_stub[1], ptr - 2);
9854 	    }
9855 	  /* As we are using 32 bit instructions even for the Thumb
9856 	     version, we have to use 'put_arm_insn' instead of
9857 	     'put_thumb_insn'.  */
9858 	  put_arm_insn (htab, output_bfd, plt_entry[0], ptr + 0);
9859 	  put_arm_insn (htab, output_bfd, plt_entry[1], ptr + 4);
9860 	  put_arm_insn (htab, output_bfd, plt_entry[2], ptr + 8);
9861 	  put_arm_insn (htab, output_bfd, plt_entry[3], ptr + 12);
9862 	  bfd_put_32 (output_bfd, got_offset, ptr + 16);
9863 
9864 	  if (!(info->flags & DF_BIND_NOW))
9865 	    {
9866 	      /* funcdesc_value_reloc_offset.  */
9867 	      bfd_put_32 (output_bfd,
9868 			  htab->root.srelplt->reloc_count * RELOC_SIZE (htab),
9869 			  ptr + 20);
9870 	      put_arm_insn (htab, output_bfd, plt_entry[6], ptr + 24);
9871 	      put_arm_insn (htab, output_bfd, plt_entry[7], ptr + 28);
9872 	      put_arm_insn (htab, output_bfd, plt_entry[8], ptr + 32);
9873 	      put_arm_insn (htab, output_bfd, plt_entry[9], ptr + 36);
9874 	    }
9875 	}
9876       else if (using_thumb_only (htab))
9877 	{
9878 	  /* PR ld/16017: Generate thumb only PLT entries.  */
9879 	  if (!using_thumb2 (htab))
9880 	    {
9881 	      /* FIXME: We ought to be able to generate thumb-1 PLT
9882 		 instructions...  */
9883 	      _bfd_error_handler (_("%pB: warning: thumb-1 mode PLT generation not currently supported"),
9884 				  output_bfd);
9885 	      return FALSE;
9886 	    }
9887 
9888 	  /* Calculate the displacement between the PLT slot and the entry in
9889 	     the GOT.  The 12-byte offset accounts for the value produced by
9890 	     adding to pc in the 3rd instruction of the PLT stub.  */
9891 	  got_displacement = got_address - (plt_address + 12);
9892 
9893 	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
9894 	     instead of 'put_thumb_insn'.  */
9895 	  put_arm_insn (htab, output_bfd,
9896 			elf32_thumb2_plt_entry[0]
9897 			| ((got_displacement & 0x000000ff) << 16)
9898 			| ((got_displacement & 0x00000700) << 20)
9899 			| ((got_displacement & 0x00000800) >>  1)
9900 			| ((got_displacement & 0x0000f000) >> 12),
9901 			ptr + 0);
9902 	  put_arm_insn (htab, output_bfd,
9903 			elf32_thumb2_plt_entry[1]
9904 			| ((got_displacement & 0x00ff0000)      )
9905 			| ((got_displacement & 0x07000000) <<  4)
9906 			| ((got_displacement & 0x08000000) >> 17)
9907 			| ((got_displacement & 0xf0000000) >> 28),
9908 			ptr + 4);
9909 	  put_arm_insn (htab, output_bfd,
9910 			elf32_thumb2_plt_entry[2],
9911 			ptr + 8);
9912 	  put_arm_insn (htab, output_bfd,
9913 			elf32_thumb2_plt_entry[3],
9914 			ptr + 12);
9915 	}
9916       else
9917 	{
9918 	  /* Calculate the displacement between the PLT slot and the
9919 	     entry in the GOT.  The eight-byte offset accounts for the
9920 	     value produced by adding to pc in the first instruction
9921 	     of the PLT stub.  */
9922 	  got_displacement = got_address - (plt_address + 8);
9923 
9924 	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
9925 	    {
9926 	      put_thumb_insn (htab, output_bfd,
9927 			      elf32_arm_plt_thumb_stub[0], ptr - 4);
9928 	      put_thumb_insn (htab, output_bfd,
9929 			      elf32_arm_plt_thumb_stub[1], ptr - 2);
9930 	    }
9931 
9932 	  if (!elf32_arm_use_long_plt_entry)
9933 	    {
9934 	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
9935 
9936 	      put_arm_insn (htab, output_bfd,
9937 			    elf32_arm_plt_entry_short[0]
9938 			    | ((got_displacement & 0x0ff00000) >> 20),
9939 			    ptr + 0);
9940 	      put_arm_insn (htab, output_bfd,
9941 			    elf32_arm_plt_entry_short[1]
9942 			    | ((got_displacement & 0x000ff000) >> 12),
9943 			    ptr + 4);
9944 	      put_arm_insn (htab, output_bfd,
9945 			    elf32_arm_plt_entry_short[2]
9946 			    | (got_displacement & 0x00000fff),
9947 			    ptr + 8);
9948 #ifdef FOUR_WORD_PLT
9949 	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
9950 #endif
9951 	    }
9952 	  else
9953 	    {
9954 	      put_arm_insn (htab, output_bfd,
9955 			    elf32_arm_plt_entry_long[0]
9956 			    | ((got_displacement & 0xf0000000) >> 28),
9957 			    ptr + 0);
9958 	      put_arm_insn (htab, output_bfd,
9959 			    elf32_arm_plt_entry_long[1]
9960 			    | ((got_displacement & 0x0ff00000) >> 20),
9961 			    ptr + 4);
9962 	      put_arm_insn (htab, output_bfd,
9963 			    elf32_arm_plt_entry_long[2]
9964 			    | ((got_displacement & 0x000ff000) >> 12),
9965 			    ptr + 8);
9966 	      put_arm_insn (htab, output_bfd,
9967 			    elf32_arm_plt_entry_long[3]
9968 			    | (got_displacement & 0x00000fff),
9969 			    ptr + 12);
9970 	    }
9971 	}
9972 
9973       /* Fill in the entry in the .rel(a).(i)plt section.  */
9974       rel.r_offset = got_address;
9975       rel.r_addend = 0;
9976       if (dynindx == -1)
9977 	{
9978 	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
9979 	     The dynamic linker or static executable then calls SYM_VALUE
9980 	     to determine the correct run-time value of the .igot.plt entry.  */
9981 	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
9982 	  initial_got_entry = sym_value;
9983 	}
9984       else
9985 	{
9986 	  /* For FDPIC we will have to resolve an R_ARM_FUNCDESC_VALUE
9987 	     relocation used by the PLT entry.  */
9988 	  if (htab->fdpic_p)
9989 	    {
9990 	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_FUNCDESC_VALUE);
9991 	      initial_got_entry = 0;
9992 	    }
9993 	  else
9994 	    {
9995 	      rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
9996 	      initial_got_entry = (splt->output_section->vma
9997 				   + splt->output_offset);
9998 	    }
9999 	}
10000 
10001       /* Fill in the entry in the global offset table.  */
10002       bfd_put_32 (output_bfd, initial_got_entry,
10003 		  sgot->contents + got_offset);
10004 
10005       if (htab->fdpic_p && !(info->flags & DF_BIND_NOW))
10006 	{
10007 	  /* Setup initial funcdesc value.  */
10008 	  /* FIXME: we don't support lazy binding because there is a
10009 	     race condition between both words getting written and
10010 	     some other thread attempting to read them. The ARM
10011 	     architecture does not have an atomic 64 bit load/store
10012 	     instruction that could be used to prevent it; it is
10013 	     recommended that threaded FDPIC applications run with the
10014 	     LD_BIND_NOW environment variable set.  */
10015 	  bfd_put_32 (output_bfd, plt_address + 0x18,
10016 		      sgot->contents + got_offset);
10017 	  bfd_put_32 (output_bfd, -1 /*TODO*/,
10018 		      sgot->contents + got_offset + 4);
10019 	}
10020     }
10021 
10022   if (dynindx == -1)
10023     elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
10024   else
10025     {
10026       if (htab->fdpic_p)
10027 	{
10028 	  /* For FDPIC we put PLT relocations into .rel.got when not
10029 	     lazy binding, otherwise we put them in .rel.plt.  For now,
10030 	     we don't support lazy binding, so they go in .rel.got.  */
10031 	  if (info->flags & DF_BIND_NOW)
10032 	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelgot, &rel);
10033 	  else
10034 	    elf32_arm_add_dynreloc (output_bfd, info, htab->root.srelplt, &rel);
10035 	}
10036       else
10037 	{
10038 	  loc = srel->contents + plt_index * RELOC_SIZE (htab);
10039 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
10040 	}
10041     }
10042 
10043   return TRUE;
10044 }
10045 
10046 /* Some relocations map to different relocations depending on the
10047    target.  Return the real relocation.  */
10048 
10049 static int
10050 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
10051 		     int r_type)
10052 {
10053   switch (r_type)
10054     {
10055     case R_ARM_TARGET1:
10056       if (globals->target1_is_rel)
10057 	return R_ARM_REL32;
10058       else
10059 	return R_ARM_ABS32;
10060 
10061     case R_ARM_TARGET2:
10062       return globals->target2_reloc;
10063 
10064     default:
10065       return r_type;
10066     }
10067 }
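
/* The target1_is_rel and target2_reloc fields are set from the linker's
   --target1-rel/--target1-abs and --target2= command-line options; for
   example, R_ARM_TARGET2 typically resolves to R_ARM_GOT_PREL on
   GNU/Linux EABI targets.  */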
10068 
10069 /* Return the base VMA address which should be subtracted from real addresses
10070    when resolving @dtpoff relocation.
10071    This is PT_TLS segment p_vaddr.  */
10072 
10073 static bfd_vma
10074 dtpoff_base (struct bfd_link_info *info)
10075 {
10076   /* If tls_sec is NULL, we should have signalled an error already.  */
10077   if (elf_hash_table (info)->tls_sec == NULL)
10078     return 0;
10079   return elf_hash_table (info)->tls_sec->vma;
10080 }
10081 
10082 /* Return the relocation value for @tpoff relocation
10083    if STT_TLS virtual address is ADDRESS.  */
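/* ARM uses TLS Variant 1: the thread pointer addresses a TCB_SIZE-byte
   control block, and the executable's TLS block follows it, aligned to
   the TLS segment's alignment.  As a worked example (assuming an 8-byte
   TCB and a 4-byte-aligned TLS section), a variable at
   tls_sec->vma + 0x10 gets a tpoff of 0x10 + 8 = 0x18.  */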
10084 
10085 static bfd_vma
10086 tpoff (struct bfd_link_info *info, bfd_vma address)
10087 {
10088   struct elf_link_hash_table *htab = elf_hash_table (info);
10089   bfd_vma base;
10090 
10091   /* If tls_sec is NULL, we should have signalled an error already.  */
10092   if (htab->tls_sec == NULL)
10093     return 0;
10094   base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
10095   return address - htab->tls_sec->vma + base;
10096 }
10097 
10098 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
10099    VALUE is the relocation value.  */
10100 
10101 static bfd_reloc_status_type
10102 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
10103 {
10104   if (value > 0xfff)
10105     return bfd_reloc_overflow;
10106 
10107   value |= bfd_get_32 (abfd, data) & 0xfffff000;
10108   bfd_put_32 (abfd, value, data);
10109   return bfd_reloc_ok;
10110 }
10111 
10112 /* Handle TLS relaxations.  Relaxing is possible for symbols that use
10113    R_ARM_TLS_GOTDESC, R_ARM_{,THM_}TLS_CALL or
10114    R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
10115 
10116    Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
10117    is to then call final_link_relocate.  Return other values in the
10118    case of error.
10119 
10120    FIXME: When --emit-relocs is in effect, we'll emit relocs describing
10121    the pre-relaxed code.  It would be nice if the relocs were updated
10122    to match the optimization.   */
10123 
10124 static bfd_reloc_status_type
10125 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
10126 		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
10127 		     Elf_Internal_Rela *rel, unsigned long is_local)
10128 {
10129   unsigned long insn;
10130 
10131   switch (ELF32_R_TYPE (rel->r_info))
10132     {
10133     default:
10134       return bfd_reloc_notsupported;
10135 
10136     case R_ARM_TLS_GOTDESC:
10137       if (is_local)
10138 	insn = 0;
10139       else
10140 	{
10141 	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10142 	  if (insn & 1)
10143 	    insn -= 5; /* THUMB */
10144 	  else
10145 	    insn -= 8; /* ARM */
10146 	}
10147       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10148       return bfd_reloc_continue;
10149 
10150     case R_ARM_THM_TLS_DESCSEQ:
10151       /* Thumb insn.  */
10152       insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
10153       if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
10154 	{
10155 	  if (is_local)
10156 	    /* nop */
10157 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10158 	}
10159       else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
10160 	{
10161 	  if (is_local)
10162 	    /* nop */
10163 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10164 	  else
10165 	    /* ldr rx,[ry] */
10166 	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
10167 	}
10168       else if ((insn & 0xff87) == 0x4780)  /* blx rx */
10169 	{
10170 	  if (is_local)
10171 	    /* nop */
10172 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
10173 	  else
10174 	    /* mov r0, rx */
10175 	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
10176 			contents + rel->r_offset);
10177 	}
10178       else
10179 	{
10180 	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10181 	    /* It's a 32 bit instruction, fetch the rest of it for
10182 	       error generation.  */
10183 	    insn = (insn << 16)
10184 	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
10185 	  _bfd_error_handler
10186 	    /* xgettext:c-format */
10187 	    (_("%pB(%pA+%#" PRIx64 "): "
10188 	       "unexpected %s instruction '%#lx' in TLS trampoline"),
10189 	     input_bfd, input_sec, (uint64_t) rel->r_offset,
10190 	     "Thumb", insn);
10191 	  return bfd_reloc_notsupported;
10192 	}
10193       break;
10194 
10195     case R_ARM_TLS_DESCSEQ:
10196       /* arm insn.  */
10197       insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
10198       if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
10199 	{
10200 	  if (is_local)
10201 	    /* mov rx, ry */
10202 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
10203 			contents + rel->r_offset);
10204 	}
10205       else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
10206 	{
10207 	  if (is_local)
10208 	    /* nop */
10209 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10210 	  else
10211 	    /* ldr rx,[ry] */
10212 	    bfd_put_32 (input_bfd, insn & 0xfffff000,
10213 			contents + rel->r_offset);
10214 	}
10215       else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
10216 	{
10217 	  if (is_local)
10218 	    /* nop */
10219 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
10220 	  else
10221 	    /* mov r0, rx */
10222 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
10223 			contents + rel->r_offset);
10224 	}
10225       else
10226 	{
10227 	  _bfd_error_handler
10228 	    /* xgettext:c-format */
10229 	    (_("%pB(%pA+%#" PRIx64 "): "
10230 	       "unexpected %s instruction '%#lx' in TLS trampoline"),
10231 	     input_bfd, input_sec, (uint64_t) rel->r_offset,
10232 	     "ARM", insn);
10233 	  return bfd_reloc_notsupported;
10234 	}
10235       break;
10236 
10237     case R_ARM_TLS_CALL:
10238       /* GD->IE relaxation, turn the instruction into 'nop' or
10239 	 'ldr r0, [pc,r0]'  */
10240       insn = is_local ? 0xe1a00000 : 0xe79f0000;
10241       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
10242       break;
10243 
10244     case R_ARM_THM_TLS_CALL:
10245       /* GD->IE relaxation.  */
10246       if (!is_local)
10247 	/* add r0,pc; ldr r0, [r0]  */
10248 	insn = 0x44786800;
10249       else if (using_thumb2 (globals))
10250 	/* nop.w */
10251 	insn = 0xf3af8000;
10252       else
10253 	/* nop; nop */
10254 	insn = 0xbf00bf00;
10255 
10256       bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
10257       bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
10258       break;
10259     }
10260   return bfd_reloc_ok;
10261 }
10262 
10263 /* For a given value of n, calculate the value of G_n as required to
10264    deal with group relocations.  We return it in the form of an
10265    encoded constant-and-rotation, together with the final residual.  If n is
10266    specified as less than zero, then final_residual is filled with the
10267    input value and no further action is performed.  */
10268 
10269 static bfd_vma
10270 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
10271 {
10272   int current_n;
10273   bfd_vma g_n;
10274   bfd_vma encoded_g_n = 0;
10275   bfd_vma residual = value; /* Also known as Y_n.  */
10276 
10277   for (current_n = 0; current_n <= n; current_n++)
10278     {
10279       int shift;
10280 
10281       /* Calculate which part of the value to mask.  */
10282       if (residual == 0)
10283 	shift = 0;
10284       else
10285 	{
10286 	  int msb;
10287 
10288 	  /* Determine the most significant bit in the residual and
10289 	     align the resulting value to a 2-bit boundary.  */
10290 	  for (msb = 30; msb >= 0; msb -= 2)
10291 	    if (residual & (3 << msb))
10292 	      break;
10293 
10294 	  /* The desired shift is now (msb - 6), or zero, whichever
10295 	     is the greater.  */
10296 	  shift = msb - 6;
10297 	  if (shift < 0)
10298 	    shift = 0;
10299 	}
10300 
10301       /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
10302       g_n = residual & (0xff << shift);
10303       encoded_g_n = (g_n >> shift)
10304 		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
10305 
10306       /* Calculate the residual for the next time around.  */
10307       residual &= ~g_n;
10308     }
10309 
10310   *final_residual = residual;
10311 
10312   return encoded_g_n;
10313 }
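
/* As a worked example, for VALUE 0x12345678 and N 0 the most significant
   2-bit-aligned chunk starts at bit 28, giving a shift of 22: G_0 is
   0x12000000, the residual becomes 0x00345678, and the returned encoding
   is 0x548 (imm8 0x48 with rotation field 5, since 0x48 ROR 10 is
   0x12000000).  */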
10314 
10315 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
10316    Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */
10317 
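/* In the A32 data-processing encoding the opcode occupies bits 21-24 of
   the instruction; ADD is 0b0100 (1 << 23 within the 0x1e00000 mask) and
   SUB is 0b0010 (1 << 22), which is what the tests below check.  */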
10318 static int
10319 identify_add_or_sub (bfd_vma insn)
10320 {
10321   int opcode = insn & 0x1e00000;
10322 
10323   if (opcode == 1 << 23) /* ADD */
10324     return 1;
10325 
10326   if (opcode == 1 << 22) /* SUB */
10327     return -1;
10328 
10329   return 0;
10330 }
10331 
10332 /* Perform a relocation as part of a final link.  */
10333 
10334 static bfd_reloc_status_type
10335 elf32_arm_final_link_relocate (reloc_howto_type *	    howto,
10336 			       bfd *			    input_bfd,
10337 			       bfd *			    output_bfd,
10338 			       asection *		    input_section,
10339 			       bfd_byte *		    contents,
10340 			       Elf_Internal_Rela *	    rel,
10341 			       bfd_vma			    value,
10342 			       struct bfd_link_info *	    info,
10343 			       asection *		    sym_sec,
10344 			       const char *		    sym_name,
10345 			       unsigned char		    st_type,
10346 			       enum arm_st_branch_type	    branch_type,
10347 			       struct elf_link_hash_entry * h,
10348 			       bfd_boolean *		    unresolved_reloc_p,
10349 			       char **			    error_message)
10350 {
10351   unsigned long			r_type = howto->type;
10352   unsigned long			r_symndx;
10353   bfd_byte *			hit_data = contents + rel->r_offset;
10354   bfd_vma *			local_got_offsets;
10355   bfd_vma *			local_tlsdesc_gotents;
10356   asection *			sgot;
10357   asection *			splt;
10358   asection *			sreloc = NULL;
10359   asection *			srelgot;
10360   bfd_vma			addend;
10361   bfd_signed_vma		signed_addend;
10362   unsigned char			dynreloc_st_type;
10363   bfd_vma			dynreloc_value;
10364   struct elf32_arm_link_hash_table * globals;
10365   struct elf32_arm_link_hash_entry *eh;
10366   union gotplt_union	       *root_plt;
10367   struct arm_plt_info	       *arm_plt;
10368   bfd_vma			plt_offset;
10369   bfd_vma			gotplt_offset;
10370   bfd_boolean			has_iplt_entry;
10371   bfd_boolean			resolved_to_zero;
10372 
10373   globals = elf32_arm_hash_table (info);
10374   if (globals == NULL)
10375     return bfd_reloc_notsupported;
10376 
10377   BFD_ASSERT (is_arm_elf (input_bfd));
10378   BFD_ASSERT (howto != NULL);
10379 
10380   /* Some relocation types map to different relocations depending on the
10381      target.  We pick the right one here.  */
10382   r_type = arm_real_reloc_type (globals, r_type);
10383 
10384   /* It is possible to have linker relaxations on some TLS access
10385      models.  Update our information here.  */
10386   r_type = elf32_arm_tls_transition (info, r_type, h);
10387 
10388   if (r_type != howto->type)
10389     howto = elf32_arm_howto_from_type (r_type);
10390 
10391   eh = (struct elf32_arm_link_hash_entry *) h;
10392   sgot = globals->root.sgot;
10393   local_got_offsets = elf_local_got_offsets (input_bfd);
10394   local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
10395 
10396   if (globals->root.dynamic_sections_created)
10397     srelgot = globals->root.srelgot;
10398   else
10399     srelgot = NULL;
10400 
10401   r_symndx = ELF32_R_SYM (rel->r_info);
10402 
10403   if (globals->use_rel)
10404     {
10405       addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
10406 
10407       if (addend & ((howto->src_mask + 1) >> 1))
10408 	{
10409 	  signed_addend = -1;
10410 	  signed_addend &= ~ howto->src_mask;
10411 	  signed_addend |= addend;
10412 	}
10413       else
10414 	signed_addend = addend;
10415     }
10416   else
10417     addend = signed_addend = rel->r_addend;
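
  /* For REL targets the addend just extracted was sign-extended using the
     top bit of howto->src_mask; e.g. with a 24-bit src_mask such as
     R_ARM_PC24's, an extracted field of 0xfffffe yields a signed addend
     of -2.  */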
10418 
10419   /* ST_BRANCH_TO_ARM makes no sense for Thumb-only targets when we
10420      are resolving a function call relocation.  */
10421   if (using_thumb_only (globals)
10422       && (r_type == R_ARM_THM_CALL
10423 	  || r_type == R_ARM_THM_JUMP24)
10424       && branch_type == ST_BRANCH_TO_ARM)
10425     branch_type = ST_BRANCH_TO_THUMB;
10426 
10427   /* Record the symbol information that should be used in dynamic
10428      relocations.  */
10429   dynreloc_st_type = st_type;
10430   dynreloc_value = value;
10431   if (branch_type == ST_BRANCH_TO_THUMB)
10432     dynreloc_value |= 1;
10433 
10434   /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
10435      VALUE appropriately for relocations that we resolve at link time.  */
10436   has_iplt_entry = FALSE;
10437   if (elf32_arm_get_plt_info (input_bfd, globals, eh, r_symndx, &root_plt,
10438 			      &arm_plt)
10439       && root_plt->offset != (bfd_vma) -1)
10440     {
10441       plt_offset = root_plt->offset;
10442       gotplt_offset = arm_plt->got_offset;
10443 
10444       if (h == NULL || eh->is_iplt)
10445 	{
10446 	  has_iplt_entry = TRUE;
10447 	  splt = globals->root.iplt;
10448 
10449 	  /* Populate .iplt entries here, because not all of them will
10450 	     be seen by finish_dynamic_symbol.  The lower bit is set if
10451 	     we have already populated the entry.  */
10452 	  if (plt_offset & 1)
10453 	    plt_offset--;
10454 	  else
10455 	    {
10456 	      if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
10457 						-1, dynreloc_value))
10458 		root_plt->offset |= 1;
10459 	      else
10460 		return bfd_reloc_notsupported;
10461 	    }
10462 
10463 	  /* Static relocations always resolve to the .iplt entry.  */
10464 	  st_type = STT_FUNC;
10465 	  value = (splt->output_section->vma
10466 		   + splt->output_offset
10467 		   + plt_offset);
10468 	  branch_type = ST_BRANCH_TO_ARM;
10469 
10470 	  /* If there are non-call relocations that resolve to the .iplt
10471 	     entry, then all dynamic ones must too.  */
10472 	  if (arm_plt->noncall_refcount != 0)
10473 	    {
10474 	      dynreloc_st_type = st_type;
10475 	      dynreloc_value = value;
10476 	    }
10477 	}
10478       else
10479 	/* We populate the .plt entry in finish_dynamic_symbol.  */
10480 	splt = globals->root.splt;
10481     }
10482   else
10483     {
10484       splt = NULL;
10485       plt_offset = (bfd_vma) -1;
10486       gotplt_offset = (bfd_vma) -1;
10487     }
10488 
10489   resolved_to_zero = (h != NULL
10490 		      && UNDEFWEAK_NO_DYNAMIC_RELOC (info, h));
10491 
10492   switch (r_type)
10493     {
10494     case R_ARM_NONE:
10495       /* We don't need to find a value for this symbol.  It's just a
10496 	 marker.  */
10497       *unresolved_reloc_p = FALSE;
10498       return bfd_reloc_ok;
10499 
10500     case R_ARM_ABS12:
10501       if (!globals->vxworks_p)
10502 	return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10503       /* Fall through.  */
10504 
10505     case R_ARM_PC24:
10506     case R_ARM_ABS32:
10507     case R_ARM_ABS32_NOI:
10508     case R_ARM_REL32:
10509     case R_ARM_REL32_NOI:
10510     case R_ARM_CALL:
10511     case R_ARM_JUMP24:
10512     case R_ARM_XPC25:
10513     case R_ARM_PREL31:
10514     case R_ARM_PLT32:
10515       /* Handle relocations which should use the PLT entry.  ABS32/REL32
10516 	 will use the symbol's value, which may point to a PLT entry, but we
10517 	 don't need to handle that here.  If we created a PLT entry, all
10518 	 branches in this object should go to it, except if the PLT is too
10519 	 far away, in which case a long branch stub should be inserted.  */
10520       if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
10521 	   && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
10522 	   && r_type != R_ARM_CALL
10523 	   && r_type != R_ARM_JUMP24
10524 	   && r_type != R_ARM_PLT32)
10525 	  && plt_offset != (bfd_vma) -1)
10526 	{
10527 	  /* If we've created a .plt section, and assigned a PLT entry
10528 	     to this function, it must either be a STT_GNU_IFUNC reference
10529 	     or not be known to bind locally.  In other cases, we should
10530 	     have cleared the PLT entry by now.  */
10531 	  BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
10532 
10533 	  value = (splt->output_section->vma
10534 		   + splt->output_offset
10535 		   + plt_offset);
10536 	  *unresolved_reloc_p = FALSE;
10537 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
10538 					   contents, rel->r_offset, value,
10539 					   rel->r_addend);
10540 	}
10541 
10542       /* When generating a shared object or relocatable executable, these
10543 	 relocations are copied into the output file to be resolved at
10544 	 run time.  */
10545       if ((bfd_link_pic (info)
10546 	   || globals->root.is_relocatable_executable
10547 	   || globals->fdpic_p)
10548 	  && (input_section->flags & SEC_ALLOC)
10549 	  && !(globals->vxworks_p
10550 	       && strcmp (input_section->output_section->name,
10551 			  ".tls_vars") == 0)
10552 	  && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
10553 	      || !SYMBOL_CALLS_LOCAL (info, h))
10554 	  && !(input_bfd == globals->stub_bfd
10555 	       && strstr (input_section->name, STUB_SUFFIX))
10556 	  && (h == NULL
10557 	      || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10558 		  && !resolved_to_zero)
10559 	      || h->root.type != bfd_link_hash_undefweak)
10560 	  && r_type != R_ARM_PC24
10561 	  && r_type != R_ARM_CALL
10562 	  && r_type != R_ARM_JUMP24
10563 	  && r_type != R_ARM_PREL31
10564 	  && r_type != R_ARM_PLT32)
10565 	{
10566 	  Elf_Internal_Rela outrel;
10567 	  bfd_boolean skip, relocate;
10568 	  int isrofixup = 0;
10569 
10570 	  if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
10571 	      && !h->def_regular)
10572 	    {
10573 	      char *v = _("shared object");
10574 
10575 	      if (bfd_link_executable (info))
10576 		v = _("PIE executable");
10577 
10578 	      _bfd_error_handler
10579 		(_("%pB: relocation %s against external or undefined symbol `%s'"
10580 		   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
10581 		 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
10582 	      return bfd_reloc_notsupported;
10583 	    }
10584 
10585 	  *unresolved_reloc_p = FALSE;
10586 
10587 	  if (sreloc == NULL && globals->root.dynamic_sections_created)
10588 	    {
10589 	      sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
10590 							   ! globals->use_rel);
10591 
10592 	      if (sreloc == NULL)
10593 		return bfd_reloc_notsupported;
10594 	    }
10595 
10596 	  skip = FALSE;
10597 	  relocate = FALSE;
10598 
10599 	  outrel.r_addend = addend;
10600 	  outrel.r_offset =
10601 	    _bfd_elf_section_offset (output_bfd, info, input_section,
10602 				     rel->r_offset);
10603 	  if (outrel.r_offset == (bfd_vma) -1)
10604 	    skip = TRUE;
10605 	  else if (outrel.r_offset == (bfd_vma) -2)
10606 	    skip = TRUE, relocate = TRUE;
10607 	  outrel.r_offset += (input_section->output_section->vma
10608 			      + input_section->output_offset);
10609 
10610 	  if (skip)
10611 	    memset (&outrel, 0, sizeof outrel);
10612 	  else if (h != NULL
10613 		   && h->dynindx != -1
10614 		   && (!bfd_link_pic (info)
10615 		       || !(bfd_link_pie (info)
10616 			    || SYMBOLIC_BIND (info, h))
10617 		       || !h->def_regular))
10618 	    outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
10619 	  else
10620 	    {
10621 	      int symbol;
10622 
10623 	      /* This symbol is local, or marked to become local.  */
10624 	      BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI
10625 			  || (globals->fdpic_p && !bfd_link_pic (info)));
10626 	      if (globals->symbian_p)
10627 		{
10628 		  asection *osec;
10629 
10630 		  /* On Symbian OS, the data segment and text segment
10631 		     can be relocated independently.  Therefore, we
10632 		     must indicate the segment to which this
10633 		     relocation is relative.  The BPABI allows us to
10634 		     use any symbol in the right segment; we just use
10635 		     the section symbol as it is convenient.  (We
10636 		     cannot use the symbol given by "h" directly as it
10637 		     will not appear in the dynamic symbol table.)
10638 
10639 		     Note that the dynamic linker ignores the section
10640 		     symbol value, so we don't subtract osec->vma
10641 		     from the emitted reloc addend.  */
10642 		  if (sym_sec)
10643 		    osec = sym_sec->output_section;
10644 		  else
10645 		    osec = input_section->output_section;
10646 		  symbol = elf_section_data (osec)->dynindx;
10647 		  if (symbol == 0)
10648 		    {
10649 		      struct elf_link_hash_table *htab = elf_hash_table (info);
10650 
10651 		      if ((osec->flags & SEC_READONLY) == 0
10652 			  && htab->data_index_section != NULL)
10653 			osec = htab->data_index_section;
10654 		      else
10655 			osec = htab->text_index_section;
10656 		      symbol = elf_section_data (osec)->dynindx;
10657 		    }
10658 		  BFD_ASSERT (symbol != 0);
10659 		}
10660 	      else
10661 		/* On SVR4-ish systems, the dynamic loader cannot
10662 		   relocate the text and data segments independently,
10663 		   so the symbol does not matter.  */
10664 		symbol = 0;
10665 	      if (dynreloc_st_type == STT_GNU_IFUNC)
10666 		/* We have an STT_GNU_IFUNC symbol that doesn't resolve
10667 		   to the .iplt entry.  Instead, every non-call reference
10668 		   must use an R_ARM_IRELATIVE relocation to obtain the
10669 		   correct run-time address.  */
10670 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
10671 	      else if (globals->fdpic_p && !bfd_link_pic (info))
10672 		isrofixup = 1;
10673 	      else
10674 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
10675 	      if (globals->use_rel)
10676 		relocate = TRUE;
10677 	      else
10678 		outrel.r_addend += dynreloc_value;
10679 	    }
10680 
10681 	  if (isrofixup)
10682 	    arm_elf_add_rofixup (output_bfd, globals->srofixup, outrel.r_offset);
10683 	  else
10684 	    elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
10685 
10686 	  /* If this reloc is against an external symbol, we do not want to
10687 	     fiddle with the addend.  Otherwise, we need to include the symbol
10688 	     value so that it becomes an addend for the dynamic reloc.  */
10689 	  if (! relocate)
10690 	    return bfd_reloc_ok;
10691 
10692 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
10693 					   contents, rel->r_offset,
10694 					   dynreloc_value, (bfd_vma) 0);
10695 	}
10696       else switch (r_type)
10697 	{
10698 	case R_ARM_ABS12:
10699 	  return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
10700 
10701 	case R_ARM_XPC25:	  /* Arm BLX instruction.  */
10702 	case R_ARM_CALL:
10703 	case R_ARM_JUMP24:
10704 	case R_ARM_PC24:	  /* Arm B/BL instruction.  */
10705 	case R_ARM_PLT32:
10706 	  {
10707 	  struct elf32_arm_stub_hash_entry *stub_entry = NULL;
10708 
10709 	  if (r_type == R_ARM_XPC25)
10710 	    {
10711 	      /* Check for Arm calling Arm function.  */
10712 	      /* FIXME: Should we translate the instruction into a BL
10713 		 instruction instead ?  */
10714 	      if (branch_type != ST_BRANCH_TO_THUMB)
10715 		_bfd_error_handler
10716 		  (_("%pB: warning: %s BLX instruction targets"
10717 		     " %s function '%s'"),
10718 		   input_bfd, "ARM",
10719 		   "ARM", h ? h->root.root.string : "(local)");
10720 	    }
10721 	  else if (r_type == R_ARM_PC24)
10722 	    {
10723 	      /* Check for Arm calling Thumb function.  */
10724 	      if (branch_type == ST_BRANCH_TO_THUMB)
10725 		{
10726 		  if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
10727 					       output_bfd, input_section,
10728 					       hit_data, sym_sec, rel->r_offset,
10729 					       signed_addend, value,
10730 					       error_message))
10731 		    return bfd_reloc_ok;
10732 		  else
10733 		    return bfd_reloc_dangerous;
10734 		}
10735 	    }
10736 
10737 	  /* Check if a stub has to be inserted because the
10738 	     destination is too far or we are changing mode.  */
10739 	  if (   r_type == R_ARM_CALL
10740 	      || r_type == R_ARM_JUMP24
10741 	      || r_type == R_ARM_PLT32)
10742 	    {
10743 	      enum elf32_arm_stub_type stub_type = arm_stub_none;
10744 	      struct elf32_arm_link_hash_entry *hash;
10745 
10746 	      hash = (struct elf32_arm_link_hash_entry *) h;
10747 	      stub_type = arm_type_of_stub (info, input_section, rel,
10748 					    st_type, &branch_type,
10749 					    hash, value, sym_sec,
10750 					    input_bfd, sym_name);
10751 
10752 	      if (stub_type != arm_stub_none)
10753 		{
10754 		  /* The target is out of reach, so redirect the
10755 		     branch to the local stub for this function.  */
10756 		  stub_entry = elf32_arm_get_stub_entry (input_section,
10757 							 sym_sec, h,
10758 							 rel, globals,
10759 							 stub_type);
10760 		  {
10761 		    if (stub_entry != NULL)
10762 		      value = (stub_entry->stub_offset
10763 			       + stub_entry->stub_sec->output_offset
10764 			       + stub_entry->stub_sec->output_section->vma);
10765 
10766 		    if (plt_offset != (bfd_vma) -1)
10767 		      *unresolved_reloc_p = FALSE;
10768 		  }
10769 		}
10770 	      else
10771 		{
10772 		  /* If the call goes through a PLT entry, make sure to
10773 		     check distance to the right destination address.  */
10774 		  if (plt_offset != (bfd_vma) -1)
10775 		    {
10776 		      value = (splt->output_section->vma
10777 			       + splt->output_offset
10778 			       + plt_offset);
10779 		      *unresolved_reloc_p = FALSE;
10780 		      /* The PLT entry is in ARM mode, regardless of the
10781 			 target function.  */
10782 		      branch_type = ST_BRANCH_TO_ARM;
10783 		    }
10784 		}
10785 	    }
10786 
10787 	  /* The ARM ELF ABI says that this reloc is computed as: S - P + A
10788 	     where:
10789 	      S is the address of the symbol in the relocation.
10790 	      P is address of the instruction being relocated.
10791 	      A is the addend (extracted from the instruction) in bytes.
10792 
10793 	     S is held in 'value'.
10794 	     P is the base address of the section containing the
10795 	       instruction plus the offset of the reloc into that
10796 	       section, ie:
10797 		 (input_section->output_section->vma +
10798 		  input_section->output_offset +
10799 		  rel->r_offset).
10800 	     A is the addend, converted into bytes, ie:
10801 		 (signed_addend * 4)
10802 
10803 	     Note: None of these operations have knowledge of the pipeline
10804 	     size of the processor, thus it is up to the assembler to
10805 	     encode this information into the addend.  */
10806 	  value -= (input_section->output_section->vma
10807 		    + input_section->output_offset);
10808 	  value -= rel->r_offset;
10809 	  if (globals->use_rel)
10810 	    value += (signed_addend << howto->size);
10811 	  else
10812 	    /* RELA addends do not have to be adjusted by howto->size.  */
10813 	    value += signed_addend;
10814 
10815 	  signed_addend = value;
10816 	  signed_addend >>= howto->rightshift;
10817 
10818 	  /* A branch to an undefined weak symbol is turned into a jump to
10819 	     the next instruction unless a PLT entry will be created.
10820 	     Do the same for local undefined symbols (but not for STN_UNDEF).
10821 	     The jump to the next instruction is optimized as a NOP depending
10822 	     on the architecture.  */
10823 	  if (h ? (h->root.type == bfd_link_hash_undefweak
10824 		   && plt_offset == (bfd_vma) -1)
10825 	      : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
10826 	    {
10827 	      value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
10828 
10829 	      if (arch_has_arm_nop (globals))
10830 		value |= 0x0320f000;
10831 	      else
10832 		value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
10833 	    }
10834 	  else
10835 	    {
10836 	      /* Perform a signed range check.  */
10837 	      if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
10838 		  || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
10839 		return bfd_reloc_overflow;
10840 
10841 	      addend = (value & 2);
10842 
10843 	      value = (signed_addend & howto->dst_mask)
10844 		| (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
10845 
10846 	      if (r_type == R_ARM_CALL)
10847 		{
10848 		  /* Set the H bit in the BLX instruction.  */
10849 		  if (branch_type == ST_BRANCH_TO_THUMB)
10850 		    {
10851 		      if (addend)
10852 			value |= (1 << 24);
10853 		      else
10854 			value &= ~(bfd_vma)(1 << 24);
10855 		    }
10856 
10857 		  /* Select the correct instruction (BL or BLX).  */
10858 		  /* Only if we are not handling a BL to a stub. In this
10859 		     case, mode switching is performed by the stub.  */
10860 		  if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
10861 		    value |= (1 << 28);
10862 		  else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
10863 		    {
10864 		      value &= ~(bfd_vma)(1 << 28);
10865 		      value |= (1 << 24);
10866 		    }
10867 		}
10868 	    }
10869 	  }
10870 	  break;
10871 
10872 	case R_ARM_ABS32:
10873 	  value += addend;
10874 	  if (branch_type == ST_BRANCH_TO_THUMB)
10875 	    value |= 1;
10876 	  break;
10877 
10878 	case R_ARM_ABS32_NOI:
10879 	  value += addend;
10880 	  break;
10881 
10882 	case R_ARM_REL32:
10883 	  value += addend;
10884 	  if (branch_type == ST_BRANCH_TO_THUMB)
10885 	    value |= 1;
10886 	  value -= (input_section->output_section->vma
10887 		    + input_section->output_offset + rel->r_offset);
10888 	  break;
10889 
10890 	case R_ARM_REL32_NOI:
10891 	  value += addend;
10892 	  value -= (input_section->output_section->vma
10893 		    + input_section->output_offset + rel->r_offset);
10894 	  break;
10895 
10896 	case R_ARM_PREL31:
10897 	  value -= (input_section->output_section->vma
10898 		    + input_section->output_offset + rel->r_offset);
10899 	  value += signed_addend;
10900 	  if (! h || h->root.type != bfd_link_hash_undefweak)
10901 	    {
10902 	      /* Check for overflow: bits 30 and 31 must agree.  */
10903 	      if ((value ^ (value >> 1)) & (1 << 30))
10904 		return bfd_reloc_overflow;
10905 	    }
10906 	  value &= 0x7fffffff;
10907 	  value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
10908 	  if (branch_type == ST_BRANCH_TO_THUMB)
10909 	    value |= 1;
10910 	  break;
10911 	}
10912 
10913       bfd_put_32 (input_bfd, value, hit_data);
10914       return bfd_reloc_ok;
10915 
10916     case R_ARM_ABS8:
10917       /* PR 16202: Refetch the addend using the correct size.  */
10918       if (globals->use_rel)
10919 	addend = bfd_get_8 (input_bfd, hit_data);
10920       value += addend;
10921 
10922       /* There is no way to tell whether the user intended to use a signed or
10923 	 unsigned addend.  When checking for overflow we accept either,
10924 	 as specified by the AAELF.  */
10925       if ((long) value > 0xff || (long) value < -0x80)
10926 	return bfd_reloc_overflow;
10927 
10928       bfd_put_8 (input_bfd, value, hit_data);
10929       return bfd_reloc_ok;
10930 
10931     case R_ARM_ABS16:
10932       /* PR 16202: Refetch the addend using the correct size.  */
10933       if (globals->use_rel)
10934 	addend = bfd_get_16 (input_bfd, hit_data);
10935       value += addend;
10936 
10937       /* See comment for R_ARM_ABS8.  */
10938       if ((long) value > 0xffff || (long) value < -0x8000)
10939 	return bfd_reloc_overflow;
10940 
10941       bfd_put_16 (input_bfd, value, hit_data);
10942       return bfd_reloc_ok;
10943 
10944     case R_ARM_THM_ABS5:
10945       /* Support ldr and str instructions for the thumb.  */
10946       if (globals->use_rel)
10947 	{
10948 	  /* Need to refetch addend.  */
10949 	  addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10950 	  /* ??? Need to determine shift amount from operand size.  */
10951 	  addend >>= howto->rightshift;
10952 	}
10953       value += addend;
10954 
10955       /* ??? Isn't value unsigned?  */
10956       if ((long) value > 0x1f || (long) value < -0x10)
10957 	return bfd_reloc_overflow;
10958 
10959       /* ??? Value needs to be properly shifted into place first.  */
10960       value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
10961       bfd_put_16 (input_bfd, value, hit_data);
10962       return bfd_reloc_ok;
10963 
10964     case R_ARM_THM_ALU_PREL_11_0:
10965       /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
10966       {
10967 	bfd_vma insn;
10968 	bfd_signed_vma relocation;
10969 
10970 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
10971 	     | bfd_get_16 (input_bfd, hit_data + 2);
10972 
10973 	if (globals->use_rel)
10974 	  {
10975 	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
10976 			  | ((insn & (1 << 26)) >> 15);
10977 	    if (insn & 0xf00000)
10978 	      signed_addend = -signed_addend;
10979 	  }
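
	/* For REL input the addend is the instruction's i:imm3:imm8
	   immediate reconstructed above, negated when the insn is the
	   SUBW form.  */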
10980 
10981 	relocation = value + signed_addend;
10982 	relocation -= Pa (input_section->output_section->vma
10983 			  + input_section->output_offset
10984 			  + rel->r_offset);
10985 
10986 	/* PR 21523: Use an absolute value.  The user of this reloc will
10987 	   have already selected an ADD or SUB insn appropriately.  */
10988 	value = llabs (relocation);
10989 
10990 	if (value >= 0x1000)
10991 	  return bfd_reloc_overflow;
10992 
10993 	/* Destination is Thumb.  Force bit 0 to 1 to reflect this.  */
10994 	if (branch_type == ST_BRANCH_TO_THUMB)
10995 	  value |= 1;
10996 
10997 	insn = (insn & 0xfb0f8f00) | (value & 0xff)
10998 	     | ((value & 0x700) << 4)
10999 	     | ((value & 0x800) << 15);
11000 	if (relocation < 0)
11001 	  insn |= 0xa00000;
11002 
11003 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
11004 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
11005 
11006 	return bfd_reloc_ok;
11007       }
11008 
11009     case R_ARM_THM_PC8:
11010       /* PR 10073:  This reloc is not generated by the GNU toolchain,
11011 	 but it is supported for compatibility with third party libraries
11012 	 generated by other compilers, specifically the ARM/IAR.  */
11013       {
11014 	bfd_vma insn;
11015 	bfd_signed_vma relocation;
11016 
11017 	insn = bfd_get_16 (input_bfd, hit_data);
11018 
11019 	if (globals->use_rel)
11020 	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) - 4;
11021 
11022 	relocation = value + addend;
11023 	relocation -= Pa (input_section->output_section->vma
11024 			  + input_section->output_offset
11025 			  + rel->r_offset);
11026 
11027 	value = relocation;
11028 
11029 	/* We do not check for overflow of this reloc.  Although strictly
11030 	   speaking this is incorrect, it appears to be necessary in order
11031 	   to work with IAR generated relocs.  Since GCC and GAS do not
11032 	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
11033 	   a problem for them.  */
11034 	value &= 0x3fc;
11035 
11036 	insn = (insn & 0xff00) | (value >> 2);
11037 
11038 	bfd_put_16 (input_bfd, insn, hit_data);
11039 
11040 	return bfd_reloc_ok;
11041       }
11042 
11043     case R_ARM_THM_PC12:
11044       /* Corresponds to: ldr.w reg, [pc, #offset].  */
11045       {
11046 	bfd_vma insn;
11047 	bfd_signed_vma relocation;
11048 
11049 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
11050 	     | bfd_get_16 (input_bfd, hit_data + 2);
11051 
11052 	if (globals->use_rel)
11053 	  {
11054 	    signed_addend = insn & 0xfff;
11055 	    if (!(insn & (1 << 23)))
11056 	      signed_addend = -signed_addend;
11057 	  }
11058 
11059 	relocation = value + signed_addend;
11060 	relocation -= Pa (input_section->output_section->vma
11061 			  + input_section->output_offset
11062 			  + rel->r_offset);
11063 
11064 	value = relocation;
11065 
11066 	if (value >= 0x1000)
11067 	  return bfd_reloc_overflow;
11068 
11069 	insn = (insn & 0xff7ff000) | value;
11070 	if (relocation >= 0)
11071 	  insn |= (1 << 23);
11072 
11073 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
11074 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
11075 
11076 	return bfd_reloc_ok;
11077       }
11078 
11079     case R_ARM_THM_XPC22:
11080     case R_ARM_THM_CALL:
11081     case R_ARM_THM_JUMP24:
11082       /* Thumb BL (branch long instruction).  */
11083       {
11084 	bfd_vma relocation;
11085 	bfd_vma reloc_sign;
11086 	bfd_boolean overflow = FALSE;
11087 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11088 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11089 	bfd_signed_vma reloc_signed_max;
11090 	bfd_signed_vma reloc_signed_min;
11091 	bfd_vma check;
11092 	bfd_signed_vma signed_check;
11093 	int bitsize;
11094 	const int thumb2 = using_thumb2 (globals);
11095 	const int thumb2_bl = using_thumb2_bl (globals);
11096 
11097 	/* A branch to an undefined weak symbol is turned into a jump to
11098 	   the next instruction unless a PLT entry will be created.
11099 	   The jump to the next instruction is optimized as a NOP.W for
11100 	   Thumb-2 enabled architectures.  */
11101 	if (h && h->root.type == bfd_link_hash_undefweak
11102 	    && plt_offset == (bfd_vma) -1)
11103 	  {
11104 	    if (thumb2)
11105 	      {
11106 		bfd_put_16 (input_bfd, 0xf3af, hit_data);
11107 		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
11108 	      }
11109 	    else
11110 	      {
11111 		bfd_put_16 (input_bfd, 0xe000, hit_data);
11112 		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
11113 	      }
11114 	    return bfd_reloc_ok;
11115 	  }
11116 
11117 	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
11118 	   with Thumb-1) involving the J1 and J2 bits.  */
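	/* Per the Thumb-2 BL/BLX encoding the branch offset is
	   SignExtend (S:I1:I2:imm10:imm11:0), where I1 = NOT (J1 EOR S)
	   and I2 = NOT (J2 EOR S); the REL path below reconstructs exactly
	   that value.  */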
11119 	if (globals->use_rel)
11120 	  {
11121 	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
11122 	    bfd_vma upper = upper_insn & 0x3ff;
11123 	    bfd_vma lower = lower_insn & 0x7ff;
11124 	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
11125 	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
11126 	    bfd_vma i1 = j1 ^ s ? 0 : 1;
11127 	    bfd_vma i2 = j2 ^ s ? 0 : 1;
11128 
11129 	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
11130 	    /* Sign extend.  */
11131 	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
11132 
11133 	    signed_addend = addend;
11134 	  }
11135 
11136 	if (r_type == R_ARM_THM_XPC22)
11137 	  {
11138 	    /* Check for Thumb to Thumb call.  */
11139 	    /* FIXME: Should we translate the instruction into a BL
11140 	       instruction instead ?  */
11141 	    if (branch_type == ST_BRANCH_TO_THUMB)
11142 	      _bfd_error_handler
11143 		(_("%pB: warning: %s BLX instruction targets"
11144 		   " %s function '%s'"),
11145 		 input_bfd, "Thumb",
11146 		 "Thumb", h ? h->root.root.string : "(local)");
11147 	  }
11148 	else
11149 	  {
11150 	    /* If it is not a call to Thumb, assume call to Arm.
11151 	       If it is a call relative to a section name, then it is not a
11152 	       function call at all, but rather a long jump.  Calls through
11153 	       the PLT do not require stubs.  */
11154 	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
11155 	      {
11156 		if (globals->use_blx && r_type == R_ARM_THM_CALL)
11157 		  {
11158 		    /* Convert BL to BLX.  */
11159 		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
11160 		  }
11161 		else if ((   r_type != R_ARM_THM_CALL)
11162 			 && (r_type != R_ARM_THM_JUMP24))
11163 		  {
11164 		    if (elf32_thumb_to_arm_stub
11165 			(info, sym_name, input_bfd, output_bfd, input_section,
11166 			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
11167 			 error_message))
11168 		      return bfd_reloc_ok;
11169 		    else
11170 		      return bfd_reloc_dangerous;
11171 		  }
11172 	      }
11173 	    else if (branch_type == ST_BRANCH_TO_THUMB
11174 		     && globals->use_blx
11175 		     && r_type == R_ARM_THM_CALL)
11176 	      {
11177 		/* Make sure this is a BL.  */
11178 		lower_insn |= 0x1800;
11179 	      }
11180 	  }
11181 
11182 	enum elf32_arm_stub_type stub_type = arm_stub_none;
11183 	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
11184 	  {
11185 	    /* Check if a stub has to be inserted because the destination
11186 	       is too far.  */
11187 	    struct elf32_arm_stub_hash_entry *stub_entry;
11188 	    struct elf32_arm_link_hash_entry *hash;
11189 
11190 	    hash = (struct elf32_arm_link_hash_entry *) h;
11191 
11192 	    stub_type = arm_type_of_stub (info, input_section, rel,
11193 					  st_type, &branch_type,
11194 					  hash, value, sym_sec,
11195 					  input_bfd, sym_name);
11196 
11197 	    if (stub_type != arm_stub_none)
11198 	      {
11199 		/* The target is out of reach or we are changing modes, so
11200 		   redirect the branch to the local stub for this
11201 		   function.  */
11202 		stub_entry = elf32_arm_get_stub_entry (input_section,
11203 						       sym_sec, h,
11204 						       rel, globals,
11205 						       stub_type);
11206 		if (stub_entry != NULL)
11207 		  {
11208 		    value = (stub_entry->stub_offset
11209 			     + stub_entry->stub_sec->output_offset
11210 			     + stub_entry->stub_sec->output_section->vma);
11211 
11212 		    if (plt_offset != (bfd_vma) -1)
11213 		      *unresolved_reloc_p = FALSE;
11214 		  }
11215 
11216 		/* If this call becomes a call to Arm, force BLX.  */
11217 		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
11218 		  {
11219 		    if ((stub_entry
11220 			 && !arm_stub_is_thumb (stub_entry->stub_type))
11221 			|| branch_type != ST_BRANCH_TO_THUMB)
11222 		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
11223 		  }
11224 	      }
11225 	  }
11226 
11227 	/* Handle calls via the PLT.  */
11228 	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
11229 	  {
11230 	    value = (splt->output_section->vma
11231 		     + splt->output_offset
11232 		     + plt_offset);
11233 
11234 	    if (globals->use_blx
11235 		&& r_type == R_ARM_THM_CALL
11236 		&& ! using_thumb_only (globals))
11237 	      {
11238 		/* If the Thumb BLX instruction is available, convert
11239 		   the BL to a BLX instruction to call the ARM-mode
11240 		   PLT entry.  */
11241 		lower_insn = (lower_insn & ~0x1000) | 0x0800;
11242 		branch_type = ST_BRANCH_TO_ARM;
11243 	      }
11244 	    else
11245 	      {
11246 		if (! using_thumb_only (globals))
11247 		  /* Target the Thumb stub before the ARM PLT entry.  */
11248 		  value -= PLT_THUMB_STUB_SIZE;
11249 		branch_type = ST_BRANCH_TO_THUMB;
11250 	      }
11251 	    *unresolved_reloc_p = FALSE;
11252 	  }
11253 
11254 	relocation = value + signed_addend;
11255 
11256 	relocation -= (input_section->output_section->vma
11257 		       + input_section->output_offset
11258 		       + rel->r_offset);
11259 
11260 	check = relocation >> howto->rightshift;
11261 
11262 	/* If this is a signed value, the rightshift just dropped
11263 	   leading 1 bits (assuming two's complement).  */
11264 	if ((bfd_signed_vma) relocation >= 0)
11265 	  signed_check = check;
11266 	else
11267 	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
11268 
11269 	/* Calculate the permissible maximum and minimum values for
11270 	   this relocation according to whether we're relocating for
11271 	   Thumb-2 or not.  */
11272 	bitsize = howto->bitsize;
11273 	if (!thumb2_bl)
11274 	  bitsize -= 2;
11275 	reloc_signed_max = (1 << (bitsize - 1)) - 1;
11276 	reloc_signed_min = ~reloc_signed_max;
11277 
11278 	/* Assumes two's complement.  */
11279 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11280 	  overflow = TRUE;
11281 
11282 	if ((lower_insn & 0x5000) == 0x4000)
11283 	  /* For a BLX instruction, make sure that the relocation is rounded up
11284 	     to a word boundary.  This follows the semantics of the instruction
11285 	     which specifies that bit 1 of the target address will come from bit
11286 	     1 of the base address.  */
11287 	  relocation = (relocation + 2) & ~ 3;
11288 
11289 	/* Put RELOCATION back into the insn.  Assumes two's complement.
11290 	   We use the Thumb-2 encoding, which is safe even if dealing with
11291 	   a Thumb-1 instruction by virtue of our overflow check above.  */
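	/* Thumb-2 encoding note: the branch offset is
	   SignExtend (S:I1:I2:imm10:imm11:'0'), with the insn storing
	   J1 = NOT (I1) XOR S and J2 = NOT (I2) XOR S; S and imm10 sit
	   in the first halfword, J1, J2 and imm11 in the second.  The
	   expressions below therefore recover J1 and J2 by XORing the
	   inverted offset bits 23 and 22 with the sign, e.g. a forward
	   offset of 0x101000 (S = 0, bits 23 and 22 clear) gives
	   J1 = J2 = 1.  */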
11292 	reloc_sign = (signed_check < 0) ? 1 : 0;
11293 	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
11294 		     | ((relocation >> 12) & 0x3ff)
11295 		     | (reloc_sign << 10);
11296 	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
11297 		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
11298 		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
11299 		     | ((relocation >> 1) & 0x7ff);
11300 
11301 	/* Put the relocated value back in the object file:  */
11302 	bfd_put_16 (input_bfd, upper_insn, hit_data);
11303 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11304 
11305 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11306       }
11307       break;
11308 
11309     case R_ARM_THM_JUMP19:
11310       /* Thumb32 conditional branch instruction.  */
11311       {
11312 	bfd_vma relocation;
11313 	bfd_boolean overflow = FALSE;
11314 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
11315 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
11316 	bfd_signed_vma reloc_signed_max = 0xffffe;
11317 	bfd_signed_vma reloc_signed_min = -0x100000;
11318 	bfd_signed_vma signed_check;
11319 	enum elf32_arm_stub_type stub_type = arm_stub_none;
11320 	struct elf32_arm_stub_hash_entry *stub_entry;
11321 	struct elf32_arm_link_hash_entry *hash;
11322 
11323 	/* Need to refetch the addend, reconstruct the top three bits
11324 	   (S, J1 and J2), and combine the 6-bit and 11-bit immediate pieces.  */
11325 	if (globals->use_rel)
11326 	  {
11327 	    bfd_vma S     = (upper_insn & 0x0400) >> 10;
11328 	    bfd_vma upper = (upper_insn & 0x003f);
11329 	    bfd_vma J1    = (lower_insn & 0x2000) >> 13;
11330 	    bfd_vma J2    = (lower_insn & 0x0800) >> 11;
11331 	    bfd_vma lower = (lower_insn & 0x07ff);
11332 
11333 	    upper |= J1 << 6;
11334 	    upper |= J2 << 7;
11335 	    upper |= (!S) << 8;
11336 	    upper -= 0x0100; /* Sign extend.  */
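	    /* As !S == S ^ 1, ORing in (!S) << 8 and subtracting 0x100
	       computes ((S:J2:J1:imm6) ^ 0x100) - 0x100, which
	       sign-extends the 9-bit field; e.g. S = 1 with
	       J2:J1:imm6 = 0x3f gives 0x3f - 0x100 = -0xc1.  */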
11337 
11338 	    addend = (upper << 12) | (lower << 1);
11339 	    signed_addend = addend;
11340 	  }
11341 
11342 	/* Handle calls via the PLT.  */
11343 	if (plt_offset != (bfd_vma) -1)
11344 	  {
11345 	    value = (splt->output_section->vma
11346 		     + splt->output_offset
11347 		     + plt_offset);
11348 	    /* Target the Thumb stub before the ARM PLT entry.  */
11349 	    value -= PLT_THUMB_STUB_SIZE;
11350 	    *unresolved_reloc_p = FALSE;
11351 	  }
11352 
11353 	hash = (struct elf32_arm_link_hash_entry *)h;
11354 
11355 	stub_type = arm_type_of_stub (info, input_section, rel,
11356 				      st_type, &branch_type,
11357 				      hash, value, sym_sec,
11358 				      input_bfd, sym_name);
11359 	if (stub_type != arm_stub_none)
11360 	  {
11361 	    stub_entry = elf32_arm_get_stub_entry (input_section,
11362 						   sym_sec, h,
11363 						   rel, globals,
11364 						   stub_type);
11365 	    if (stub_entry != NULL)
11366 	      {
11367 		value = (stub_entry->stub_offset
11368 			+ stub_entry->stub_sec->output_offset
11369 			+ stub_entry->stub_sec->output_section->vma);
11370 	      }
11371 	  }
11372 
11373 	relocation = value + signed_addend;
11374 	relocation -= (input_section->output_section->vma
11375 		       + input_section->output_offset
11376 		       + rel->r_offset);
11377 	signed_check = (bfd_signed_vma) relocation;
11378 
11379 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11380 	  overflow = TRUE;
11381 
11382 	/* Put RELOCATION back into the insn.  */
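	/* The B<cond>.W offset is SignExtend (S:J2:J1:imm6:imm11:'0'),
	   a 21-bit value, which gives the -0x100000..0xffffe range
	   checked against above.  */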
11383 	{
11384 	  bfd_vma S  = (relocation & 0x00100000) >> 20;
11385 	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
11386 	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
11387 	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
11388 	  bfd_vma lo = (relocation & 0x00000ffe) >>  1;
11389 
11390 	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
11391 	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
11392 	}
11393 
11394 	/* Put the relocated value back in the object file:  */
11395 	bfd_put_16 (input_bfd, upper_insn, hit_data);
11396 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
11397 
11398 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
11399       }
11400 
11401     case R_ARM_THM_JUMP11:
11402     case R_ARM_THM_JUMP8:
11403     case R_ARM_THM_JUMP6:
11404       /* Thumb B (branch) instruction.  */
11405       {
11406 	bfd_signed_vma relocation;
11407 	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
11408 	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
11409 	bfd_signed_vma signed_check;
11410 
11411 	/* CBZ cannot jump backward.  */
11412 	if (r_type == R_ARM_THM_JUMP6)
11413 	  reloc_signed_min = 0;
11414 
11415 	if (globals->use_rel)
11416 	  {
11417 	    /* Need to refetch addend.  */
11418 	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
11419 	    if (addend & ((howto->src_mask + 1) >> 1))
11420 	      {
11421 		signed_addend = -1;
11422 		signed_addend &= ~ howto->src_mask;
11423 		signed_addend |= addend;
11424 	      }
11425 	    else
11426 	      signed_addend = addend;
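	    /* The if/else above sign-extends ADDEND from the width of
	       howto->src_mask; e.g. with a src_mask of 0xff, a field
	       value of 0x80 becomes a signed_addend of -0x80.  */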
11427 	    /* The value in the insn has been right shifted.  We need to
11428 	       undo this, so that we can perform the address calculation
11429 	       in terms of bytes.  */
11430 	    signed_addend <<= howto->rightshift;
11431 	  }
11432 	relocation = value + signed_addend;
11433 
11434 	relocation -= (input_section->output_section->vma
11435 		       + input_section->output_offset
11436 		       + rel->r_offset);
11437 
11438 	relocation >>= howto->rightshift;
11439 	signed_check = relocation;
11440 
11441 	if (r_type == R_ARM_THM_JUMP6)
11442 	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
11443 	else
11444 	  relocation &= howto->dst_mask;
11445 	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
11446 
11447 	bfd_put_16 (input_bfd, relocation, hit_data);
11448 
11449 	/* Assumes two's complement.  */
11450 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
11451 	  return bfd_reloc_overflow;
11452 
11453 	return bfd_reloc_ok;
11454       }
11455 
11456     case R_ARM_ALU_PCREL7_0:
11457     case R_ARM_ALU_PCREL15_8:
11458     case R_ARM_ALU_PCREL23_15:
11459       {
11460 	bfd_vma insn;
11461 	bfd_vma relocation;
11462 
11463 	insn = bfd_get_32 (input_bfd, hit_data);
11464 	if (globals->use_rel)
11465 	  {
11466 	    /* Extract the addend.  */
11467 	    addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
11468 	    signed_addend = addend;
11469 	  }
11470 	relocation = value + signed_addend;
11471 
11472 	relocation -= (input_section->output_section->vma
11473 		       + input_section->output_offset
11474 		       + rel->r_offset);
11475 	insn = (insn & ~0xfff)
11476 	       | ((howto->bitpos << 7) & 0xf00)
11477 	       | ((relocation >> howto->bitpos) & 0xff);
11478 	bfd_put_32 (input_bfd, insn, hit_data);
11479       }
11480       return bfd_reloc_ok;
11481 
11482     case R_ARM_GNU_VTINHERIT:
11483     case R_ARM_GNU_VTENTRY:
11484       return bfd_reloc_ok;
11485 
11486     case R_ARM_GOTOFF32:
11487       /* Relocation is relative to the start of the
11488 	 global offset table.  */
11489 
11490       BFD_ASSERT (sgot != NULL);
11491       if (sgot == NULL)
11492 	return bfd_reloc_notsupported;
11493 
11494       /* If we are addressing a Thumb function, we need to adjust the
11495 	 address by one, so that attempts to call the function pointer will
11496 	 correctly interpret it as Thumb code.  */
11497       if (branch_type == ST_BRANCH_TO_THUMB)
11498 	value += 1;
11499 
11500       /* Note that sgot->output_offset is not involved in this
11501 	 calculation.  We always want the start of .got.  If we
11502 	 define _GLOBAL_OFFSET_TABLE in a different way, as is
11503 	 permitted by the ABI, we might have to change this
11504 	 calculation.  */
11505       value -= sgot->output_section->vma;
11506       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11507 				       contents, rel->r_offset, value,
11508 				       rel->r_addend);
11509 
11510     case R_ARM_GOTPC:
11511       /* Use global offset table as symbol value.  */
11512       BFD_ASSERT (sgot != NULL);
11513 
11514       if (sgot == NULL)
11515 	return bfd_reloc_notsupported;
11516 
11517       *unresolved_reloc_p = FALSE;
11518       value = sgot->output_section->vma;
11519       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11520 				       contents, rel->r_offset, value,
11521 				       rel->r_addend);
11522 
11523     case R_ARM_GOT32:
11524     case R_ARM_GOT_PREL:
11525       /* Relocation is to the entry for this symbol in the
11526 	 global offset table.  */
11527       if (sgot == NULL)
11528 	return bfd_reloc_notsupported;
11529 
11530       if (dynreloc_st_type == STT_GNU_IFUNC
11531 	  && plt_offset != (bfd_vma) -1
11532 	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
11533 	{
11534 	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
11535 	     symbol, and the relocation resolves directly to the runtime
11536 	     target rather than to the .iplt entry.  This means that any
11537 	     .got entry would be the same value as the .igot.plt entry,
11538 	     so there's no point creating both.  */
11539 	  sgot = globals->root.igotplt;
11540 	  value = sgot->output_offset + gotplt_offset;
11541 	}
11542       else if (h != NULL)
11543 	{
11544 	  bfd_vma off;
11545 
11546 	  off = h->got.offset;
11547 	  BFD_ASSERT (off != (bfd_vma) -1);
11548 	  if ((off & 1) != 0)
11549 	    {
11550 	      /* We have already processed one GOT relocation against
11551 		 this symbol.  */
11552 	      off &= ~1;
11553 	      if (globals->root.dynamic_sections_created
11554 		  && !SYMBOL_REFERENCES_LOCAL (info, h))
11555 		*unresolved_reloc_p = FALSE;
11556 	    }
11557 	  else
11558 	    {
11559 	      Elf_Internal_Rela outrel;
11560 	      int isrofixup = 0;
11561 
11562 	      if (((h->dynindx != -1) || globals->fdpic_p)
11563 		  && !SYMBOL_REFERENCES_LOCAL (info, h))
11564 		{
11565 		  /* If the symbol doesn't resolve locally in a static
11566 		     object, we have an undefined reference.  If the
11567 		     symbol doesn't resolve locally in a dynamic object,
11568 		     it should be resolved by the dynamic linker.  */
11569 		  if (globals->root.dynamic_sections_created)
11570 		    {
11571 		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
11572 		      *unresolved_reloc_p = FALSE;
11573 		    }
11574 		  else
11575 		    outrel.r_info = 0;
11576 		  outrel.r_addend = 0;
11577 		}
11578 	      else
11579 		{
11580 		  if (dynreloc_st_type == STT_GNU_IFUNC)
11581 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11582 		  else if (bfd_link_pic (info)
11583 			   && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
11584 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11585 		  else
11586 		    {
11587 		      outrel.r_info = 0;
11588 		      if (globals->fdpic_p)
11589 			isrofixup = 1;
11590 		    }
11591 		  outrel.r_addend = dynreloc_value;
11592 		}
11593 
11594 	      /* The GOT entry is initialized to zero by default.
11595 		 See if we should install a different value.  */
11596 	      if (outrel.r_addend != 0
11597 		  && (globals->use_rel || outrel.r_info == 0))
11598 		{
11599 		  bfd_put_32 (output_bfd, outrel.r_addend,
11600 			      sgot->contents + off);
11601 		  outrel.r_addend = 0;
11602 		}
11603 
11604 	      if (isrofixup)
11605 		arm_elf_add_rofixup (output_bfd,
11606 				     elf32_arm_hash_table(info)->srofixup,
11607 				     sgot->output_section->vma
11608 				     + sgot->output_offset + off);
11609 
11610 	      else if (outrel.r_info != 0)
11611 		{
11612 		  outrel.r_offset = (sgot->output_section->vma
11613 				     + sgot->output_offset
11614 				     + off);
11615 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11616 		}
11617 
11618 	      h->got.offset |= 1;
11619 	    }
11620 	  value = sgot->output_offset + off;
11621 	}
11622       else
11623 	{
11624 	  bfd_vma off;
11625 
11626 	  BFD_ASSERT (local_got_offsets != NULL
11627 		      && local_got_offsets[r_symndx] != (bfd_vma) -1);
11628 
11629 	  off = local_got_offsets[r_symndx];
11630 
11631 	  /* The offset must always be a multiple of 4.  We use the
11632 	     least significant bit to record whether we have already
11633 	     generated the necessary reloc.  */
11634 	  if ((off & 1) != 0)
11635 	    off &= ~1;
11636 	  else
11637 	    {
11638 	      Elf_Internal_Rela outrel;
11639 	      int isrofixup = 0;
11640 
11641 	      if (dynreloc_st_type == STT_GNU_IFUNC)
11642 		outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
11643 	      else if (bfd_link_pic (info))
11644 		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
11645 	      else
11646 		{
11647 		  outrel.r_info = 0;
11648 		  if (globals->fdpic_p)
11649 		    isrofixup = 1;
11650 		}
11651 
11652 	      /* The GOT entry is initialized to zero by default.
11653 		 See if we should install a different value.  */
11654 	      if (globals->use_rel || outrel.r_info == 0)
11655 		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
11656 
11657 	      if (isrofixup)
11658 		arm_elf_add_rofixup (output_bfd,
11659 				     globals->srofixup,
11660 				     sgot->output_section->vma
11661 				     + sgot->output_offset + off);
11662 
11663 	      else if (outrel.r_info != 0)
11664 		{
11665 		  outrel.r_addend = addend + dynreloc_value;
11666 		  outrel.r_offset = (sgot->output_section->vma
11667 				     + sgot->output_offset
11668 				     + off);
11669 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11670 		}
11671 
11672 	      local_got_offsets[r_symndx] |= 1;
11673 	    }
11674 
11675 	  value = sgot->output_offset + off;
11676 	}
11677       if (r_type != R_ARM_GOT32)
11678 	value += sgot->output_section->vma;
11679 
11680       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11681 				       contents, rel->r_offset, value,
11682 				       rel->r_addend);
11683 
11684     case R_ARM_TLS_LDO32:
11685       value = value - dtpoff_base (info);
11686 
11687       return _bfd_final_link_relocate (howto, input_bfd, input_section,
11688 				       contents, rel->r_offset, value,
11689 				       rel->r_addend);
11690 
11691     case R_ARM_TLS_LDM32:
11692     case R_ARM_TLS_LDM32_FDPIC:
11693       {
11694 	bfd_vma off;
11695 
11696 	if (sgot == NULL)
11697 	  abort ();
11698 
11699 	off = globals->tls_ldm_got.offset;
11700 
11701 	if ((off & 1) != 0)
11702 	  off &= ~1;
11703 	else
11704 	  {
11705 	    /* If we don't know the module number, create a relocation
11706 	       for it.  */
11707 	    if (bfd_link_dll (info))
11708 	      {
11709 		Elf_Internal_Rela outrel;
11710 
11711 		if (srelgot == NULL)
11712 		  abort ();
11713 
11714 		outrel.r_addend = 0;
11715 		outrel.r_offset = (sgot->output_section->vma
11716 				   + sgot->output_offset + off);
11717 		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
11718 
11719 		if (globals->use_rel)
11720 		  bfd_put_32 (output_bfd, outrel.r_addend,
11721 			      sgot->contents + off);
11722 
11723 		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11724 	      }
11725 	    else
11726 	      bfd_put_32 (output_bfd, 1, sgot->contents + off);
11727 
11728 	    globals->tls_ldm_got.offset |= 1;
11729 	  }
11730 
11731 	if (r_type == R_ARM_TLS_LDM32_FDPIC)
11732 	  {
11733 	    bfd_put_32(output_bfd,
11734 		       globals->root.sgot->output_offset + off,
11735 		       contents + rel->r_offset);
11736 
11737 	    return bfd_reloc_ok;
11738 	  }
11739 	else
11740 	  {
11741 	    value = sgot->output_section->vma + sgot->output_offset + off
11742 	      - (input_section->output_section->vma
11743 		 + input_section->output_offset + rel->r_offset);
11744 
11745 	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
11746 					     contents, rel->r_offset, value,
11747 					     rel->r_addend);
11748 	  }
11749       }
11750 
11751     case R_ARM_TLS_CALL:
11752     case R_ARM_THM_TLS_CALL:
11753     case R_ARM_TLS_GD32:
11754     case R_ARM_TLS_GD32_FDPIC:
11755     case R_ARM_TLS_IE32:
11756     case R_ARM_TLS_IE32_FDPIC:
11757     case R_ARM_TLS_GOTDESC:
11758     case R_ARM_TLS_DESCSEQ:
11759     case R_ARM_THM_TLS_DESCSEQ:
11760       {
11761 	bfd_vma off, offplt;
11762 	int indx = 0;
11763 	char tls_type;
11764 
11765 	BFD_ASSERT (sgot != NULL);
11766 
11767 	if (h != NULL)
11768 	  {
11769 	    bfd_boolean dyn;
11770 	    dyn = globals->root.dynamic_sections_created;
11771 	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
11772 						 bfd_link_pic (info),
11773 						 h)
11774 		&& (!bfd_link_pic (info)
11775 		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
11776 	      {
11777 		*unresolved_reloc_p = FALSE;
11778 		indx = h->dynindx;
11779 	      }
11780 	    off = h->got.offset;
11781 	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
11782 	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
11783 	  }
11784 	else
11785 	  {
11786 	    BFD_ASSERT (local_got_offsets != NULL);
11787 	    off = local_got_offsets[r_symndx];
11788 	    offplt = local_tlsdesc_gotents[r_symndx];
11789 	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
11790 	  }
11791 
11792 	/* Linker relaxation happens from one of the
11793 	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
11794 	if (ELF32_R_TYPE(rel->r_info) != r_type)
11795 	  tls_type = GOT_TLS_IE;
11796 
11797 	BFD_ASSERT (tls_type != GOT_UNKNOWN);
11798 
11799 	if ((off & 1) != 0)
11800 	  off &= ~1;
11801 	else
11802 	  {
11803 	    bfd_boolean need_relocs = FALSE;
11804 	    Elf_Internal_Rela outrel;
11805 	    int cur_off = off;
11806 
11807 	    /* The GOT entries have not been initialized yet.  Do it
11808 	       now, and emit any relocations.  If both an IE GOT and a
11809 	       GD GOT are necessary, we emit the GD first.  */
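	    /* A GD entry occupies two GOT words (R_ARM_TLS_DTPMOD32 and
	       R_ARM_TLS_DTPOFF32); an IE entry occupies one word
	       (R_ARM_TLS_TPOFF32).  Hence the cur_off increments of 8
	       and 4 below.  */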
11810 
11811 	    if ((bfd_link_dll (info) || indx != 0)
11812 		&& (h == NULL
11813 		    || (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
11814 			&& !resolved_to_zero)
11815 		    || h->root.type != bfd_link_hash_undefweak))
11816 	      {
11817 		need_relocs = TRUE;
11818 		BFD_ASSERT (srelgot != NULL);
11819 	      }
11820 
11821 	    if (tls_type & GOT_TLS_GDESC)
11822 	      {
11823 		bfd_byte *loc;
11824 
11825 		/* We should have relaxed, unless this is an undefined
11826 		   weak symbol.  */
11827 		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
11828 			    || bfd_link_dll (info));
11829 		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
11830 			    <= globals->root.sgotplt->size);
11831 
11832 		outrel.r_addend = 0;
11833 		outrel.r_offset = (globals->root.sgotplt->output_section->vma
11834 				   + globals->root.sgotplt->output_offset
11835 				   + offplt
11836 				   + globals->sgotplt_jump_table_size);
11837 
11838 		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
11839 		sreloc = globals->root.srelplt;
11840 		loc = sreloc->contents;
11841 		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
11842 		BFD_ASSERT (loc + RELOC_SIZE (globals)
11843 			   <= sreloc->contents + sreloc->size);
11844 
11845 		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
11846 
11847 		/* For globals, the first word in the relocation gets
11848 		   the relocation index and the top bit set, or zero,
11849 		   if we're binding now.  For locals, it gets the
11850 		   symbol's offset in the tls section.  */
11851 		bfd_put_32 (output_bfd,
11852 			    !h ? value - elf_hash_table (info)->tls_sec->vma
11853 			    : info->flags & DF_BIND_NOW ? 0
11854 			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
11855 			    globals->root.sgotplt->contents + offplt
11856 			    + globals->sgotplt_jump_table_size);
11857 
11858 		/* Second word in the relocation is always zero.  */
11859 		bfd_put_32 (output_bfd, 0,
11860 			    globals->root.sgotplt->contents + offplt
11861 			    + globals->sgotplt_jump_table_size + 4);
11862 	      }
11863 	    if (tls_type & GOT_TLS_GD)
11864 	      {
11865 		if (need_relocs)
11866 		  {
11867 		    outrel.r_addend = 0;
11868 		    outrel.r_offset = (sgot->output_section->vma
11869 				       + sgot->output_offset
11870 				       + cur_off);
11871 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
11872 
11873 		    if (globals->use_rel)
11874 		      bfd_put_32 (output_bfd, outrel.r_addend,
11875 				  sgot->contents + cur_off);
11876 
11877 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11878 
11879 		    if (indx == 0)
11880 		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
11881 				  sgot->contents + cur_off + 4);
11882 		    else
11883 		      {
11884 			outrel.r_addend = 0;
11885 			outrel.r_info = ELF32_R_INFO (indx,
11886 						      R_ARM_TLS_DTPOFF32);
11887 			outrel.r_offset += 4;
11888 
11889 			if (globals->use_rel)
11890 			  bfd_put_32 (output_bfd, outrel.r_addend,
11891 				      sgot->contents + cur_off + 4);
11892 
11893 			elf32_arm_add_dynreloc (output_bfd, info,
11894 						srelgot, &outrel);
11895 		      }
11896 		  }
11897 		else
11898 		  {
11899 		    /* If we are not emitting relocations for a
11900 		       general dynamic reference, then we must be in a
11901 		       static link or an executable link with the
11902 		       symbol binding locally.  Mark it as belonging
11903 		       to module 1, the executable.  */
11904 		    bfd_put_32 (output_bfd, 1,
11905 				sgot->contents + cur_off);
11906 		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
11907 				sgot->contents + cur_off + 4);
11908 		  }
11909 
11910 		cur_off += 8;
11911 	      }
11912 
11913 	    if (tls_type & GOT_TLS_IE)
11914 	      {
11915 		if (need_relocs)
11916 		  {
11917 		    if (indx == 0)
11918 		      outrel.r_addend = value - dtpoff_base (info);
11919 		    else
11920 		      outrel.r_addend = 0;
11921 		    outrel.r_offset = (sgot->output_section->vma
11922 				       + sgot->output_offset
11923 				       + cur_off);
11924 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
11925 
11926 		    if (globals->use_rel)
11927 		      bfd_put_32 (output_bfd, outrel.r_addend,
11928 				  sgot->contents + cur_off);
11929 
11930 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
11931 		  }
11932 		else
11933 		  bfd_put_32 (output_bfd, tpoff (info, value),
11934 			      sgot->contents + cur_off);
11935 		cur_off += 4;
11936 	      }
11937 
11938 	    if (h != NULL)
11939 	      h->got.offset |= 1;
11940 	    else
11941 	      local_got_offsets[r_symndx] |= 1;
11942 	  }
11943 
11944 	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32 && r_type != R_ARM_TLS_GD32_FDPIC)
11945 	  off += 8;
11946 	else if (tls_type & GOT_TLS_GDESC)
11947 	  off = offplt;
11948 
11949 	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
11950 	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
11951 	  {
11952 	    bfd_signed_vma offset;
11953 	    /* TLS stubs are ARM mode.  The original symbol is a
11954 	       data object, so branch_type is bogus.  */
11955 	    branch_type = ST_BRANCH_TO_ARM;
11956 	    enum elf32_arm_stub_type stub_type
11957 	      = arm_type_of_stub (info, input_section, rel,
11958 				  st_type, &branch_type,
11959 				  (struct elf32_arm_link_hash_entry *)h,
11960 				  globals->tls_trampoline, globals->root.splt,
11961 				  input_bfd, sym_name);
11962 
11963 	    if (stub_type != arm_stub_none)
11964 	      {
11965 		struct elf32_arm_stub_hash_entry *stub_entry
11966 		  = elf32_arm_get_stub_entry
11967 		  (input_section, globals->root.splt, 0, rel,
11968 		   globals, stub_type);
11969 		offset = (stub_entry->stub_offset
11970 			  + stub_entry->stub_sec->output_offset
11971 			  + stub_entry->stub_sec->output_section->vma);
11972 	      }
11973 	    else
11974 	      offset = (globals->root.splt->output_section->vma
11975 			+ globals->root.splt->output_offset
11976 			+ globals->tls_trampoline);
11977 
11978 	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
11979 	      {
11980 		unsigned long inst;
11981 
11982 		offset -= (input_section->output_section->vma
11983 			   + input_section->output_offset
11984 			   + rel->r_offset + 8);
11985 
11986 		inst = offset >> 2;
11987 		inst &= 0x00ffffff;
11988 		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
11989 	      }
11990 	    else
11991 	      {
11992 		/* Thumb blx encodes the offset in a complicated
11993 		   fashion.  */
11994 		unsigned upper_insn, lower_insn;
11995 		unsigned neg;
11996 
11997 		offset -= (input_section->output_section->vma
11998 			   + input_section->output_offset
11999 			   + rel->r_offset + 4);
12000 
12001 		if (stub_type != arm_stub_none
12002 		    && arm_stub_is_thumb (stub_type))
12003 		  {
12004 		    lower_insn = 0xd000;
12005 		  }
12006 		else
12007 		  {
12008 		    lower_insn = 0xc000;
12009 		    /* Round up the offset to a word boundary.  */
12010 		    offset = (offset + 2) & ~2;
12011 		  }
12012 
12013 		neg = offset < 0;
12014 		upper_insn = (0xf000
12015 			      | ((offset >> 12) & 0x3ff)
12016 			      | (neg << 10));
12017 		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
12018 			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
12019 			      | ((offset >> 1) & 0x7ff);
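		/* As in the R_ARM_THM_CALL case above, J1 and J2 are the
		   inverted offset bits 23 and 22 XORed with the sign, so
		   that the CPU recovers I1 and I2 as NOT (J EOR S).  */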
12020 		bfd_put_16 (input_bfd, upper_insn, hit_data);
12021 		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12022 		return bfd_reloc_ok;
12023 	      }
12024 	  }
12025 	/* These relocations need special care: besides the fact
12026 	   that they point somewhere in .gotplt, the addend must be
12027 	   adjusted according to the type of instruction
12028 	   we refer to.  */
12029 	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
12030 	  {
12031 	    unsigned long data, insn;
12032 	    unsigned thumb;
12033 
12034 	    data = bfd_get_signed_32 (input_bfd, hit_data);
12035 	    thumb = data & 1;
12036 	    data &= ~1ul;
12037 
12038 	    if (thumb)
12039 	      {
12040 		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
12041 		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
12042 		  insn = (insn << 16)
12043 		    | bfd_get_16 (input_bfd,
12044 				  contents + rel->r_offset - data + 2);
12045 		if ((insn & 0xf800c000) == 0xf000c000)
12046 		  /* bl/blx */
12047 		  value = -6;
12048 		else if ((insn & 0xffffff00) == 0x4400)
12049 		  /* add */
12050 		  value = -5;
12051 		else
12052 		  {
12053 		    _bfd_error_handler
12054 		      /* xgettext:c-format */
12055 		      (_("%pB(%pA+%#" PRIx64 "): "
12056 			 "unexpected %s instruction '%#lx' "
12057 			 "referenced by TLS_GOTDESC"),
12058 		       input_bfd, input_section, (uint64_t) rel->r_offset,
12059 		       "Thumb", insn);
12060 		    return bfd_reloc_notsupported;
12061 		  }
12062 	      }
12063 	    else
12064 	      {
12065 		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
12066 
12067 		switch (insn >> 24)
12068 		  {
12069 		  case 0xeb:  /* bl */
12070 		  case 0xfa:  /* blx */
12071 		    value = -4;
12072 		    break;
12073 
12074 		  case 0xe0:	/* add */
12075 		    value = -8;
12076 		    break;
12077 
12078 		  default:
12079 		    _bfd_error_handler
12080 		      /* xgettext:c-format */
12081 		      (_("%pB(%pA+%#" PRIx64 "): "
12082 			 "unexpected %s instruction '%#lx' "
12083 			 "referenced by TLS_GOTDESC"),
12084 		       input_bfd, input_section, (uint64_t) rel->r_offset,
12085 		       "ARM", insn);
12086 		    return bfd_reloc_notsupported;
12087 		  }
12088 	      }
12089 
12090 	    value += ((globals->root.sgotplt->output_section->vma
12091 		       + globals->root.sgotplt->output_offset + off)
12092 		      - (input_section->output_section->vma
12093 			 + input_section->output_offset
12094 			 + rel->r_offset)
12095 		      + globals->sgotplt_jump_table_size);
12096 	  }
12097 	else
12098 	  value = ((globals->root.sgot->output_section->vma
12099 		    + globals->root.sgot->output_offset + off)
12100 		   - (input_section->output_section->vma
12101 		      + input_section->output_offset + rel->r_offset));
12102 
12103 	if (globals->fdpic_p && (r_type == R_ARM_TLS_GD32_FDPIC ||
12104 				 r_type == R_ARM_TLS_IE32_FDPIC))
12105 	  {
12106 	    /* For FDPIC relocations, resolve to the offset of the GOT
12107 	       entry from the start of GOT.  */
12108 	    bfd_put_32(output_bfd,
12109 		       globals->root.sgot->output_offset + off,
12110 		       contents + rel->r_offset);
12111 
12112 	    return bfd_reloc_ok;
12113 	  }
12114 	else
12115 	  {
12116 	    return _bfd_final_link_relocate (howto, input_bfd, input_section,
12117 					     contents, rel->r_offset, value,
12118 					     rel->r_addend);
12119 	  }
12120       }
12121 
12122     case R_ARM_TLS_LE32:
12123       if (bfd_link_dll (info))
12124 	{
12125 	  _bfd_error_handler
12126 	    /* xgettext:c-format */
12127 	    (_("%pB(%pA+%#" PRIx64 "): %s relocation not permitted "
12128 	       "in shared object"),
12129 	     input_bfd, input_section, (uint64_t) rel->r_offset, howto->name);
12130 	  return bfd_reloc_notsupported;
12131 	}
12132       else
12133 	value = tpoff (info, value);
12134 
12135       return _bfd_final_link_relocate (howto, input_bfd, input_section,
12136 				       contents, rel->r_offset, value,
12137 				       rel->r_addend);
12138 
12139     case R_ARM_V4BX:
12140       if (globals->fix_v4bx)
12141 	{
12142 	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12143 
12144 	  /* Ensure that we have a BX instruction.  */
12145 	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
12146 
12147 	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
12148 	    {
12149 	      /* Branch to veneer.  */
12150 	      bfd_vma glue_addr;
12151 	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
12152 	      glue_addr -= input_section->output_section->vma
12153 			   + input_section->output_offset
12154 			   + rel->r_offset + 8;
12155 	      insn = (insn & 0xf0000000) | 0x0a000000
12156 		     | ((glue_addr >> 2) & 0x00ffffff);
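	      /* The -8 above allows for the ARM PC reading as the address
		 of the current instruction plus 8; 0x0a000000 is the B
		 opcode, with the original condition field preserved.  */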
12157 	    }
12158 	  else
12159 	    {
12160 	      /* Preserve Rm (lowest four bits) and the condition code
12161 		 (highest four bits). Other bits encode MOV PC,Rm.  */
12162 	      insn = (insn & 0xf000000f) | 0x01a0f000;
12163 	    }
12164 
12165 	  bfd_put_32 (input_bfd, insn, hit_data);
12166 	}
12167       return bfd_reloc_ok;
12168 
12169     case R_ARM_MOVW_ABS_NC:
12170     case R_ARM_MOVT_ABS:
12171     case R_ARM_MOVW_PREL_NC:
12172     case R_ARM_MOVT_PREL:
12173     /* Until we properly support segment-base-relative addressing,
12174        we assume the segment base to be zero, as for the group relocations.
12175        Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
12176        and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
12177     case R_ARM_MOVW_BREL_NC:
12178     case R_ARM_MOVW_BREL:
12179     case R_ARM_MOVT_BREL:
12180       {
12181 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12182 
12183 	if (globals->use_rel)
12184 	  {
12185 	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
12186 	    signed_addend = (addend ^ 0x8000) - 0x8000;
12187 	  }
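	/* The ARM MOVW/MOVT immediate is split as imm4:imm12 across insn
	   bits 19-16 and 11-0; the XOR/subtract pair above sign-extends
	   the reassembled 16-bit addend, e.g. 0x8001 becomes -0x7fff.  */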
12188 
12189 	value += signed_addend;
12190 
12191 	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
12192 	  value -= (input_section->output_section->vma
12193 		    + input_section->output_offset + rel->r_offset);
12194 
12195 	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
12196 	  return bfd_reloc_overflow;
12197 
12198 	if (branch_type == ST_BRANCH_TO_THUMB)
12199 	  value |= 1;
12200 
12201 	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
12202 	    || r_type == R_ARM_MOVT_BREL)
12203 	  value >>= 16;
12204 
12205 	insn &= 0xfff0f000;
12206 	insn |= value & 0xfff;
12207 	insn |= (value & 0xf000) << 4;
12208 	bfd_put_32 (input_bfd, insn, hit_data);
12209       }
12210       return bfd_reloc_ok;
12211 
12212     case R_ARM_THM_MOVW_ABS_NC:
12213     case R_ARM_THM_MOVT_ABS:
12214     case R_ARM_THM_MOVW_PREL_NC:
12215     case R_ARM_THM_MOVT_PREL:
12216     /* Until we properly support segment-base-relative addressing,
12217        we assume the segment base to be zero, as for the above relocations.
12218        Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
12219        R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
12220        as R_ARM_THM_MOVT_ABS.  */
12221     case R_ARM_THM_MOVW_BREL_NC:
12222     case R_ARM_THM_MOVW_BREL:
12223     case R_ARM_THM_MOVT_BREL:
12224       {
12225 	bfd_vma insn;
12226 
12227 	insn = bfd_get_16 (input_bfd, hit_data) << 16;
12228 	insn |= bfd_get_16 (input_bfd, hit_data + 2);
12229 
12230 	if (globals->use_rel)
12231 	  {
12232 	    addend = ((insn >> 4)  & 0xf000)
12233 		   | ((insn >> 15) & 0x0800)
12234 		   | ((insn >> 4)  & 0x0700)
12235 		   | (insn	   & 0x00ff);
12236 	    signed_addend = (addend ^ 0x8000) - 0x8000;
12237 	  }
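	/* The Thumb MOVW/MOVT immediate is split as imm4:i:imm3:imm8
	   (bits 19-16, 26, 14-12 and 7-0 of the combined 32-bit
	   encoding); as above, the XOR/subtract pair sign-extends the
	   reassembled 16-bit addend.  */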
12238 
12239 	value += signed_addend;
12240 
12241 	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
12242 	  value -= (input_section->output_section->vma
12243 		    + input_section->output_offset + rel->r_offset);
12244 
12245 	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
12246 	  return bfd_reloc_overflow;
12247 
12248 	if (branch_type == ST_BRANCH_TO_THUMB)
12249 	  value |= 1;
12250 
12251 	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
12252 	    || r_type == R_ARM_THM_MOVT_BREL)
12253 	  value >>= 16;
12254 
12255 	insn &= 0xfbf08f00;
12256 	insn |= (value & 0xf000) << 4;
12257 	insn |= (value & 0x0800) << 15;
12258 	insn |= (value & 0x0700) << 4;
12259 	insn |= (value & 0x00ff);
12260 
12261 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
12262 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
12263       }
12264       return bfd_reloc_ok;
12265 
12266     case R_ARM_ALU_PC_G0_NC:
12267     case R_ARM_ALU_PC_G1_NC:
12268     case R_ARM_ALU_PC_G0:
12269     case R_ARM_ALU_PC_G1:
12270     case R_ARM_ALU_PC_G2:
12271     case R_ARM_ALU_SB_G0_NC:
12272     case R_ARM_ALU_SB_G1_NC:
12273     case R_ARM_ALU_SB_G0:
12274     case R_ARM_ALU_SB_G1:
12275     case R_ARM_ALU_SB_G2:
12276       {
12277 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12278 	bfd_vma pc = input_section->output_section->vma
12279 		     + input_section->output_offset + rel->r_offset;
12280 	/* sb is the origin of the *segment* containing the symbol.  */
12281 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12282 	bfd_vma residual;
12283 	bfd_vma g_n;
12284 	bfd_signed_vma signed_value;
12285 	int group = 0;
12286 
12287 	/* Determine which group of bits to select.  */
12288 	switch (r_type)
12289 	  {
12290 	  case R_ARM_ALU_PC_G0_NC:
12291 	  case R_ARM_ALU_PC_G0:
12292 	  case R_ARM_ALU_SB_G0_NC:
12293 	  case R_ARM_ALU_SB_G0:
12294 	    group = 0;
12295 	    break;
12296 
12297 	  case R_ARM_ALU_PC_G1_NC:
12298 	  case R_ARM_ALU_PC_G1:
12299 	  case R_ARM_ALU_SB_G1_NC:
12300 	  case R_ARM_ALU_SB_G1:
12301 	    group = 1;
12302 	    break;
12303 
12304 	  case R_ARM_ALU_PC_G2:
12305 	  case R_ARM_ALU_SB_G2:
12306 	    group = 2;
12307 	    break;
12308 
12309 	  default:
12310 	    abort ();
12311 	  }
12312 
12313 	/* If REL, extract the addend from the insn.  If RELA, it will
12314 	   have already been fetched for us.  */
12315 	if (globals->use_rel)
12316 	  {
12317 	    int negative;
12318 	    bfd_vma constant = insn & 0xff;
12319 	    bfd_vma rotation = (insn & 0xf00) >> 8;
12320 
12321 	    if (rotation == 0)
12322 	      signed_addend = constant;
12323 	    else
12324 	      {
12325 		/* Compensate for the fact that in the instruction, the
12326 		   rotation is stored in multiples of 2 bits.  */
12327 		rotation *= 2;
12328 
12329 		/* Rotate "constant" right by "rotation" bits.  */
12330 		signed_addend = (constant >> rotation) |
12331 				(constant << (8 * sizeof (bfd_vma) - rotation));
12332 	      }
12333 
12334 	    /* Determine if the instruction is an ADD or a SUB.
12335 	       (For REL, this determines the sign of the addend.)  */
12336 	    negative = identify_add_or_sub (insn);
12337 	    if (negative == 0)
12338 	      {
12339 		_bfd_error_handler
12340 		  /* xgettext:c-format */
12341 		  (_("%pB(%pA+%#" PRIx64 "): only ADD or SUB instructions "
12342 		     "are allowed for ALU group relocations"),
12343 		  input_bfd, input_section, (uint64_t) rel->r_offset);
12344 		return bfd_reloc_overflow;
12345 	      }
12346 
12347 	    signed_addend *= negative;
12348 	  }
12349 
12350 	/* Compute the value (X) to go in the place.  */
12351 	if (r_type == R_ARM_ALU_PC_G0_NC
12352 	    || r_type == R_ARM_ALU_PC_G1_NC
12353 	    || r_type == R_ARM_ALU_PC_G0
12354 	    || r_type == R_ARM_ALU_PC_G1
12355 	    || r_type == R_ARM_ALU_PC_G2)
12356 	  /* PC relative.  */
12357 	  signed_value = value - pc + signed_addend;
12358 	else
12359 	  /* Section base relative.  */
12360 	  signed_value = value - sb + signed_addend;
12361 
12362 	/* If the target symbol is a Thumb function, then set the
12363 	   Thumb bit in the address.  */
12364 	if (branch_type == ST_BRANCH_TO_THUMB)
12365 	  signed_value |= 1;
12366 
12367 	/* Calculate the value of the relevant G_n, in encoded
12368 	   constant-with-rotation format.  */
12369 	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12370 					  group, &residual);
12371 
12372 	/* Check for overflow if required.  */
12373 	if ((r_type == R_ARM_ALU_PC_G0
12374 	     || r_type == R_ARM_ALU_PC_G1
12375 	     || r_type == R_ARM_ALU_PC_G2
12376 	     || r_type == R_ARM_ALU_SB_G0
12377 	     || r_type == R_ARM_ALU_SB_G1
12378 	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
12379 	  {
12380 	    _bfd_error_handler
12381 	      /* xgettext:c-format */
12382 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12383 		 "splitting %#" PRIx64 " for group relocation %s"),
12384 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12385 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12386 	       howto->name);
12387 	    return bfd_reloc_overflow;
12388 	  }
12389 
12390 	/* Mask out the value and the ADD/SUB part of the opcode; take care
12391 	   not to destroy the S bit.  */
12392 	insn &= 0xff1ff000;
12393 
12394 	/* Set the opcode according to whether the value to go in the
12395 	   place is negative.  */
12396 	if (signed_value < 0)
12397 	  insn |= 1 << 22;
12398 	else
12399 	  insn |= 1 << 23;
12400 
12401 	/* Encode the offset.  */
12402 	insn |= g_n;
12403 
12404 	bfd_put_32 (input_bfd, insn, hit_data);
12405       }
12406       return bfd_reloc_ok;
12407 
12408     case R_ARM_LDR_PC_G0:
12409     case R_ARM_LDR_PC_G1:
12410     case R_ARM_LDR_PC_G2:
12411     case R_ARM_LDR_SB_G0:
12412     case R_ARM_LDR_SB_G1:
12413     case R_ARM_LDR_SB_G2:
12414       {
12415 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12416 	bfd_vma pc = input_section->output_section->vma
12417 		     + input_section->output_offset + rel->r_offset;
12418 	/* sb is the origin of the *segment* containing the symbol.  */
12419 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12420 	bfd_vma residual;
12421 	bfd_signed_vma signed_value;
12422 	int group = 0;
12423 
12424 	/* Determine which groups of bits to calculate.  */
12425 	switch (r_type)
12426 	  {
12427 	  case R_ARM_LDR_PC_G0:
12428 	  case R_ARM_LDR_SB_G0:
12429 	    group = 0;
12430 	    break;
12431 
12432 	  case R_ARM_LDR_PC_G1:
12433 	  case R_ARM_LDR_SB_G1:
12434 	    group = 1;
12435 	    break;
12436 
12437 	  case R_ARM_LDR_PC_G2:
12438 	  case R_ARM_LDR_SB_G2:
12439 	    group = 2;
12440 	    break;
12441 
12442 	  default:
12443 	    abort ();
12444 	  }
12445 
12446 	/* If REL, extract the addend from the insn.  If RELA, it will
12447 	   have already been fetched for us.  */
12448 	if (globals->use_rel)
12449 	  {
12450 	    int negative = (insn & (1 << 23)) ? 1 : -1;
12451 	    signed_addend = negative * (insn & 0xfff);
12452 	  }
12453 
12454 	/* Compute the value (X) to go in the place.  */
12455 	if (r_type == R_ARM_LDR_PC_G0
12456 	    || r_type == R_ARM_LDR_PC_G1
12457 	    || r_type == R_ARM_LDR_PC_G2)
12458 	  /* PC relative.  */
12459 	  signed_value = value - pc + signed_addend;
12460 	else
12461 	  /* Section base relative.  */
12462 	  signed_value = value - sb + signed_addend;
12463 
12464 	/* Calculate the value of the relevant G_{n-1} to obtain
12465 	   the residual at that stage.  */
12466 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12467 				    group - 1, &residual);
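	/* The LDR/STR immediate offset field is 12 bits wide, hence the
	   0x1000 overflow limit below.  */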
12468 
12469 	/* Check for overflow.  */
12470 	if (residual >= 0x1000)
12471 	  {
12472 	    _bfd_error_handler
12473 	      /* xgettext:c-format */
12474 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12475 		 "splitting %#" PRIx64 " for group relocation %s"),
12476 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12477 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12478 	       howto->name);
12479 	    return bfd_reloc_overflow;
12480 	  }
12481 
12482 	/* Mask out the value and U bit.  */
12483 	insn &= 0xff7ff000;
12484 
12485 	/* Set the U bit if the value to go in the place is non-negative.  */
12486 	if (signed_value >= 0)
12487 	  insn |= 1 << 23;
12488 
12489 	/* Encode the offset.  */
12490 	insn |= residual;
12491 
12492 	bfd_put_32 (input_bfd, insn, hit_data);
12493       }
12494       return bfd_reloc_ok;
12495 
12496     case R_ARM_LDRS_PC_G0:
12497     case R_ARM_LDRS_PC_G1:
12498     case R_ARM_LDRS_PC_G2:
12499     case R_ARM_LDRS_SB_G0:
12500     case R_ARM_LDRS_SB_G1:
12501     case R_ARM_LDRS_SB_G2:
12502       {
12503 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12504 	bfd_vma pc = input_section->output_section->vma
12505 		     + input_section->output_offset + rel->r_offset;
12506 	/* sb is the origin of the *segment* containing the symbol.  */
12507 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12508 	bfd_vma residual;
12509 	bfd_signed_vma signed_value;
12510 	int group = 0;
12511 
12512 	/* Determine which groups of bits to calculate.  */
12513 	switch (r_type)
12514 	  {
12515 	  case R_ARM_LDRS_PC_G0:
12516 	  case R_ARM_LDRS_SB_G0:
12517 	    group = 0;
12518 	    break;
12519 
12520 	  case R_ARM_LDRS_PC_G1:
12521 	  case R_ARM_LDRS_SB_G1:
12522 	    group = 1;
12523 	    break;
12524 
12525 	  case R_ARM_LDRS_PC_G2:
12526 	  case R_ARM_LDRS_SB_G2:
12527 	    group = 2;
12528 	    break;
12529 
12530 	  default:
12531 	    abort ();
12532 	  }
12533 
12534 	/* If REL, extract the addend from the insn.  If RELA, it will
12535 	   have already been fetched for us.  */
12536 	if (globals->use_rel)
12537 	  {
12538 	    int negative = (insn & (1 << 23)) ? 1 : -1;
12539 	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
12540 	  }
12541 
12542 	/* Compute the value (X) to go in the place.  */
12543 	if (r_type == R_ARM_LDRS_PC_G0
12544 	    || r_type == R_ARM_LDRS_PC_G1
12545 	    || r_type == R_ARM_LDRS_PC_G2)
12546 	  /* PC relative.  */
12547 	  signed_value = value - pc + signed_addend;
12548 	else
12549 	  /* Section base relative.  */
12550 	  signed_value = value - sb + signed_addend;
12551 
12552 	/* Calculate the value of the relevant G_{n-1} to obtain
12553 	   the residual at that stage.  */
12554 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12555 				    group - 1, &residual);
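	/* Halfword and signed-byte loads/stores take an 8-bit immediate
	   split into two nibbles (insn bits 11-8 and 3-0), hence the
	   0x100 limit and the split encoding below.  */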
12556 
12557 	/* Check for overflow.  */
12558 	if (residual >= 0x100)
12559 	  {
12560 	    _bfd_error_handler
12561 	      /* xgettext:c-format */
12562 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12563 		 "splitting %#" PRIx64 " for group relocation %s"),
12564 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12565 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12566 	       howto->name);
12567 	    return bfd_reloc_overflow;
12568 	  }
12569 
12570 	/* Mask out the value and U bit.  */
12571 	insn &= 0xff7ff0f0;
12572 
12573 	/* Set the U bit if the value to go in the place is non-negative.  */
12574 	if (signed_value >= 0)
12575 	  insn |= 1 << 23;
12576 
12577 	/* Encode the offset.  */
12578 	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
12579 
12580 	bfd_put_32 (input_bfd, insn, hit_data);
12581       }
12582       return bfd_reloc_ok;
12583 
12584     case R_ARM_LDC_PC_G0:
12585     case R_ARM_LDC_PC_G1:
12586     case R_ARM_LDC_PC_G2:
12587     case R_ARM_LDC_SB_G0:
12588     case R_ARM_LDC_SB_G1:
12589     case R_ARM_LDC_SB_G2:
12590       {
12591 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
12592 	bfd_vma pc = input_section->output_section->vma
12593 		     + input_section->output_offset + rel->r_offset;
12594 	/* sb is the origin of the *segment* containing the symbol.  */
12595 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
12596 	bfd_vma residual;
12597 	bfd_signed_vma signed_value;
12598 	int group = 0;
12599 
12600 	/* Determine which groups of bits to calculate.  */
12601 	switch (r_type)
12602 	  {
12603 	  case R_ARM_LDC_PC_G0:
12604 	  case R_ARM_LDC_SB_G0:
12605 	    group = 0;
12606 	    break;
12607 
12608 	  case R_ARM_LDC_PC_G1:
12609 	  case R_ARM_LDC_SB_G1:
12610 	    group = 1;
12611 	    break;
12612 
12613 	  case R_ARM_LDC_PC_G2:
12614 	  case R_ARM_LDC_SB_G2:
12615 	    group = 2;
12616 	    break;
12617 
12618 	  default:
12619 	    abort ();
12620 	  }
12621 
12622 	/* If REL, extract the addend from the insn.  If RELA, it will
12623 	   have already been fetched for us.  */
12624 	if (globals->use_rel)
12625 	  {
12626 	    int negative = (insn & (1 << 23)) ? 1 : -1;
12627 	    signed_addend = negative * ((insn & 0xff) << 2);
12628 	  }
12629 
12630 	/* Compute the value (X) to go in the place.  */
12631 	if (r_type == R_ARM_LDC_PC_G0
12632 	    || r_type == R_ARM_LDC_PC_G1
12633 	    || r_type == R_ARM_LDC_PC_G2)
12634 	  /* PC relative.  */
12635 	  signed_value = value - pc + signed_addend;
12636 	else
12637 	  /* Section base relative.  */
12638 	  signed_value = value - sb + signed_addend;
12639 
12640 	/* Calculate the value of the relevant G_{n-1} to obtain
12641 	   the residual at that stage.  */
12642 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
12643 				    group - 1, &residual);
12644 
12645 	/* Check for overflow.  (The absolute value to go in the place must be
12646 	   divisible by four and, after having been divided by four, must
12647 	   fit in eight bits.)  */
12648 	if ((residual & 0x3) != 0 || residual >= 0x400)
12649 	  {
12650 	    _bfd_error_handler
12651 	      /* xgettext:c-format */
12652 	      (_("%pB(%pA+%#" PRIx64 "): overflow whilst "
12653 		 "splitting %#" PRIx64 " for group relocation %s"),
12654 	       input_bfd, input_section, (uint64_t) rel->r_offset,
12655 	       (uint64_t) (signed_value < 0 ? -signed_value : signed_value),
12656 	       howto->name);
12657 	    return bfd_reloc_overflow;
12658 	  }
12659 
12660 	/* Mask out the value and U bit.  */
12661 	insn &= 0xff7fff00;
12662 
12663 	/* Set the U bit if the value to go in the place is non-negative.  */
12664 	if (signed_value >= 0)
12665 	  insn |= 1 << 23;
12666 
12667 	/* Encode the offset.  */
12668 	insn |= residual >> 2;
12669 
12670 	bfd_put_32 (input_bfd, insn, hit_data);
12671       }
12672       return bfd_reloc_ok;
12673 
12674     case R_ARM_THM_ALU_ABS_G0_NC:
12675     case R_ARM_THM_ALU_ABS_G1_NC:
12676     case R_ARM_THM_ALU_ABS_G2_NC:
12677     case R_ARM_THM_ALU_ABS_G3_NC:
12678 	{
12679 	    const int shift_array[4] = {0, 8, 16, 24};
12680 	    bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
12681 	    bfd_vma addr = value;
12682 	    int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
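	    /* G0_NC..G3_NC select successive bytes of the address: bits
	       7-0, 15-8, 23-16 and 31-24 respectively are placed in the
	       instruction's imm8 field.  */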
12683 
12684 	    /* Compute address.  */
12685 	    if (globals->use_rel)
12686 		signed_addend = insn & 0xff;
12687 	    addr += signed_addend;
12688 	    if (branch_type == ST_BRANCH_TO_THUMB)
12689 		addr |= 1;
12690 	    /* Clear the imm8 field of the insn.  */
12691 	    insn &= 0xff00;
12692 	    /* And update with correct part of address.  */
12693 	    insn |= (addr >> shift) & 0xff;
12694 	    /* Update insn.  */
12695 	    bfd_put_16 (input_bfd, insn, hit_data);
12696 	}
12697 
12698 	*unresolved_reloc_p = FALSE;
12699 	return bfd_reloc_ok;
12700 
12701     case R_ARM_GOTOFFFUNCDESC:
12702       {
12703 	if (h == NULL)
12704 	  {
12705 	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12706 	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12707 	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12708 	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12709 	    bfd_vma seg = -1;
12710 
12711 	    if (bfd_link_pic(info) && dynindx == 0)
12712 	      abort();
12713 
12714 	    /* Resolve relocation.  */
12715 	    bfd_put_32(output_bfd, (offset + sgot->output_offset)
12716 		       , contents + rel->r_offset);
12717 	    /* Emit R_ARM_FUNCDESC_VALUE or two fixups on funcdesc if
12718 	       not done yet.  */
12719 	    arm_elf_fill_funcdesc(output_bfd, info,
12720 				  &local_fdpic_cnts[r_symndx].funcdesc_offset,
12721 				  dynindx, offset, addr, dynreloc_value, seg);
12722 	  }
12723 	else
12724 	  {
12725 	    int dynindx;
12726 	    int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12727 	    bfd_vma addr;
12728 	    bfd_vma seg = -1;
12729 
12730 	    /* For static binaries, sym_sec can be null.  */
12731 	    if (sym_sec)
12732 	      {
12733 		dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12734 		addr = dynreloc_value - sym_sec->output_section->vma;
12735 	      }
12736 	    else
12737 	      {
12738 		dynindx = 0;
12739 		addr = 0;
12740 	      }
12741 
12742 	    if (bfd_link_pic(info) && dynindx == 0)
12743 	      abort();
12744 
12745 	    /* This case cannot occur: the funcdesc is allocated by
12746 	       the dynamic loader, so we cannot resolve the relocation here.  */
12747 	    if (h->dynindx != -1)
12748 	      abort();
12749 
12750 	    /* Resolve relocation.  */
12751 	    bfd_put_32(output_bfd, (offset + sgot->output_offset),
12752 		       contents + rel->r_offset);
12753 	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12754 	    arm_elf_fill_funcdesc(output_bfd, info,
12755 				  &eh->fdpic_cnts.funcdesc_offset,
12756 				  dynindx, offset, addr, dynreloc_value, seg);
12757 	  }
12758       }
12759       *unresolved_reloc_p = FALSE;
12760       return bfd_reloc_ok;
12761 
12762     case R_ARM_GOTFUNCDESC:
12763       {
12764 	if (h != NULL)
12765 	  {
12766 	    Elf_Internal_Rela outrel;
12767 
12768 	    /* Resolve relocation.  */
12769 	    bfd_put_32(output_bfd, ((eh->fdpic_cnts.gotfuncdesc_offset & ~1)
12770 				    + sgot->output_offset),
12771 		       contents + rel->r_offset);
12772 	    /* Add funcdesc and associated R_ARM_FUNCDESC_VALUE.  */
12773 	    if(h->dynindx == -1)
12774 	      {
12775 		int dynindx;
12776 		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12777 		bfd_vma addr;
12778 		bfd_vma seg = -1;
12779 
12780 		/* For static binaries sym_sec can be null.  */
12781 		if (sym_sec)
12782 		  {
12783 		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12784 		    addr = dynreloc_value - sym_sec->output_section->vma;
12785 		  }
12786 		else
12787 		  {
12788 		    dynindx = 0;
12789 		    addr = 0;
12790 		  }
12791 
12792 		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12793 		arm_elf_fill_funcdesc(output_bfd, info,
12794 				      &eh->fdpic_cnts.funcdesc_offset,
12795 				      dynindx, offset, addr, dynreloc_value, seg);
12796 	      }
12797 
12798 	    /* Add a dynamic relocation on GOT entry if not already done.  */
12799 	    if ((eh->fdpic_cnts.gotfuncdesc_offset & 1) == 0)
12800 	      {
12801 		if (h->dynindx == -1)
12802 		  {
12803 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12804 		    if (h->root.type == bfd_link_hash_undefweak)
12805 		      bfd_put_32(output_bfd, 0, sgot->contents
12806 				 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12807 		    else
12808 		      bfd_put_32(output_bfd, sgot->output_section->vma
12809 				 + sgot->output_offset
12810 				 + (eh->fdpic_cnts.funcdesc_offset & ~1),
12811 				 sgot->contents
12812 				 + (eh->fdpic_cnts.gotfuncdesc_offset & ~1));
12813 		  }
12814 		else
12815 		  {
12816 		    outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12817 		  }
12818 		outrel.r_offset = sgot->output_section->vma
12819 		  + sgot->output_offset
12820 		  + (eh->fdpic_cnts.gotfuncdesc_offset & ~1);
12821 		outrel.r_addend = 0;
12822 		if (h->dynindx == -1 && !bfd_link_pic(info))
12823 		  if (h->root.type == bfd_link_hash_undefweak)
12824 		    arm_elf_add_rofixup(output_bfd, globals->srofixup, -1);
12825 		  else
12826 		    arm_elf_add_rofixup(output_bfd, globals->srofixup,
12827 					outrel.r_offset);
12828 		else
12829 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12830 		eh->fdpic_cnts.gotfuncdesc_offset |= 1;
12831 	      }
12832 	  }
12833 	else
12834 	  {
12835 	    /* Such a relocation on a static function should not have been
12836 	       emitted by the compiler.  */
12837 	    abort();
12838 	  }
12839       }
12840       *unresolved_reloc_p = FALSE;
12841       return bfd_reloc_ok;
12842 
12843     case R_ARM_FUNCDESC:
12844       {
12845 	if (h == NULL)
12846 	  {
12847 	    struct fdpic_local *local_fdpic_cnts = elf32_arm_local_fdpic_cnts(input_bfd);
12848 	    Elf_Internal_Rela outrel;
12849 	    int dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12850 	    int offset = local_fdpic_cnts[r_symndx].funcdesc_offset & ~1;
12851 	    bfd_vma addr = dynreloc_value - sym_sec->output_section->vma;
12852 	    bfd_vma seg = -1;
12853 
12854 	    if (bfd_link_pic(info) && dynindx == 0)
12855 	      abort();
12856 
12857 	    /* Replace static FUNCDESC relocation with a
12858 	       R_ARM_RELATIVE dynamic relocation or with a rofixup for
12859 	       executable.  */
12860 	    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12861 	    outrel.r_offset = input_section->output_section->vma
12862 	      + input_section->output_offset + rel->r_offset;
12863 	    outrel.r_addend = 0;
12864 	    if (bfd_link_pic(info))
12865 	      elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12866 	    else
12867 	      arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12868 
12869 	    bfd_put_32 (input_bfd, sgot->output_section->vma
12870 			+ sgot->output_offset + offset, hit_data);
12871 
12872 	    /* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12873 	    arm_elf_fill_funcdesc(output_bfd, info,
12874 				  &local_fdpic_cnts[r_symndx].funcdesc_offset,
12875 				  dynindx, offset, addr, dynreloc_value, seg);
12876 	  }
12877 	else
12878 	  {
12879 	    if (h->dynindx == -1)
12880 	      {
12881 		int dynindx;
12882 		int offset = eh->fdpic_cnts.funcdesc_offset & ~1;
12883 		bfd_vma addr;
12884 		bfd_vma seg = -1;
12885 		Elf_Internal_Rela outrel;
12886 
12887 		/* For static binaries sym_sec can be null.  */
12888 		if (sym_sec)
12889 		  {
12890 		    dynindx = elf_section_data (sym_sec->output_section)->dynindx;
12891 		    addr = dynreloc_value - sym_sec->output_section->vma;
12892 		  }
12893 		else
12894 		  {
12895 		    dynindx = 0;
12896 		    addr = 0;
12897 		  }
12898 
12899 		if (bfd_link_pic(info) && dynindx == 0)
12900 		  abort();
12901 
12902 		/* Replace the static FUNCDESC relocation with an
12903 		   R_ARM_RELATIVE dynamic relocation.  */
12904 		outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
12905 		outrel.r_offset = input_section->output_section->vma
12906 		  + input_section->output_offset + rel->r_offset;
12907 		outrel.r_addend = 0;
12908 		if (bfd_link_pic(info))
12909 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12910 		else
12911 		  arm_elf_add_rofixup(output_bfd, globals->srofixup, outrel.r_offset);
12912 
12913 		bfd_put_32 (input_bfd, sgot->output_section->vma
12914 			    + sgot->output_offset + offset, hit_data);
12915 
12916 		/* Emit R_ARM_FUNCDESC_VALUE on funcdesc if not done yet.  */
12917 		arm_elf_fill_funcdesc(output_bfd, info,
12918 				      &eh->fdpic_cnts.funcdesc_offset,
12919 				      dynindx, offset, addr, dynreloc_value, seg);
12920 	      }
12921 	    else
12922 	      {
12923 		Elf_Internal_Rela outrel;
12924 
12925 		/* Add a dynamic relocation.  */
12926 		outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_FUNCDESC);
12927 		outrel.r_offset = input_section->output_section->vma
12928 		  + input_section->output_offset + rel->r_offset;
12929 		outrel.r_addend = 0;
12930 		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
12931 	      }
12932 	  }
12933       }
12934       *unresolved_reloc_p = FALSE;
12935       return bfd_reloc_ok;
12936 
12937     case R_ARM_THM_BF16:
12938       {
12939 	bfd_vma relocation;
12940 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12941 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12942 
12943 	if (globals->use_rel)
12944 	  {
12945 	    bfd_vma immA  = (upper_insn & 0x001f);
12946 	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
12947 	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
12948 	    addend  = (immA << 12);
12949 	    addend |= (immB << 2);
12950 	    addend |= (immC << 1);
12951 	    addend |= 1;
12952 	    /* Sign extend.  */
12953 	    signed_addend = (addend & 0x10000) ? addend - (1 << 17) : addend;
12954 	  }
12955 
12956 	relocation  = value + signed_addend;
12957 	relocation -= (input_section->output_section->vma
12958 		       + input_section->output_offset
12959 		       + rel->r_offset);
12960 
12961 	/* Put RELOCATION back into the insn.  */
12962 	{
12963 	  bfd_vma immA = (relocation & 0x0001f000) >> 12;
12964 	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
12965 	  bfd_vma immC = (relocation & 0x00000002) >> 1;
12966 
12967 	  upper_insn = (upper_insn & 0xffe0) | immA;
12968 	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
12969 	}
12970 
12971 	/* Put the relocated value back in the object file:  */
12972 	bfd_put_16 (input_bfd, upper_insn, hit_data);
12973 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
12974 
12975 	return bfd_reloc_ok;
12976       }
12977 
12978     case R_ARM_THM_BF12:
12979       {
12980 	bfd_vma relocation;
12981 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
12982 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
12983 
12984 	if (globals->use_rel)
12985 	  {
12986 	    bfd_vma immA  = (upper_insn & 0x0001);
12987 	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
12988 	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
12989 	    addend  = (immA << 12);
12990 	    addend |= (immB << 2);
12991 	    addend |= (immC << 1);
12992 	    addend |= 1;
12993 	    /* Sign extend.  */
12994 	    addend = (addend & 0x1000) ? addend - (1 << 13) : addend;
12995 	    signed_addend = addend;
12996 	  }
12997 
12998 	relocation  = value + signed_addend;
12999 	relocation -= (input_section->output_section->vma
13000 		       + input_section->output_offset
13001 		       + rel->r_offset);
13002 
13003 	/* Put RELOCATION back into the insn.  */
13004 	{
13005 	  bfd_vma immA = (relocation & 0x00001000) >> 12;
13006 	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
13007 	  bfd_vma immC = (relocation & 0x00000002) >> 1;
13008 
13009 	  upper_insn = (upper_insn & 0xfffe) | immA;
13010 	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
13011 	}
13012 
13013 	/* Put the relocated value back in the object file:  */
13014 	bfd_put_16 (input_bfd, upper_insn, hit_data);
13015 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
13016 
13017 	return bfd_reloc_ok;
13018       }
13019 
13020     case R_ARM_THM_BF18:
13021       {
13022 	bfd_vma relocation;
13023 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
13024 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
13025 
13026 	if (globals->use_rel)
13027 	  {
13028 	    bfd_vma immA  = (upper_insn & 0x007f);
13029 	    bfd_vma immB  = (lower_insn & 0x07fe) >> 1;
13030 	    bfd_vma immC  = (lower_insn & 0x0800) >> 11;
13031 	    addend  = (immA << 12);
13032 	    addend |= (immB << 2);
13033 	    addend |= (immC << 1);
13034 	    addend |= 1;
13035 	    /* Sign extend.  */
13036 	    addend = (addend & 0x40000) ? addend - (1 << 19) : addend;
13037 	    signed_addend = addend;
13038 	  }
13039 
13040 	relocation  = value + signed_addend;
13041 	relocation -= (input_section->output_section->vma
13042 		       + input_section->output_offset
13043 		       + rel->r_offset);
13044 
13045 	/* Put RELOCATION back into the insn.  */
13046 	{
13047 	  bfd_vma immA = (relocation & 0x0007f000) >> 12;
13048 	  bfd_vma immB = (relocation & 0x00000ffc) >> 2;
13049 	  bfd_vma immC = (relocation & 0x00000002) >> 1;
13050 
13051 	  upper_insn = (upper_insn & 0xff80) | immA;
13052 	  lower_insn = (lower_insn & 0xf001) | (immC << 11) | (immB << 1);
13053 	}
13054 
13055 	/* Put the relocated value back in the object file:  */
13056 	bfd_put_16 (input_bfd, upper_insn, hit_data);
13057 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
13058 
13059 	return bfd_reloc_ok;
13060       }
13061 
13062     default:
13063       return bfd_reloc_notsupported;
13064     }
13065 }
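/* A minimal illustrative sketch (not part of the BFD API; the helper name
   is invented here): how the R_ARM_THM_BF16 case above reassembles the
   split immA:immB:immC fields of a Thumb-2 BF instruction pair into a
   signed 17-bit addend.  The BF12 and BF18 cases differ only in the width
   of immA and therefore in the position of the sign bit.  */

static bfd_signed_vma ATTRIBUTE_UNUSED
sketch_thm_bf16_addend (bfd_vma upper_insn, bfd_vma lower_insn)
{
  bfd_vma immA = (upper_insn & 0x001f);		/* 5 bits, first halfword.  */
  bfd_vma immB = (lower_insn & 0x07fe) >> 1;	/* 10 bits, second halfword.  */
  bfd_vma immC = (lower_insn & 0x0800) >> 11;	/* 1 bit, second halfword.  */
  bfd_vma addend = (immA << 12) | (immB << 2) | (immC << 1) | 1;

  /* Sign-extend from bit 16, as the relocation code above does.  */
  return (addend & 0x10000)
	 ? (bfd_signed_vma) addend - (1 << 17)
	 : (bfd_signed_vma) addend;
}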
13066 
13067 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */
13068 static void
13069 arm_add_to_rel (bfd *		   abfd,
13070 		bfd_byte *	   address,
13071 		reloc_howto_type * howto,
13072 		bfd_signed_vma	   increment)
13073 {
13074   bfd_signed_vma addend;
13075 
13076   if (howto->type == R_ARM_THM_CALL
13077       || howto->type == R_ARM_THM_JUMP24)
13078     {
13079       int upper_insn, lower_insn;
13080       int upper, lower;
13081 
13082       upper_insn = bfd_get_16 (abfd, address);
13083       lower_insn = bfd_get_16 (abfd, address + 2);
13084       upper = upper_insn & 0x7ff;
13085       lower = lower_insn & 0x7ff;
13086 
13087       addend = (upper << 12) | (lower << 1);
13088       addend += increment;
13089       addend >>= 1;
13090 
13091       upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
13092       lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
13093 
13094       bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
13095       bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
13096     }
13097   else
13098     {
13099       bfd_vma	     contents;
13100 
13101       contents = bfd_get_32 (abfd, address);
13102 
13103       /* Get the (signed) value from the instruction.  */
13104       addend = contents & howto->src_mask;
13105       if (addend & ((howto->src_mask + 1) >> 1))
13106 	{
13107 	  bfd_signed_vma mask;
13108 
13109 	  mask = -1;
13110 	  mask &= ~ howto->src_mask;
13111 	  addend |= mask;
13112 	}
13113 
13114       /* Add in the increment (which is a byte value).  */
13115       switch (howto->type)
13116 	{
13117 	default:
13118 	  addend += increment;
13119 	  break;
13120 
13121 	case R_ARM_PC24:
13122 	case R_ARM_PLT32:
13123 	case R_ARM_CALL:
13124 	case R_ARM_JUMP24:
13125 	  addend <<= howto->size;
13126 	  addend += increment;
13127 
13128 	  /* Should we check for overflow here ?  */
13129 
13130 	  /* Drop any undesired bits.  */
13131 	  addend >>= howto->rightshift;
13132 	  break;
13133 	}
13134 
13135       contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
13136 
13137       bfd_put_32 (abfd, contents, address);
13138     }
13139 }
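/* Illustrative only (the helper name is invented for this sketch): the
   sign-extension idiom used by arm_add_to_rel above, and again in the
   SEC_MERGE handling below.  SRC_MASK is assumed to be a contiguous mask
   of low-order bits; when the top bit of the extracted field is set, all
   the bits above the field are ORed in.  */

static bfd_signed_vma ATTRIBUTE_UNUSED
sketch_sign_extend_field (bfd_vma value, bfd_vma src_mask)
{
  bfd_signed_vma addend = value & src_mask;

  if (addend & ((src_mask + 1) >> 1))
    addend |= (bfd_signed_vma) -1 & ~(bfd_signed_vma) src_mask;

  return addend;
}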
13140 
13141 #define IS_ARM_TLS_RELOC(R_TYPE)	\
13142   ((R_TYPE) == R_ARM_TLS_GD32		\
13143    || (R_TYPE) == R_ARM_TLS_GD32_FDPIC  \
13144    || (R_TYPE) == R_ARM_TLS_LDO32	\
13145    || (R_TYPE) == R_ARM_TLS_LDM32	\
13146    || (R_TYPE) == R_ARM_TLS_LDM32_FDPIC	\
13147    || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
13148    || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
13149    || (R_TYPE) == R_ARM_TLS_TPOFF32	\
13150    || (R_TYPE) == R_ARM_TLS_LE32	\
13151    || (R_TYPE) == R_ARM_TLS_IE32	\
13152    || (R_TYPE) == R_ARM_TLS_IE32_FDPIC	\
13153    || IS_ARM_TLS_GNU_RELOC (R_TYPE))
13154 
13155 /* Specific set of relocations for the GNU TLS dialect.  */
13156 #define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
13157   ((R_TYPE) == R_ARM_TLS_GOTDESC	\
13158    || (R_TYPE) == R_ARM_TLS_CALL	\
13159    || (R_TYPE) == R_ARM_THM_TLS_CALL	\
13160    || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
13161    || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
13162 
13163 /* Relocate an ARM ELF section.  */
13164 
13165 static bfd_boolean
13166 elf32_arm_relocate_section (bfd *		   output_bfd,
13167 			    struct bfd_link_info * info,
13168 			    bfd *		   input_bfd,
13169 			    asection *		   input_section,
13170 			    bfd_byte *		   contents,
13171 			    Elf_Internal_Rela *	   relocs,
13172 			    Elf_Internal_Sym *	   local_syms,
13173 			    asection **		   local_sections)
13174 {
13175   Elf_Internal_Shdr *symtab_hdr;
13176   struct elf_link_hash_entry **sym_hashes;
13177   Elf_Internal_Rela *rel;
13178   Elf_Internal_Rela *relend;
13179   const char *name;
13180   struct elf32_arm_link_hash_table * globals;
13181 
13182   globals = elf32_arm_hash_table (info);
13183   if (globals == NULL)
13184     return FALSE;
13185 
13186   symtab_hdr = & elf_symtab_hdr (input_bfd);
13187   sym_hashes = elf_sym_hashes (input_bfd);
13188 
13189   rel = relocs;
13190   relend = relocs + input_section->reloc_count;
13191   for (; rel < relend; rel++)
13192     {
13193       int			   r_type;
13194       reloc_howto_type *	   howto;
13195       unsigned long		   r_symndx;
13196       Elf_Internal_Sym *	   sym;
13197       asection *		   sec;
13198       struct elf_link_hash_entry * h;
13199       bfd_vma			   relocation;
13200       bfd_reloc_status_type	   r;
13201       arelent			   bfd_reloc;
13202       char			   sym_type;
13203       bfd_boolean		   unresolved_reloc = FALSE;
13204       char *error_message = NULL;
13205 
13206       r_symndx = ELF32_R_SYM (rel->r_info);
13207       r_type   = ELF32_R_TYPE (rel->r_info);
13208       r_type   = arm_real_reloc_type (globals, r_type);
13209 
13210       if (   r_type == R_ARM_GNU_VTENTRY
13211 	  || r_type == R_ARM_GNU_VTINHERIT)
13212 	continue;
13213 
13214       howto = bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
13215 
13216       if (howto == NULL)
13217 	return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
13218 
13219       h = NULL;
13220       sym = NULL;
13221       sec = NULL;
13222 
13223       if (r_symndx < symtab_hdr->sh_info)
13224 	{
13225 	  sym = local_syms + r_symndx;
13226 	  sym_type = ELF32_ST_TYPE (sym->st_info);
13227 	  sec = local_sections[r_symndx];
13228 
13229 	  /* An object file might have a reference to a local
13230 	     undefined symbol.  This is a daft object file, but we
13231 	     should at least do something about it.  V4BX & NONE
13232 	     relocations do not use the symbol and are explicitly
13233 	     allowed to use the undefined symbol, so allow those.
13234 	     Likewise for relocations against STN_UNDEF.  */
13235 	  if (r_type != R_ARM_V4BX
13236 	      && r_type != R_ARM_NONE
13237 	      && r_symndx != STN_UNDEF
13238 	      && bfd_is_und_section (sec)
13239 	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
13240 	    (*info->callbacks->undefined_symbol)
13241 	      (info, bfd_elf_string_from_elf_section
13242 	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
13243 	       input_bfd, input_section,
13244 	       rel->r_offset, TRUE);
13245 
13246 	  if (globals->use_rel)
13247 	    {
13248 	      relocation = (sec->output_section->vma
13249 			    + sec->output_offset
13250 			    + sym->st_value);
13251 	      if (!bfd_link_relocatable (info)
13252 		  && (sec->flags & SEC_MERGE)
13253 		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13254 		{
13255 		  asection *msec;
13256 		  bfd_vma addend, value;
13257 
13258 		  switch (r_type)
13259 		    {
13260 		    case R_ARM_MOVW_ABS_NC:
13261 		    case R_ARM_MOVT_ABS:
13262 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13263 		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
13264 		      addend = (addend ^ 0x8000) - 0x8000;
13265 		      break;
13266 
13267 		    case R_ARM_THM_MOVW_ABS_NC:
13268 		    case R_ARM_THM_MOVT_ABS:
13269 		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
13270 			      << 16;
13271 		      value |= bfd_get_16 (input_bfd,
13272 					   contents + rel->r_offset + 2);
13273 		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
13274 			       | ((value & 0x04000000) >> 15);
13275 		      addend = (addend ^ 0x8000) - 0x8000;
13276 		      break;
13277 
13278 		    default:
13279 		      if (howto->rightshift
13280 			  || (howto->src_mask & (howto->src_mask + 1)))
13281 			{
13282 			  _bfd_error_handler
13283 			    /* xgettext:c-format */
13284 			    (_("%pB(%pA+%#" PRIx64 "): "
13285 			       "%s relocation against SEC_MERGE section"),
13286 			     input_bfd, input_section,
13287 			     (uint64_t) rel->r_offset, howto->name);
13288 			  return FALSE;
13289 			}
13290 
13291 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
13292 
13293 		      /* Get the (signed) value from the instruction.  */
13294 		      addend = value & howto->src_mask;
13295 		      if (addend & ((howto->src_mask + 1) >> 1))
13296 			{
13297 			  bfd_signed_vma mask;
13298 
13299 			  mask = -1;
13300 			  mask &= ~ howto->src_mask;
13301 			  addend |= mask;
13302 			}
13303 		      break;
13304 		    }
13305 
13306 		  msec = sec;
13307 		  addend =
13308 		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
13309 		    - relocation;
13310 		  addend += msec->output_section->vma + msec->output_offset;
13311 
13312 		  /* Cases here must match those in the preceding
13313 		     switch statement.  */
13314 		  switch (r_type)
13315 		    {
13316 		    case R_ARM_MOVW_ABS_NC:
13317 		    case R_ARM_MOVT_ABS:
13318 		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
13319 			      | (addend & 0xfff);
13320 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13321 		      break;
13322 
13323 		    case R_ARM_THM_MOVW_ABS_NC:
13324 		    case R_ARM_THM_MOVT_ABS:
13325 		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
13326 			      | (addend & 0xff) | ((addend & 0x0800) << 15);
13327 		      bfd_put_16 (input_bfd, value >> 16,
13328 				  contents + rel->r_offset);
13329 		      bfd_put_16 (input_bfd, value,
13330 				  contents + rel->r_offset + 2);
13331 		      break;
13332 
13333 		    default:
13334 		      value = (value & ~ howto->dst_mask)
13335 			      | (addend & howto->dst_mask);
13336 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
13337 		      break;
13338 		    }
13339 		}
13340 	    }
13341 	  else
13342 	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
13343 	}
13344       else
13345 	{
13346 	  bfd_boolean warned, ignored;
13347 
13348 	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
13349 				   r_symndx, symtab_hdr, sym_hashes,
13350 				   h, sec, relocation,
13351 				   unresolved_reloc, warned, ignored);
13352 
13353 	  sym_type = h->type;
13354 	}
13355 
13356       if (sec != NULL && discarded_section (sec))
13357 	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
13358 					 rel, 1, relend, howto, 0, contents);
13359 
13360       if (bfd_link_relocatable (info))
13361 	{
13362 	  /* This is a relocatable link.  We don't have to change
13363 	     anything, unless the reloc is against a section symbol,
13364 	     in which case we have to adjust according to where the
13365 	     section symbol winds up in the output section.  */
13366 	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
13367 	    {
13368 	      if (globals->use_rel)
13369 		arm_add_to_rel (input_bfd, contents + rel->r_offset,
13370 				howto, (bfd_signed_vma) sec->output_offset);
13371 	      else
13372 		rel->r_addend += sec->output_offset;
13373 	    }
13374 	  continue;
13375 	}
13376 
13377       if (h != NULL)
13378 	name = h->root.root.string;
13379       else
13380 	{
13381 	  name = (bfd_elf_string_from_elf_section
13382 		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
13383 	  if (name == NULL || *name == '\0')
13384 	    name = bfd_section_name (sec);
13385 	}
13386 
13387       if (r_symndx != STN_UNDEF
13388 	  && r_type != R_ARM_NONE
13389 	  && (h == NULL
13390 	      || h->root.type == bfd_link_hash_defined
13391 	      || h->root.type == bfd_link_hash_defweak)
13392 	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
13393 	{
13394 	  _bfd_error_handler
13395 	    ((sym_type == STT_TLS
13396 	      /* xgettext:c-format */
13397 	      ? _("%pB(%pA+%#" PRIx64 "): %s used with TLS symbol %s")
13398 	      /* xgettext:c-format */
13399 	      : _("%pB(%pA+%#" PRIx64 "): %s used with non-TLS symbol %s")),
13400 	     input_bfd,
13401 	     input_section,
13402 	     (uint64_t) rel->r_offset,
13403 	     howto->name,
13404 	     name);
13405 	}
13406 
13407       /* We call elf32_arm_final_link_relocate unless we're completely
13408 	 done, i.e., the relaxation produced the final output we want,
13409 	 and we won't let anybody mess with it. Also, we have to do
13410 	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
13411 	 both in relaxed and non-relaxed cases.  */
13412       if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
13413 	  || (IS_ARM_TLS_GNU_RELOC (r_type)
13414 	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
13415 		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
13416 		   & GOT_TLS_GDESC)))
13417 	{
13418 	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
13419 				   contents, rel, h == NULL);
13420 	  /* This may have been marked unresolved because it came from
13421 	     a shared library.  But we've just dealt with that.  */
13422 	  unresolved_reloc = 0;
13423 	}
13424       else
13425 	r = bfd_reloc_continue;
13426 
13427       if (r == bfd_reloc_continue)
13428 	{
13429 	  unsigned char branch_type =
13430 	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
13431 	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
13432 
13433 	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
13434 					     input_section, contents, rel,
13435 					     relocation, info, sec, name,
13436 					     sym_type, branch_type, h,
13437 					     &unresolved_reloc,
13438 					     &error_message);
13439 	}
13440 
13441       /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
13442 	 because such sections are not SEC_ALLOC and thus ld.so will
13443 	 not process them.  */
13444       if (unresolved_reloc
13445 	  && !((input_section->flags & SEC_DEBUGGING) != 0
13446 	       && h->def_dynamic)
13447 	  && _bfd_elf_section_offset (output_bfd, info, input_section,
13448 				      rel->r_offset) != (bfd_vma) -1)
13449 	{
13450 	  _bfd_error_handler
13451 	    /* xgettext:c-format */
13452 	    (_("%pB(%pA+%#" PRIx64 "): "
13453 	       "unresolvable %s relocation against symbol `%s'"),
13454 	     input_bfd,
13455 	     input_section,
13456 	     (uint64_t) rel->r_offset,
13457 	     howto->name,
13458 	     h->root.root.string);
13459 	  return FALSE;
13460 	}
13461 
13462       if (r != bfd_reloc_ok)
13463 	{
13464 	  switch (r)
13465 	    {
13466 	    case bfd_reloc_overflow:
13467 	      /* If the overflowing reloc was to an undefined symbol,
13468 		 we have already printed one error message and there
13469 		 is no point complaining again.  */
13470 	      if (!h || h->root.type != bfd_link_hash_undefined)
13471 		(*info->callbacks->reloc_overflow)
13472 		  (info, (h ? &h->root : NULL), name, howto->name,
13473 		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
13474 	      break;
13475 
13476 	    case bfd_reloc_undefined:
13477 	      (*info->callbacks->undefined_symbol)
13478 		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
13479 	      break;
13480 
13481 	    case bfd_reloc_outofrange:
13482 	      error_message = _("out of range");
13483 	      goto common_error;
13484 
13485 	    case bfd_reloc_notsupported:
13486 	      error_message = _("unsupported relocation");
13487 	      goto common_error;
13488 
13489 	    case bfd_reloc_dangerous:
13490 	      /* error_message should already be set.  */
13491 	      goto common_error;
13492 
13493 	    default:
13494 	      error_message = _("unknown error");
13495 	      /* Fall through.  */
13496 
13497 	    common_error:
13498 	      BFD_ASSERT (error_message != NULL);
13499 	      (*info->callbacks->reloc_dangerous)
13500 		(info, error_message, input_bfd, input_section, rel->r_offset);
13501 	      break;
13502 	    }
13503 	}
13504     }
13505 
13506   return TRUE;
13507 }
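/* Illustrative only (the helper name is invented for this sketch): the
   "(x ^ 0x8000) - 0x8000" idiom used in the SEC_MERGE handling above
   sign-extends a 16-bit MOVW/MOVT immediate to the full bfd_vma width
   without a conditional.  */

static bfd_signed_vma ATTRIBUTE_UNUSED
sketch_sign_extend_16 (bfd_vma imm16)
{
  return (bfd_signed_vma) ((imm16 ^ 0x8000) - 0x8000);
}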
13508 
13509 /* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
13510    adds the edit to the start of the list.  (The list must be built in order of
13511    ascending TINDEX: the function's callers are primarily responsible for
13512    maintaining that condition).  */
13513 
13514 static void
13515 add_unwind_table_edit (arm_unwind_table_edit **head,
13516 		       arm_unwind_table_edit **tail,
13517 		       arm_unwind_edit_type type,
13518 		       asection *linked_section,
13519 		       unsigned int tindex)
13520 {
13521   arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
13522       xmalloc (sizeof (arm_unwind_table_edit));
13523 
13524   new_edit->type = type;
13525   new_edit->linked_section = linked_section;
13526   new_edit->index = tindex;
13527 
13528   if (tindex > 0)
13529     {
13530       new_edit->next = NULL;
13531 
13532       if (*tail)
13533 	(*tail)->next = new_edit;
13534 
13535       (*tail) = new_edit;
13536 
13537       if (!*head)
13538 	(*head) = new_edit;
13539     }
13540   else
13541     {
13542       new_edit->next = *head;
13543 
13544       if (!*tail)
13545 	*tail = new_edit;
13546 
13547       *head = new_edit;
13548     }
13549 }
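/* Illustrative only (this sketch is never called and does not free the
   edits it creates): the calling convention for the list builder above.
   Edits are added in ascending TINDEX order, and the EXIDX_CANTUNWIND
   terminator added by insert_cantunwind_after below uses UINT_MAX so that
   it sorts after every real table index.  */

static void ATTRIBUTE_UNUSED
sketch_build_unwind_edits (asection *text_sec)
{
  arm_unwind_table_edit *head = NULL, *tail = NULL;

  /* Delete the duplicate entry at table index 3 ...  */
  add_unwind_table_edit (&head, &tail, DELETE_EXIDX_ENTRY, NULL, 3);
  /* ... then terminate the table with an EXIDX_CANTUNWIND marker.  */
  add_unwind_table_edit (&head, &tail, INSERT_EXIDX_CANTUNWIND_AT_END,
			 text_sec, UINT_MAX);
}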
13550 
13551 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
13552 
13553 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST may be negative.  */
13554 static void
13555 adjust_exidx_size(asection *exidx_sec, int adjust)
13556 {
13557   asection *out_sec;
13558 
13559   if (!exidx_sec->rawsize)
13560     exidx_sec->rawsize = exidx_sec->size;
13561 
13562   bfd_set_section_size (exidx_sec, exidx_sec->size + adjust);
13563   out_sec = exidx_sec->output_section;
13564   /* Adjust size of output section.  */
13565   bfd_set_section_size (out_sec, out_sec->size + adjust);
13566 }
13567 
13568 /* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */
13569 static void
13570 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
13571 {
13572   struct _arm_elf_section_data *exidx_arm_data;
13573 
13574   exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13575   add_unwind_table_edit (
13576     &exidx_arm_data->u.exidx.unwind_edit_list,
13577     &exidx_arm_data->u.exidx.unwind_edit_tail,
13578     INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
13579 
13580   exidx_arm_data->additional_reloc_count++;
13581 
13582   adjust_exidx_size(exidx_sec, 8);
13583 }
13584 
13585 /* Scan .ARM.exidx tables, and create a list describing edits which should be
13586    made to those tables, such that:
13587 
13588      1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
13589      2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
13590 	codes which have been inlined into the index).
13591 
13592    If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
13593 
13594    The edits are applied when the tables are written
13595    (in elf32_arm_write_section).  */
13596 
13597 bfd_boolean
13598 elf32_arm_fix_exidx_coverage (asection **text_section_order,
13599 			      unsigned int num_text_sections,
13600 			      struct bfd_link_info *info,
13601 			      bfd_boolean merge_exidx_entries)
13602 {
13603   bfd *inp;
13604   unsigned int last_second_word = 0, i;
13605   asection *last_exidx_sec = NULL;
13606   asection *last_text_sec = NULL;
13607   int last_unwind_type = -1;
13608 
13609   /* Walk over all EXIDX sections, and create backlinks from the corresponding
13610      text sections.  */
13611   for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
13612     {
13613       asection *sec;
13614 
13615       for (sec = inp->sections; sec != NULL; sec = sec->next)
13616 	{
13617 	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
13618 	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
13619 
13620 	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
13621 	    continue;
13622 
13623 	  if (elf_sec->linked_to)
13624 	    {
13625 	      Elf_Internal_Shdr *linked_hdr
13626 		= &elf_section_data (elf_sec->linked_to)->this_hdr;
13627 	      struct _arm_elf_section_data *linked_sec_arm_data
13628 		= get_arm_elf_section_data (linked_hdr->bfd_section);
13629 
13630 	      if (linked_sec_arm_data == NULL)
13631 		continue;
13632 
13633 	      /* Link this .ARM.exidx section back from the text section it
13634 		 describes.  */
13635 	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
13636 	    }
13637 	}
13638     }
13639 
13640   /* Walk all text sections in order of increasing VMA.  Eliminate duplicate
13641      index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
13642      and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
13643 
13644   for (i = 0; i < num_text_sections; i++)
13645     {
13646       asection *sec = text_section_order[i];
13647       asection *exidx_sec;
13648       struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
13649       struct _arm_elf_section_data *exidx_arm_data;
13650       bfd_byte *contents = NULL;
13651       int deleted_exidx_bytes = 0;
13652       bfd_vma j;
13653       arm_unwind_table_edit *unwind_edit_head = NULL;
13654       arm_unwind_table_edit *unwind_edit_tail = NULL;
13655       Elf_Internal_Shdr *hdr;
13656       bfd *ibfd;
13657 
13658       if (arm_data == NULL)
13659 	continue;
13660 
13661       exidx_sec = arm_data->u.text.arm_exidx_sec;
13662       if (exidx_sec == NULL)
13663 	{
13664 	  /* Section has no unwind data.  */
13665 	  if (last_unwind_type == 0 || !last_exidx_sec)
13666 	    continue;
13667 
13668 	  /* Ignore zero sized sections.  */
13669 	  if (sec->size == 0)
13670 	    continue;
13671 
13672 	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
13673 	  last_unwind_type = 0;
13674 	  continue;
13675 	}
13676 
13677       /* Skip /DISCARD/ sections.  */
13678       if (bfd_is_abs_section (exidx_sec->output_section))
13679 	continue;
13680 
13681       hdr = &elf_section_data (exidx_sec)->this_hdr;
13682       if (hdr->sh_type != SHT_ARM_EXIDX)
13683 	continue;
13684 
13685       exidx_arm_data = get_arm_elf_section_data (exidx_sec);
13686       if (exidx_arm_data == NULL)
13687 	continue;
13688 
13689       ibfd = exidx_sec->owner;
13690 
13691       if (hdr->contents != NULL)
13692 	contents = hdr->contents;
13693       else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
13694 	/* An error?  */
13695 	continue;
13696 
13697       if (last_unwind_type > 0)
13698 	{
13699 	  unsigned int first_word = bfd_get_32 (ibfd, contents);
13700 	  /* Add cantunwind if first unwind item does not match section
13701 	     start.  */
13702 	  if (first_word != sec->vma)
13703 	    {
13704 	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
13705 	      last_unwind_type = 0;
13706 	    }
13707 	}
13708 
13709       for (j = 0; j < hdr->sh_size; j += 8)
13710 	{
13711 	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
13712 	  int unwind_type;
13713 	  int elide = 0;
13714 
13715 	  /* An EXIDX_CANTUNWIND entry.  */
13716 	  if (second_word == 1)
13717 	    {
13718 	      if (last_unwind_type == 0)
13719 		elide = 1;
13720 	      unwind_type = 0;
13721 	    }
13722 	  /* Inlined unwinding data.  Merge if equal to previous.  */
13723 	  else if ((second_word & 0x80000000) != 0)
13724 	    {
13725 	      if (merge_exidx_entries
13726 		   && last_second_word == second_word && last_unwind_type == 1)
13727 		elide = 1;
13728 	      unwind_type = 1;
13729 	      last_second_word = second_word;
13730 	    }
13731 	  /* Normal table entry.  In theory we could merge these too,
13732 	     but duplicate entries are likely to be much less common.  */
13733 	  else
13734 	    unwind_type = 2;
13735 
13736 	  if (elide && !bfd_link_relocatable (info))
13737 	    {
13738 	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
13739 				     DELETE_EXIDX_ENTRY, NULL, j / 8);
13740 
13741 	      deleted_exidx_bytes += 8;
13742 	    }
13743 
13744 	  last_unwind_type = unwind_type;
13745 	}
13746 
13747       /* Free contents if we allocated it ourselves.  */
13748       if (contents != hdr->contents)
13749 	free (contents);
13750 
13751       /* Record edits to be applied later (in elf32_arm_write_section).  */
13752       exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
13753       exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
13754 
13755       if (deleted_exidx_bytes > 0)
13756 	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
13757 
13758       last_exidx_sec = exidx_sec;
13759       last_text_sec = sec;
13760     }
13761 
13762   /* Add terminating CANTUNWIND entry.  */
13763   if (!bfd_link_relocatable (info) && last_exidx_sec
13764       && last_unwind_type != 0)
13765     insert_cantunwind_after(last_text_sec, last_exidx_sec);
13766 
13767   return TRUE;
13768 }
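/* Illustrative only (the helper name is invented for this sketch): the
   classification applied to the second word of each 8-byte .ARM.exidx
   entry by the scan above.  */

static int ATTRIBUTE_UNUSED
sketch_exidx_unwind_type (unsigned int second_word)
{
  if (second_word == 1)
    return 0;	/* EXIDX_CANTUNWIND.  */
  if ((second_word & 0x80000000) != 0)
    return 1;	/* Unwind opcodes inlined in the index entry.  */
  return 2;	/* Reference to an .ARM.extab entry.  */
}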
13769 
13770 static bfd_boolean
13771 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
13772 			       bfd *ibfd, const char *name)
13773 {
13774   asection *sec, *osec;
13775 
13776   sec = bfd_get_linker_section (ibfd, name);
13777   if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
13778     return TRUE;
13779 
13780   osec = sec->output_section;
13781   if (elf32_arm_write_section (obfd, info, sec, sec->contents))
13782     return TRUE;
13783 
13784   if (! bfd_set_section_contents (obfd, osec, sec->contents,
13785 				  sec->output_offset, sec->size))
13786     return FALSE;
13787 
13788   return TRUE;
13789 }
13790 
13791 static bfd_boolean
13792 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
13793 {
13794   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
13795   asection *sec, *osec;
13796 
13797   if (globals == NULL)
13798     return FALSE;
13799 
13800   /* Invoke the regular ELF backend linker to do all the work.  */
13801   if (!bfd_elf_final_link (abfd, info))
13802     return FALSE;
13803 
13804   /* Process stub sections (e.g. BE8 encoding, ...).  */
13805   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
13806   unsigned int i;
13807   for (i = 0; i < htab->top_id; i++)
13808     {
13809       sec = htab->stub_group[i].stub_sec;
13810       /* Only process it once, in its link_sec slot.  */
13811       if (sec && i == htab->stub_group[i].link_sec->id)
13812 	{
13813 	  osec = sec->output_section;
13814 	  elf32_arm_write_section (abfd, info, sec, sec->contents);
13815 	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
13816 					  sec->output_offset, sec->size))
13817 	    return FALSE;
13818 	}
13819     }
13820 
13821   /* Write out any glue sections now that we have created all the
13822      stubs.  */
13823   if (globals->bfd_of_glue_owner != NULL)
13824     {
13825       if (! elf32_arm_output_glue_section (info, abfd,
13826 					   globals->bfd_of_glue_owner,
13827 					   ARM2THUMB_GLUE_SECTION_NAME))
13828 	return FALSE;
13829 
13830       if (! elf32_arm_output_glue_section (info, abfd,
13831 					   globals->bfd_of_glue_owner,
13832 					   THUMB2ARM_GLUE_SECTION_NAME))
13833 	return FALSE;
13834 
13835       if (! elf32_arm_output_glue_section (info, abfd,
13836 					   globals->bfd_of_glue_owner,
13837 					   VFP11_ERRATUM_VENEER_SECTION_NAME))
13838 	return FALSE;
13839 
13840       if (! elf32_arm_output_glue_section (info, abfd,
13841 					   globals->bfd_of_glue_owner,
13842 					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
13843 	return FALSE;
13844 
13845       if (! elf32_arm_output_glue_section (info, abfd,
13846 					   globals->bfd_of_glue_owner,
13847 					   ARM_BX_GLUE_SECTION_NAME))
13848 	return FALSE;
13849     }
13850 
13851   return TRUE;
13852 }
13853 
13854 /* Return a best guess for the machine number based on the attributes.  */
13855 
13856 static unsigned int
13857 bfd_arm_get_mach_from_attributes (bfd * abfd)
13858 {
13859   int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
13860 
13861   switch (arch)
13862     {
13863     case TAG_CPU_ARCH_PRE_V4: return bfd_mach_arm_3M;
13864     case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
13865     case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
13866     case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
13867 
13868     case TAG_CPU_ARCH_V5TE:
13869       {
13870 	char * name;
13871 
13872 	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
13873 	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
13874 
13875 	if (name)
13876 	  {
13877 	    if (strcmp (name, "IWMMXT2") == 0)
13878 	      return bfd_mach_arm_iWMMXt2;
13879 
13880 	    if (strcmp (name, "IWMMXT") == 0)
13881 	      return bfd_mach_arm_iWMMXt;
13882 
13883 	    if (strcmp (name, "XSCALE") == 0)
13884 	      {
13885 		int wmmx;
13886 
13887 		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
13888 		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
13889 		switch (wmmx)
13890 		  {
13891 		  case 1: return bfd_mach_arm_iWMMXt;
13892 		  case 2: return bfd_mach_arm_iWMMXt2;
13893 		  default: return bfd_mach_arm_XScale;
13894 		  }
13895 	      }
13896 	  }
13897 
13898 	return bfd_mach_arm_5TE;
13899       }
13900 
13901     case TAG_CPU_ARCH_V5TEJ:
13902 	return bfd_mach_arm_5TEJ;
13903     case TAG_CPU_ARCH_V6:
13904 	return bfd_mach_arm_6;
13905     case TAG_CPU_ARCH_V6KZ:
13906 	return bfd_mach_arm_6KZ;
13907     case TAG_CPU_ARCH_V6T2:
13908 	return bfd_mach_arm_6T2;
13909     case TAG_CPU_ARCH_V6K:
13910 	return bfd_mach_arm_6K;
13911     case TAG_CPU_ARCH_V7:
13912 	return bfd_mach_arm_7;
13913     case TAG_CPU_ARCH_V6_M:
13914 	return bfd_mach_arm_6M;
13915     case TAG_CPU_ARCH_V6S_M:
13916 	return bfd_mach_arm_6SM;
13917     case TAG_CPU_ARCH_V7E_M:
13918 	return bfd_mach_arm_7EM;
13919     case TAG_CPU_ARCH_V8:
13920 	return bfd_mach_arm_8;
13921     case TAG_CPU_ARCH_V8R:
13922 	return bfd_mach_arm_8R;
13923     case TAG_CPU_ARCH_V8M_BASE:
13924 	return bfd_mach_arm_8M_BASE;
13925     case TAG_CPU_ARCH_V8M_MAIN:
13926 	return bfd_mach_arm_8M_MAIN;
13927     case TAG_CPU_ARCH_V8_1M_MAIN:
13928 	return bfd_mach_arm_8_1M_MAIN;
13929 
13930     default:
13931       /* Force entry to be added for any new known Tag_CPU_arch value.  */
13932       BFD_ASSERT (arch > MAX_TAG_CPU_ARCH);
13933 
13934       /* Unknown Tag_CPU_arch value.  */
13935       return bfd_mach_arm_unknown;
13936     }
13937 }
13938 
13939 /* Set the right machine number.  */
13940 
13941 static bfd_boolean
13942 elf32_arm_object_p (bfd *abfd)
13943 {
13944   unsigned int mach;
13945 
13946   mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
13947 
13948   if (mach == bfd_mach_arm_unknown)
13949     {
13950       if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
13951 	mach = bfd_mach_arm_ep9312;
13952       else
13953 	mach = bfd_arm_get_mach_from_attributes (abfd);
13954     }
13955 
13956   bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
13957   return TRUE;
13958 }
13959 
13960 /* Function to keep ARM specific flags in the ELF header.  */
13961 
13962 static bfd_boolean
13963 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
13964 {
13965   if (elf_flags_init (abfd)
13966       && elf_elfheader (abfd)->e_flags != flags)
13967     {
13968       if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
13969 	{
13970 	  if (flags & EF_ARM_INTERWORK)
13971 	    _bfd_error_handler
13972 	      (_("warning: not setting interworking flag of %pB since it has already been specified as non-interworking"),
13973 	       abfd);
13974 	  else
13975 	    _bfd_error_handler
13976 	      (_("warning: clearing the interworking flag of %pB due to outside request"),
13977 	       abfd);
13978 	}
13979     }
13980   else
13981     {
13982       elf_elfheader (abfd)->e_flags = flags;
13983       elf_flags_init (abfd) = TRUE;
13984     }
13985 
13986   return TRUE;
13987 }
13988 
13989 /* Copy backend specific data from one object module to another.  */
13990 
13991 static bfd_boolean
13992 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
13993 {
13994   flagword in_flags;
13995   flagword out_flags;
13996 
13997   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
13998     return TRUE;
13999 
14000   in_flags  = elf_elfheader (ibfd)->e_flags;
14001   out_flags = elf_elfheader (obfd)->e_flags;
14002 
14003   if (elf_flags_init (obfd)
14004       && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
14005       && in_flags != out_flags)
14006     {
14007       /* Cannot mix APCS26 and APCS32 code.  */
14008       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
14009 	return FALSE;
14010 
14011       /* Cannot mix float APCS and non-float APCS code.  */
14012       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
14013 	return FALSE;
14014 
14015       /* If the src and dest have different interworking flags
14016 	 then turn off the interworking bit.  */
14017       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
14018 	{
14019 	  if (out_flags & EF_ARM_INTERWORK)
14020 	    _bfd_error_handler
14021 	      (_("warning: clearing the interworking flag of %pB because non-interworking code in %pB has been linked with it"),
14022 	       obfd, ibfd);
14023 
14024 	  in_flags &= ~EF_ARM_INTERWORK;
14025 	}
14026 
14027       /* Likewise for PIC, though don't warn for this case.  */
14028       if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
14029 	in_flags &= ~EF_ARM_PIC;
14030     }
14031 
14032   elf_elfheader (obfd)->e_flags = in_flags;
14033   elf_flags_init (obfd) = TRUE;
14034 
14035   return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
14036 }
14037 
14038 /* Values for Tag_ABI_PCS_R9_use.  */
14039 enum
14040 {
14041   AEABI_R9_V6,
14042   AEABI_R9_SB,
14043   AEABI_R9_TLS,
14044   AEABI_R9_unused
14045 };
14046 
14047 /* Values for Tag_ABI_PCS_RW_data.  */
14048 enum
14049 {
14050   AEABI_PCS_RW_data_absolute,
14051   AEABI_PCS_RW_data_PCrel,
14052   AEABI_PCS_RW_data_SBrel,
14053   AEABI_PCS_RW_data_unused
14054 };
14055 
14056 /* Values for Tag_ABI_enum_size.  */
14057 enum
14058 {
14059   AEABI_enum_unused,
14060   AEABI_enum_short,
14061   AEABI_enum_wide,
14062   AEABI_enum_forced_wide
14063 };
14064 
14065 /* Determine whether an object attribute tag takes an integer, a
14066    string or both.  */
14067 
14068 static int
14069 elf32_arm_obj_attrs_arg_type (int tag)
14070 {
14071   if (tag == Tag_compatibility)
14072     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
14073   else if (tag == Tag_nodefaults)
14074     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
14075   else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
14076     return ATTR_TYPE_FLAG_STR_VAL;
14077   else if (tag < 32)
14078     return ATTR_TYPE_FLAG_INT_VAL;
14079   else
14080     return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
14081 }
14082 
14083 /* The ABI defines that Tag_conformance should be emitted first, and that
14084    Tag_nodefaults should be second (if either is defined).  This sets those
14085    two positions, and bumps up the position of all the remaining tags to
14086    compensate.  */
14087 static int
14088 elf32_arm_obj_attrs_order (int num)
14089 {
14090   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
14091     return Tag_conformance;
14092   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
14093     return Tag_nodefaults;
14094   if ((num - 2) < Tag_nodefaults)
14095     return num - 2;
14096   if ((num - 1) < Tag_conformance)
14097     return num - 1;
14098   return num;
14099 }
14100 
14101 /* Attribute numbers >=64 (mod 128) can be safely ignored.  */
14102 static bfd_boolean
14103 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
14104 {
14105   if ((tag & 127) < 64)
14106     {
14107       _bfd_error_handler
14108 	(_("%pB: unknown mandatory EABI object attribute %d"),
14109 	 abfd, tag);
14110       bfd_set_error (bfd_error_bad_value);
14111       return FALSE;
14112     }
14113   else
14114     {
14115       _bfd_error_handler
14116 	(_("warning: %pB: unknown EABI object attribute %d"),
14117 	 abfd, tag);
14118       return TRUE;
14119     }
14120 }
14121 
14122 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
14123    Returns -1 if no architecture could be read.  */
14124 
14125 static int
14126 get_secondary_compatible_arch (bfd *abfd)
14127 {
14128   obj_attribute *attr =
14129     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14130 
14131   /* Note: the tag and its argument below are uleb128 values, though
14132      currently-defined values fit in one byte for each.  */
14133   if (attr->s
14134       && attr->s[0] == Tag_CPU_arch
14135       && (attr->s[1] & 128) != 128
14136       && attr->s[2] == 0)
14137    return attr->s[1];
14138 
14139   /* This tag is "safely ignorable", so don't complain if it looks funny.  */
14140   return -1;
14141 }
14142 
14143 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
14144    The tag is removed if ARCH is -1.  */
14145 
14146 static void
14147 set_secondary_compatible_arch (bfd *abfd, int arch)
14148 {
14149   obj_attribute *attr =
14150     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
14151 
14152   if (arch == -1)
14153     {
14154       attr->s = NULL;
14155       return;
14156     }
14157 
14158   /* Note: the tag and its argument below are uleb128 values, though
14159      currently-defined values fit in one byte for each.  */
14160   if (!attr->s)
14161     attr->s = (char *) bfd_alloc (abfd, 3);
14162   attr->s[0] = Tag_CPU_arch;
14163   attr->s[1] = arch;
14164   attr->s[2] = '\0';
14165 }
14166 
14167 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
14168    into account.  */
14169 
14170 static int
14171 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
14172 		      int newtag, int secondary_compat)
14173 {
14174 #define T(X) TAG_CPU_ARCH_##X
14175   int tagl, tagh, result;
14176   const int v6t2[] =
14177     {
14178       T(V6T2),   /* PRE_V4.  */
14179       T(V6T2),   /* V4.  */
14180       T(V6T2),   /* V4T.  */
14181       T(V6T2),   /* V5T.  */
14182       T(V6T2),   /* V5TE.  */
14183       T(V6T2),   /* V5TEJ.  */
14184       T(V6T2),   /* V6.  */
14185       T(V7),     /* V6KZ.  */
14186       T(V6T2)    /* V6T2.  */
14187     };
14188   const int v6k[] =
14189     {
14190       T(V6K),    /* PRE_V4.  */
14191       T(V6K),    /* V4.  */
14192       T(V6K),    /* V4T.  */
14193       T(V6K),    /* V5T.  */
14194       T(V6K),    /* V5TE.  */
14195       T(V6K),    /* V5TEJ.  */
14196       T(V6K),    /* V6.  */
14197       T(V6KZ),   /* V6KZ.  */
14198       T(V7),     /* V6T2.  */
14199       T(V6K)     /* V6K.  */
14200     };
14201   const int v7[] =
14202     {
14203       T(V7),     /* PRE_V4.  */
14204       T(V7),     /* V4.  */
14205       T(V7),     /* V4T.  */
14206       T(V7),     /* V5T.  */
14207       T(V7),     /* V5TE.  */
14208       T(V7),     /* V5TEJ.  */
14209       T(V7),     /* V6.  */
14210       T(V7),     /* V6KZ.  */
14211       T(V7),     /* V6T2.  */
14212       T(V7),     /* V6K.  */
14213       T(V7)      /* V7.  */
14214     };
14215   const int v6_m[] =
14216     {
14217       -1,	 /* PRE_V4.  */
14218       -1,	 /* V4.  */
14219       T(V6K),    /* V4T.  */
14220       T(V6K),    /* V5T.  */
14221       T(V6K),    /* V5TE.  */
14222       T(V6K),    /* V5TEJ.  */
14223       T(V6K),    /* V6.  */
14224       T(V6KZ),   /* V6KZ.  */
14225       T(V7),     /* V6T2.  */
14226       T(V6K),    /* V6K.  */
14227       T(V7),     /* V7.  */
14228       T(V6_M)    /* V6_M.  */
14229     };
14230   const int v6s_m[] =
14231     {
14232       -1,	 /* PRE_V4.  */
14233       -1,	 /* V4.  */
14234       T(V6K),    /* V4T.  */
14235       T(V6K),    /* V5T.  */
14236       T(V6K),    /* V5TE.  */
14237       T(V6K),    /* V5TEJ.  */
14238       T(V6K),    /* V6.  */
14239       T(V6KZ),   /* V6KZ.  */
14240       T(V7),     /* V6T2.  */
14241       T(V6K),    /* V6K.  */
14242       T(V7),     /* V7.  */
14243       T(V6S_M),  /* V6_M.  */
14244       T(V6S_M)   /* V6S_M.  */
14245     };
14246   const int v7e_m[] =
14247     {
14248       -1,	 /* PRE_V4.  */
14249       -1,	 /* V4.  */
14250       T(V7E_M),  /* V4T.  */
14251       T(V7E_M),  /* V5T.  */
14252       T(V7E_M),  /* V5TE.  */
14253       T(V7E_M),  /* V5TEJ.  */
14254       T(V7E_M),  /* V6.  */
14255       T(V7E_M),  /* V6KZ.  */
14256       T(V7E_M),  /* V6T2.  */
14257       T(V7E_M),  /* V6K.  */
14258       T(V7E_M),  /* V7.  */
14259       T(V7E_M),  /* V6_M.  */
14260       T(V7E_M),  /* V6S_M.  */
14261       T(V7E_M)   /* V7E_M.  */
14262     };
14263   const int v8[] =
14264     {
14265       T(V8),		/* PRE_V4.  */
14266       T(V8),		/* V4.  */
14267       T(V8),		/* V4T.  */
14268       T(V8),		/* V5T.  */
14269       T(V8),		/* V5TE.  */
14270       T(V8),		/* V5TEJ.  */
14271       T(V8),		/* V6.  */
14272       T(V8),		/* V6KZ.  */
14273       T(V8),		/* V6T2.  */
14274       T(V8),		/* V6K.  */
14275       T(V8),		/* V7.  */
14276       T(V8),		/* V6_M.  */
14277       T(V8),		/* V6S_M.  */
14278       T(V8),		/* V7E_M.  */
14279       T(V8)		/* V8.  */
14280     };
14281   const int v8r[] =
14282     {
14283       T(V8R),		/* PRE_V4.  */
14284       T(V8R),		/* V4.  */
14285       T(V8R),		/* V4T.  */
14286       T(V8R),		/* V5T.  */
14287       T(V8R),		/* V5TE.  */
14288       T(V8R),		/* V5TEJ.  */
14289       T(V8R),		/* V6.  */
14290       T(V8R),		/* V6KZ.  */
14291       T(V8R),		/* V6T2.  */
14292       T(V8R),		/* V6K.  */
14293       T(V8R),		/* V7.  */
14294       T(V8R),		/* V6_M.  */
14295       T(V8R),		/* V6S_M.  */
14296       T(V8R),		/* V7E_M.  */
14297       T(V8),		/* V8.  */
14298       T(V8R),		/* V8R.  */
14299     };
14300   const int v8m_baseline[] =
14301     {
14302       -1,		/* PRE_V4.  */
14303       -1,		/* V4.  */
14304       -1,		/* V4T.  */
14305       -1,		/* V5T.  */
14306       -1,		/* V5TE.  */
14307       -1,		/* V5TEJ.  */
14308       -1,		/* V6.  */
14309       -1,		/* V6KZ.  */
14310       -1,		/* V6T2.  */
14311       -1,		/* V6K.  */
14312       -1,		/* V7.  */
14313       T(V8M_BASE),	/* V6_M.  */
14314       T(V8M_BASE),	/* V6S_M.  */
14315       -1,		/* V7E_M.  */
14316       -1,		/* V8.  */
14317       -1,		/* V8R.  */
14318       T(V8M_BASE)	/* V8-M BASELINE.  */
14319     };
14320   const int v8m_mainline[] =
14321     {
14322       -1,		/* PRE_V4.  */
14323       -1,		/* V4.  */
14324       -1,		/* V4T.  */
14325       -1,		/* V5T.  */
14326       -1,		/* V5TE.  */
14327       -1,		/* V5TEJ.  */
14328       -1,		/* V6.  */
14329       -1,		/* V6KZ.  */
14330       -1,		/* V6T2.  */
14331       -1,		/* V6K.  */
14332       T(V8M_MAIN),	/* V7.  */
14333       T(V8M_MAIN),	/* V6_M.  */
14334       T(V8M_MAIN),	/* V6S_M.  */
14335       T(V8M_MAIN),	/* V7E_M.  */
14336       -1,		/* V8.  */
14337       -1,		/* V8R.  */
14338       T(V8M_MAIN),	/* V8-M BASELINE.  */
14339       T(V8M_MAIN)	/* V8-M MAINLINE.  */
14340     };
14341   const int v8_1m_mainline[] =
14342     {
14343       -1,		/* PRE_V4.  */
14344       -1,		/* V4.  */
14345       -1,		/* V4T.  */
14346       -1,		/* V5T.  */
14347       -1,		/* V5TE.  */
14348       -1,		/* V5TEJ.  */
14349       -1,		/* V6.  */
14350       -1,		/* V6KZ.  */
14351       -1,		/* V6T2.  */
14352       -1,		/* V6K.  */
14353       T(V8_1M_MAIN),	/* V7.  */
14354       T(V8_1M_MAIN),	/* V6_M.  */
14355       T(V8_1M_MAIN),	/* V6S_M.  */
14356       T(V8_1M_MAIN),	/* V7E_M.  */
14357       -1,		/* V8.  */
14358       -1,		/* V8R.  */
14359       T(V8_1M_MAIN),	/* V8-M BASELINE.  */
14360       T(V8_1M_MAIN),	/* V8-M MAINLINE.  */
14361       -1,		/* Unused (18).  */
14362       -1,		/* Unused (19).  */
14363       -1,		/* Unused (20).  */
14364       T(V8_1M_MAIN)	/* V8.1-M MAINLINE.  */
14365     };
14366   const int v4t_plus_v6_m[] =
14367     {
14368       -1,		/* PRE_V4.  */
14369       -1,		/* V4.  */
14370       T(V4T),		/* V4T.  */
14371       T(V5T),		/* V5T.  */
14372       T(V5TE),		/* V5TE.  */
14373       T(V5TEJ),		/* V5TEJ.  */
14374       T(V6),		/* V6.  */
14375       T(V6KZ),		/* V6KZ.  */
14376       T(V6T2),		/* V6T2.  */
14377       T(V6K),		/* V6K.  */
14378       T(V7),		/* V7.  */
14379       T(V6_M),		/* V6_M.  */
14380       T(V6S_M),		/* V6S_M.  */
14381       T(V7E_M),		/* V7E_M.  */
14382       T(V8),		/* V8.  */
14383       -1,		/* V8R.  */
14384       T(V8M_BASE),	/* V8-M BASELINE.  */
14385       T(V8M_MAIN),	/* V8-M MAINLINE.  */
14386       -1,		/* Unused (18).  */
14387       -1,		/* Unused (19).  */
14388       -1,		/* Unused (20).  */
14389       T(V8_1M_MAIN),	/* V8.1-M MAINLINE.  */
14390       T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
14391     };
14392   const int *comb[] =
14393     {
14394       v6t2,
14395       v6k,
14396       v7,
14397       v6_m,
14398       v6s_m,
14399       v7e_m,
14400       v8,
14401       v8r,
14402       v8m_baseline,
14403       v8m_mainline,
14404       NULL,
14405       NULL,
14406       NULL,
14407       v8_1m_mainline,
14408       /* Pseudo-architecture.  */
14409       v4t_plus_v6_m
14410     };
14411 
14412   /* Check we've not got a higher architecture than we know about.  */
14413 
14414   if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
14415     {
14416       _bfd_error_handler (_("error: %pB: unknown CPU architecture"), ibfd);
14417       return -1;
14418     }
14419 
14420   /* Override old tag if we have a Tag_also_compatible_with on the output.  */
14421 
14422   if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
14423       || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
14424     oldtag = T(V4T_PLUS_V6_M);
14425 
14426   /* And override the new tag if we have a Tag_also_compatible_with on the
14427      input.  */
14428 
14429   if ((newtag == T(V6_M) && secondary_compat == T(V4T))
14430       || (newtag == T(V4T) && secondary_compat == T(V6_M)))
14431     newtag = T(V4T_PLUS_V6_M);
14432 
14433   tagl = (oldtag < newtag) ? oldtag : newtag;
14434   result = tagh = (oldtag > newtag) ? oldtag : newtag;
14435 
14436   /* Architectures before V6KZ add features monotonically.  */
14437   if (tagh <= TAG_CPU_ARCH_V6KZ)
14438     return result;
14439 
14440   result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
14441 
14442   /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
14443      as the canonical version.  */
14444   if (result == T(V4T_PLUS_V6_M))
14445     {
14446       result = T(V4T);
14447       *secondary_compat_out = T(V6_M);
14448     }
14449   else
14450     *secondary_compat_out = -1;
14451 
14452   if (result == -1)
14453     {
14454       _bfd_error_handler (_("error: %pB: conflicting CPU architectures %d/%d"),
14455 			  ibfd, oldtag, newtag);
14456       return -1;
14457     }
14458 
14459   return result;
14460 #undef T
14461 }
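/* A worked example of the table lookup above (sketch only; this helper is
   not called anywhere): merging an ARMv7 input into an ARMv6S-M output.
   tagh is V6S_M, which selects the v6s_m[] row; tagl is V7, which indexes
   it; the merged architecture is therefore V7.  */

static void ATTRIBUTE_UNUSED
sketch_tag_cpu_arch_combine (bfd *ibfd)
{
  int secondary_compat_out = -1;
  int merged = tag_cpu_arch_combine (ibfd, TAG_CPU_ARCH_V6S_M,
				     &secondary_compat_out,
				     TAG_CPU_ARCH_V7, -1);

  BFD_ASSERT (merged == TAG_CPU_ARCH_V7);
}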
14462 
14463 /* Query the attributes object to see whether integer divide instructions
14464    may be present in an object.  */
14465 static bfd_boolean
14466 elf32_arm_attributes_accept_div (const obj_attribute *attr)
14467 {
14468   int arch = attr[Tag_CPU_arch].i;
14469   int profile = attr[Tag_CPU_arch_profile].i;
14470 
14471   switch (attr[Tag_DIV_use].i)
14472     {
14473     case 0:
14474       /* Integer divide allowed if the instruction is in the architecture.  */
14475       if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
14476 	return TRUE;
14477       else if (arch >= TAG_CPU_ARCH_V7E_M)
14478 	return TRUE;
14479       else
14480 	return FALSE;
14481 
14482     case 1:
14483       /* Integer divide explicitly prohibited.  */
14484       return FALSE;
14485 
14486     default:
14487       /* Unrecognised case - treat as allowing divide everywhere.  */
14488     case 2:
14489       /* Integer divide allowed in ARM state.  */
14490       return TRUE;
14491     }
14492 }
14493 
14494 /* Query the attributes object to see whether integer divide instructions
14495    are forbidden in the object.  This is not the inverse of
14496    elf32_arm_attributes_accept_div.  */
14497 static bfd_boolean
14498 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
14499 {
14500   return attr[Tag_DIV_use].i == 1;
14501 }
14502 
14503 /* Merge EABI object attributes from IBFD into OBFD.  Raise an error if there
14504    are conflicting attributes.  */
14505 
14506 static bfd_boolean
14507 elf32_arm_merge_eabi_attributes (bfd *ibfd, struct bfd_link_info *info)
14508 {
14509   bfd *obfd = info->output_bfd;
14510   obj_attribute *in_attr;
14511   obj_attribute *out_attr;
14512   /* Some tags have 0 = don't care, 1 = strong requirement,
14513      2 = weak requirement.  */
14514   static const int order_021[3] = {0, 2, 1};
14515   int i;
14516   bfd_boolean result = TRUE;
14517   const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
14518 
14519   /* Skip the linker stubs file.  This preserves previous behavior
14520      of accepting unknown attributes in the first input file - but
14521      is that a bug?  */
14522   if (ibfd->flags & BFD_LINKER_CREATED)
14523     return TRUE;
14524 
14525   /* Skip any input that doesn't have an attribute section.
14526      This makes it possible to link object files without an attribute
14527      section with any others.  */
14528   if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
14529     return TRUE;
14530 
14531   if (!elf_known_obj_attributes_proc (obfd)[0].i)
14532     {
14533       /* This is the first object.  Copy the attributes.  */
14534       _bfd_elf_copy_obj_attributes (ibfd, obfd);
14535 
14536       out_attr = elf_known_obj_attributes_proc (obfd);
14537 
14538       /* Use the Tag_null value to indicate the attributes have been
14539 	 initialized.  */
14540       out_attr[0].i = 1;
14541 
14542       /* We do not output objects with Tag_MPextension_use_legacy - we move
14543 	 the attribute's value to Tag_MPextension_use.  */
14544       if (out_attr[Tag_MPextension_use_legacy].i != 0)
14545 	{
14546 	  if (out_attr[Tag_MPextension_use].i != 0
14547 	      && out_attr[Tag_MPextension_use_legacy].i
14548 		!= out_attr[Tag_MPextension_use].i)
14549 	    {
14550 	      _bfd_error_handler
14551 		(_("Error: %pB has both the current and legacy "
14552 		   "Tag_MPextension_use attributes"), ibfd);
14553 	      result = FALSE;
14554 	    }
14555 
14556 	  out_attr[Tag_MPextension_use] =
14557 	    out_attr[Tag_MPextension_use_legacy];
14558 	  out_attr[Tag_MPextension_use_legacy].type = 0;
14559 	  out_attr[Tag_MPextension_use_legacy].i = 0;
14560 	}
14561 
14562       return result;
14563     }
14564 
14565   in_attr = elf_known_obj_attributes_proc (ibfd);
14566   out_attr = elf_known_obj_attributes_proc (obfd);
14567   /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
14568   if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
14569     {
14570       /* Ignore mismatches if the object doesn't use floating point or is
14571 	 floating point ABI independent.  */
14572       if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
14573 	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14574 	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
14575 	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
14576       else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
14577 	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
14578 	{
14579 	  _bfd_error_handler
14580 	    (_("error: %pB uses VFP register arguments, %pB does not"),
14581 	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
14582 	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
14583 	  result = FALSE;
14584 	}
14585     }
14586 
14587   for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
14588     {
14589       /* Merge this attribute with existing attributes.  */
14590       switch (i)
14591 	{
14592 	case Tag_CPU_raw_name:
14593 	case Tag_CPU_name:
14594 	  /* These are merged after Tag_CPU_arch.  */
14595 	  break;
14596 
14597 	case Tag_ABI_optimization_goals:
14598 	case Tag_ABI_FP_optimization_goals:
14599 	  /* Use the first value seen.  */
14600 	  break;
14601 
14602 	case Tag_CPU_arch:
14603 	  {
14604 	    int secondary_compat = -1, secondary_compat_out = -1;
14605 	    unsigned int saved_out_attr = out_attr[i].i;
14606 	    int arch_attr;
14607 	    static const char *name_table[] =
14608 	      {
14609 		/* These aren't real CPU names, but we can't guess
14610 		   that from the architecture version alone.  */
14611 		"Pre v4",
14612 		"ARM v4",
14613 		"ARM v4T",
14614 		"ARM v5T",
14615 		"ARM v5TE",
14616 		"ARM v5TEJ",
14617 		"ARM v6",
14618 		"ARM v6KZ",
14619 		"ARM v6T2",
14620 		"ARM v6K",
14621 		"ARM v7",
14622 		"ARM v6-M",
14623 		"ARM v6S-M",
14624 		"ARM v8",
14625 		"",
14626 		"ARM v8-M.baseline",
14627 		"ARM v8-M.mainline",
14628 	    };
14629 
14630 	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
14631 	    secondary_compat = get_secondary_compatible_arch (ibfd);
14632 	    secondary_compat_out = get_secondary_compatible_arch (obfd);
14633 	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
14634 					      &secondary_compat_out,
14635 					      in_attr[i].i,
14636 					      secondary_compat);
14637 
14638 	    /* Return with error if failed to merge.  */
14639 	    if (arch_attr == -1)
14640 	      return FALSE;
14641 
14642 	    out_attr[i].i = arch_attr;
14643 
14644 	    set_secondary_compatible_arch (obfd, secondary_compat_out);
14645 
14646 	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
14647 	    if (out_attr[i].i == saved_out_attr)
14648 	      ; /* Leave the names alone.  */
14649 	    else if (out_attr[i].i == in_attr[i].i)
14650 	      {
14651 		/* The output architecture has been changed to match the
14652 		   input architecture.  Use the input names.  */
14653 		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
14654 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
14655 		  : NULL;
14656 		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
14657 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
14658 		  : NULL;
14659 	      }
14660 	    else
14661 	      {
14662 		out_attr[Tag_CPU_name].s = NULL;
14663 		out_attr[Tag_CPU_raw_name].s = NULL;
14664 	      }
14665 
14666 	    /* If we still don't have a value for Tag_CPU_name,
14667 	       make one up now.  Tag_CPU_raw_name remains blank.  */
14668 	    if (out_attr[Tag_CPU_name].s == NULL
14669 		&& out_attr[i].i < ARRAY_SIZE (name_table))
14670 	      out_attr[Tag_CPU_name].s =
14671 		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
14672 	  }
14673 	  break;
14674 
14675 	case Tag_ARM_ISA_use:
14676 	case Tag_THUMB_ISA_use:
14677 	case Tag_WMMX_arch:
14678 	case Tag_Advanced_SIMD_arch:
14679 	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
14680 	case Tag_ABI_FP_rounding:
14681 	case Tag_ABI_FP_exceptions:
14682 	case Tag_ABI_FP_user_exceptions:
14683 	case Tag_ABI_FP_number_model:
14684 	case Tag_FP_HP_extension:
14685 	case Tag_CPU_unaligned_access:
14686 	case Tag_T2EE_use:
14687 	case Tag_MPextension_use:
14688 	case Tag_MVE_arch:
14689 	  /* Use the largest value specified.  */
14690 	  if (in_attr[i].i > out_attr[i].i)
14691 	    out_attr[i].i = in_attr[i].i;
14692 	  break;
14693 
14694 	case Tag_ABI_align_preserved:
14695 	case Tag_ABI_PCS_RO_data:
14696 	  /* Use the smallest value specified.  */
14697 	  if (in_attr[i].i < out_attr[i].i)
14698 	    out_attr[i].i = in_attr[i].i;
14699 	  break;
14700 
14701 	case Tag_ABI_align_needed:
14702 	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
14703 	      && (in_attr[Tag_ABI_align_preserved].i == 0
14704 		  || out_attr[Tag_ABI_align_preserved].i == 0))
14705 	    {
14706 	      /* This error message should be enabled once all non-conformant
14707 		 binaries in the toolchain have had the attributes set
14708 		 properly.
14709 	      _bfd_error_handler
14710 		(_("error: %pB: 8-byte data alignment conflicts with %pB"),
14711 		 obfd, ibfd);
14712 	      result = FALSE; */
14713 	    }
14714 	  /* Fall through.  */
14715 	case Tag_ABI_FP_denormal:
14716 	case Tag_ABI_PCS_GOT_use:
14717 	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
14718 	     value if greater than 2 (for future-proofing).  */
14719 	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
14720 	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
14721 		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
14722 	    out_attr[i].i = in_attr[i].i;
14723 	  break;
14724 
14725 	case Tag_Virtualization_use:
14726 	  /* The virtualization tag effectively stores two bits of
14727 	     information: the intended use of TrustZone (in bit 0), and the
14728 	     intended use of Virtualization (in bit 1).  */
14729 	  if (out_attr[i].i == 0)
14730 	    out_attr[i].i = in_attr[i].i;
14731 	  else if (in_attr[i].i != 0
14732 		   && in_attr[i].i != out_attr[i].i)
14733 	    {
14734 	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
14735 		out_attr[i].i = 3;
14736 	      else
14737 		{
14738 		  _bfd_error_handler
14739 		    (_("error: %pB: unable to merge virtualization attributes "
14740 		       "with %pB"),
14741 		     obfd, ibfd);
14742 		  result = FALSE;
14743 		}
14744 	    }
14745 	  break;
14746 
14747 	case Tag_CPU_arch_profile:
14748 	  if (out_attr[i].i != in_attr[i].i)
14749 	    {
14750 	      /* 0 will merge with anything.
14751 		 'A' and 'S' merge to 'A'.
14752 		 'R' and 'S' merge to 'R'.
14753 		 'M' and 'A|R|S' is an error.  */
14754 	      if (out_attr[i].i == 0
14755 		  || (out_attr[i].i == 'S'
14756 		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
14757 		out_attr[i].i = in_attr[i].i;
14758 	      else if (in_attr[i].i == 0
14759 		       || (in_attr[i].i == 'S'
14760 			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
14761 		; /* Do nothing.  */
14762 	      else
14763 		{
14764 		  _bfd_error_handler
14765 		    (_("error: %pB: conflicting architecture profiles %c/%c"),
14766 		     ibfd,
14767 		     in_attr[i].i ? in_attr[i].i : '0',
14768 		     out_attr[i].i ? out_attr[i].i : '0');
14769 		  result = FALSE;
14770 		}
14771 	    }
14772 	  break;
14773 
14774 	case Tag_DSP_extension:
14775 	  /* No need to change the output value if any of:
14776 	     - pre (<=) ARMv5T input architecture (does not have DSP)
14777 	     - M-profile input that is not ARMv7E-M and does not have DSP.  */
14778 	  if (in_attr[Tag_CPU_arch].i <= 3
14779 	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
14780 		  && in_attr[Tag_CPU_arch].i != 13
14781 		  && in_attr[i].i == 0))
14782 	    ; /* Do nothing.  */
14783 	  /* Output value should be 0 if DSP is part of the architecture, i.e.
14784 	     - post (>=) ARMv5TE output architecture
14785 	     - A, R or S profile output or ARMv7E-M output architecture.  */
14786 	  else if (out_attr[Tag_CPU_arch].i >= 4
14787 		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
14788 		       || out_attr[Tag_CPU_arch_profile].i == 'R'
14789 		       || out_attr[Tag_CPU_arch_profile].i == 'S'
14790 		       || out_attr[Tag_CPU_arch].i == 13))
14791 	    out_attr[i].i = 0;
14792 	  /* Otherwise, DSP instructions are added and are not part of the
14793 	     output architecture.  */
14794 	  else
14795 	    out_attr[i].i = 1;
14796 	  break;
14797 
14798 	case Tag_FP_arch:
14799 	    {
14800 	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
14801 		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
14802 		 when it's 0.  It might mean absence of FP hardware if
14803 		 Tag_FP_arch is zero.  */
14804 
14805 #define VFP_VERSION_COUNT 9
14806 	      static const struct
14807 	      {
14808 		  int ver;
14809 		  int regs;
14810 	      } vfp_versions[VFP_VERSION_COUNT] =
14811 		{
14812 		  {0, 0},
14813 		  {1, 16},
14814 		  {2, 16},
14815 		  {3, 32},
14816 		  {3, 16},
14817 		  {4, 32},
14818 		  {4, 16},
14819 		  {8, 32},
14820 		  {8, 16}
14821 		};
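	      /* Illustration: merging Tag_FP_arch 5 ({4, 32}, VFPv4-D32)
		 with Tag_FP_arch 6 ({4, 16}, VFPv4-D16) takes ver = 4 and
		 regs = 32, and the search below lands back on entry 5,
		 i.e. the VFPv4-D32 superset.  */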
14822 	      int ver;
14823 	      int regs;
14824 	      int newval;
14825 
14826 	      /* If the output has no requirement about FP hardware,
14827 		 follow the requirement of the input.  */
14828 	      if (out_attr[i].i == 0)
14829 		{
14830 		  /* This assert is still reasonable; we shouldn't
14831 		     produce the suspicious build attribute
14832 		     combination (see below for in_attr).  */
14833 		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
14834 		  out_attr[i].i = in_attr[i].i;
14835 		  out_attr[Tag_ABI_HardFP_use].i
14836 		    = in_attr[Tag_ABI_HardFP_use].i;
14837 		  break;
14838 		}
14839 	      /* If the input has no requirement about FP hardware, do
14840 		 nothing.  */
14841 	      else if (in_attr[i].i == 0)
14842 		{
14843 		  /* We used to assert that Tag_ABI_HardFP_use was
14844 		     zero here, but we should never assert when
14845 		     consuming an object file that has suspicious
14846 		     build attributes.  The single precision variant
14847 		     of 'no FP architecture' is still 'no FP
14848 		     architecture', so we just ignore the tag in this
14849 		     case.  */
14850 		  break;
14851 		}
14852 
14853 	      /* Both the input and the output have nonzero Tag_FP_arch.
14854 		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */
14855 
14856 	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
14857 		 do nothing.  */
14858 	      if (in_attr[Tag_ABI_HardFP_use].i == 0
14859 		  && out_attr[Tag_ABI_HardFP_use].i == 0)
14860 		;
14861 	      /* If the input and the output have different Tag_ABI_HardFP_use,
14862 		 the combination of them is 0 (implied by Tag_FP_arch).  */
14863 	      else if (in_attr[Tag_ABI_HardFP_use].i
14864 		       != out_attr[Tag_ABI_HardFP_use].i)
14865 		out_attr[Tag_ABI_HardFP_use].i = 0;
14866 
14867 	      /* Now we can handle Tag_FP_arch.  */
14868 
14869 	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
14870 		 pick the biggest.  */
14871 	      if (in_attr[i].i >= VFP_VERSION_COUNT
14872 		  && in_attr[i].i > out_attr[i].i)
14873 		{
14874 		  out_attr[i] = in_attr[i];
14875 		  break;
14876 		}
14877 	      /* The output uses the superset of input features
14878 		 (ISA version) and registers.  */
14879 	      ver = vfp_versions[in_attr[i].i].ver;
14880 	      if (ver < vfp_versions[out_attr[i].i].ver)
14881 		ver = vfp_versions[out_attr[i].i].ver;
14882 	      regs = vfp_versions[in_attr[i].i].regs;
14883 	      if (regs < vfp_versions[out_attr[i].i].regs)
14884 		regs = vfp_versions[out_attr[i].i].regs;
14885 	      /* This assumes all possible supersets are also valid
14886 		 options.  */
14887 	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
14888 		{
14889 		  if (regs == vfp_versions[newval].regs
14890 		      && ver == vfp_versions[newval].ver)
14891 		    break;
14892 		}
14893 	      out_attr[i].i = newval;
14894 	    }
14895 	  break;
14896 	case Tag_PCS_config:
14897 	  if (out_attr[i].i == 0)
14898 	    out_attr[i].i = in_attr[i].i;
14899 	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
14900 	    {
14901 	      /* It's sometimes ok to mix different configs, so this is only
14902 		 a warning.  */
14903 	      _bfd_error_handler
14904 		(_("warning: %pB: conflicting platform configuration"), ibfd);
14905 	    }
14906 	  break;
14907 	case Tag_ABI_PCS_R9_use:
14908 	  if (in_attr[i].i != out_attr[i].i
14909 	      && out_attr[i].i != AEABI_R9_unused
14910 	      && in_attr[i].i != AEABI_R9_unused)
14911 	    {
14912 	      _bfd_error_handler
14913 		(_("error: %pB: conflicting use of R9"), ibfd);
14914 	      result = FALSE;
14915 	    }
14916 	  if (out_attr[i].i == AEABI_R9_unused)
14917 	    out_attr[i].i = in_attr[i].i;
14918 	  break;
14919 	case Tag_ABI_PCS_RW_data:
14920 	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
14921 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
14922 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
14923 	    {
14924 	      _bfd_error_handler
14925 		(_("error: %pB: SB relative addressing conflicts with use of R9"),
14926 		 ibfd);
14927 	      result = FALSE;
14928 	    }
14929 	  /* Use the smallest value specified.  */
14930 	  if (in_attr[i].i < out_attr[i].i)
14931 	    out_attr[i].i = in_attr[i].i;
14932 	  break;
14933 	case Tag_ABI_PCS_wchar_t:
14934 	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
14935 	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
14936 	    {
14937 	      _bfd_error_handler
14938 		(_("warning: %pB uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
14939 		 ibfd, in_attr[i].i, out_attr[i].i);
14940 	    }
14941 	  else if (in_attr[i].i && !out_attr[i].i)
14942 	    out_attr[i].i = in_attr[i].i;
14943 	  break;
14944 	case Tag_ABI_enum_size:
14945 	  if (in_attr[i].i != AEABI_enum_unused)
14946 	    {
14947 	      if (out_attr[i].i == AEABI_enum_unused
14948 		  || out_attr[i].i == AEABI_enum_forced_wide)
14949 		{
14950 		  /* The existing object is compatible with anything.
14951 		     Use whatever requirements the new object has.  */
14952 		  out_attr[i].i = in_attr[i].i;
14953 		}
14954 	      else if (in_attr[i].i != AEABI_enum_forced_wide
14955 		       && out_attr[i].i != in_attr[i].i
14956 		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
14957 		{
14958 		  static const char *aeabi_enum_names[] =
14959 		    { "", "variable-size", "32-bit", "" };
14960 		  const char *in_name =
14961 		    in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14962 		    ? aeabi_enum_names[in_attr[i].i]
14963 		    : "<unknown>";
14964 		  const char *out_name =
14965 		    out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
14966 		    ? aeabi_enum_names[out_attr[i].i]
14967 		    : "<unknown>";
14968 		  _bfd_error_handler
14969 		    (_("warning: %pB uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
14970 		     ibfd, in_name, out_name);
14971 		}
14972 	    }
14973 	  break;
14974 	case Tag_ABI_VFP_args:
14975 	  /* Already done.  */
14976 	  break;
14977 	case Tag_ABI_WMMX_args:
14978 	  if (in_attr[i].i != out_attr[i].i)
14979 	    {
14980 	      _bfd_error_handler
14981 		(_("error: %pB uses iWMMXt register arguments, %pB does not"),
14982 		 ibfd, obfd);
14983 	      result = FALSE;
14984 	    }
14985 	  break;
14986 	case Tag_compatibility:
14987 	  /* Merged in target-independent code.  */
14988 	  break;
14989 	case Tag_ABI_HardFP_use:
14990 	  /* This is handled along with Tag_FP_arch.  */
14991 	  break;
14992 	case Tag_ABI_FP_16bit_format:
14993 	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
14994 	    {
14995 	      if (in_attr[i].i != out_attr[i].i)
14996 		{
14997 		  _bfd_error_handler
14998 		    (_("error: fp16 format mismatch between %pB and %pB"),
14999 		     ibfd, obfd);
15000 		  result = FALSE;
15001 		}
15002 	    }
15003 	  if (in_attr[i].i != 0)
15004 	    out_attr[i].i = in_attr[i].i;
15005 	  break;
15006 
15007 	case Tag_DIV_use:
15008 	  /* A value of zero on input means that the divide instruction may
15009 	     be used if available in the base architecture as specified via
15010 	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
15011 	     the user did not want divide instructions.  A value of 2
15012 	     explicitly means that divide instructions were allowed in ARM
15013 	     and Thumb state.  */
15014 	  if (in_attr[i].i == out_attr[i].i)
15015 	    /* Do nothing.  */ ;
15016 	  else if (elf32_arm_attributes_forbid_div (in_attr)
15017 		   && !elf32_arm_attributes_accept_div (out_attr))
15018 	    out_attr[i].i = 1;
15019 	  else if (elf32_arm_attributes_forbid_div (out_attr)
15020 		   && elf32_arm_attributes_accept_div (in_attr))
15021 	    out_attr[i].i = in_attr[i].i;
15022 	  else if (in_attr[i].i == 2)
15023 	    out_attr[i].i = in_attr[i].i;
15024 	  break;
15025 
15026 	case Tag_MPextension_use_legacy:
15027 	  /* We don't output objects with Tag_MPextension_use_legacy - we
15028 	     move the value to Tag_MPextension_use.  */
15029 	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
15030 	    {
15031 	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
15032 		{
15033 		  _bfd_error_handler
15034 		    (_("%pB has both the current and legacy "
15035 		       "Tag_MPextension_use attributes"),
15036 		     ibfd);
15037 		  result = FALSE;
15038 		}
15039 	    }
15040 
15041 	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
15042 	    out_attr[Tag_MPextension_use] = in_attr[i];
15043 
15044 	  break;
15045 
15046 	case Tag_nodefaults:
15047 	  /* This tag is set if it exists, but the value is unused (and is
15048 	     typically zero).  We don't actually need to do anything here -
15049 	     the merge happens automatically when the type flags are merged
15050 	     below.  */
15051 	  break;
15052 	case Tag_also_compatible_with:
15053 	  /* Already done in Tag_CPU_arch.  */
15054 	  break;
15055 	case Tag_conformance:
15056 	  /* Keep the attribute if it matches.  Throw it away otherwise.
15057 	     No attribute means no claim to conform.  */
15058 	  if (!in_attr[i].s || !out_attr[i].s
15059 	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
15060 	    out_attr[i].s = NULL;
15061 	  break;
15062 
15063 	default:
15064 	  result
15065 	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
15066 	}
15067 
15068       /* If out_attr was copied from in_attr then it won't have a type yet.  */
15069       if (in_attr[i].type && !out_attr[i].type)
15070 	out_attr[i].type = in_attr[i].type;
15071     }
15072 
15073   /* Merge Tag_compatibility attributes and any common GNU ones.  */
15074   if (!_bfd_elf_merge_object_attributes (ibfd, info))
15075     return FALSE;
15076 
15077   /* Check for any attributes not known on ARM.  */
15078   result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
15079 
15080   return result;
15081 }
15082 
15083 
15084 /* Return TRUE if the two EABI versions are incompatible.  */
15085 
15086 static bfd_boolean
15087 elf32_arm_versions_compatible (unsigned iver, unsigned over)
15088 {
15089   /* v4 and v5 are the same spec before and after it was released,
15090      so allow mixing them.  */
15091   if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
15092       || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
15093     return TRUE;
15094 
15095   return (iver == over);
15096 }
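
/* For example, an input marked EF_ARM_EABI_VER4 is considered compatible
   with an output marked EF_ARM_EABI_VER5 (and vice versa); any other pair
   of differing versions is not.  */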
15097 
15098 /* Merge backend specific data from an object file to the output
15099    object file when linking.  */
15100 
15101 static bfd_boolean
15102 elf32_arm_merge_private_bfd_data (bfd *, struct bfd_link_info *);
15103 
15104 /* Display the flags field.  */
15105 
15106 static bfd_boolean
15107 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
15108 {
15109   FILE * file = (FILE *) ptr;
15110   unsigned long flags;
15111 
15112   BFD_ASSERT (abfd != NULL && ptr != NULL);
15113 
15114   /* Print normal ELF private data.  */
15115   _bfd_elf_print_private_bfd_data (abfd, ptr);
15116 
15117   flags = elf_elfheader (abfd)->e_flags;
15118   /* Ignore init flag - it may not be set, despite the flags field
15119      containing valid data.  */
15120 
15121   fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
15122 
15123   switch (EF_ARM_EABI_VERSION (flags))
15124     {
15125     case EF_ARM_EABI_UNKNOWN:
15126       /* The following flag bits are GNU extensions and not part of the
15127 	 official ARM ELF extended ABI.  Hence they are only decoded if
15128 	 the EABI version is not set.  */
15129       if (flags & EF_ARM_INTERWORK)
15130 	fprintf (file, _(" [interworking enabled]"));
15131 
15132       if (flags & EF_ARM_APCS_26)
15133 	fprintf (file, " [APCS-26]");
15134       else
15135 	fprintf (file, " [APCS-32]");
15136 
15137       if (flags & EF_ARM_VFP_FLOAT)
15138 	fprintf (file, _(" [VFP float format]"));
15139       else if (flags & EF_ARM_MAVERICK_FLOAT)
15140 	fprintf (file, _(" [Maverick float format]"));
15141       else
15142 	fprintf (file, _(" [FPA float format]"));
15143 
15144       if (flags & EF_ARM_APCS_FLOAT)
15145 	fprintf (file, _(" [floats passed in float registers]"));
15146 
15147       if (flags & EF_ARM_PIC)
15148 	fprintf (file, _(" [position independent]"));
15149 
15150       if (flags & EF_ARM_NEW_ABI)
15151 	fprintf (file, _(" [new ABI]"));
15152 
15153       if (flags & EF_ARM_OLD_ABI)
15154 	fprintf (file, _(" [old ABI]"));
15155 
15156       if (flags & EF_ARM_SOFT_FLOAT)
15157 	fprintf (file, _(" [software FP]"));
15158 
15159       flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
15160 		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
15161 		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
15162 		 | EF_ARM_MAVERICK_FLOAT);
15163       break;
15164 
15165     case EF_ARM_EABI_VER1:
15166       fprintf (file, _(" [Version1 EABI]"));
15167 
15168       if (flags & EF_ARM_SYMSARESORTED)
15169 	fprintf (file, _(" [sorted symbol table]"));
15170       else
15171 	fprintf (file, _(" [unsorted symbol table]"));
15172 
15173       flags &= ~ EF_ARM_SYMSARESORTED;
15174       break;
15175 
15176     case EF_ARM_EABI_VER2:
15177       fprintf (file, _(" [Version2 EABI]"));
15178 
15179       if (flags & EF_ARM_SYMSARESORTED)
15180 	fprintf (file, _(" [sorted symbol table]"));
15181       else
15182 	fprintf (file, _(" [unsorted symbol table]"));
15183 
15184       if (flags & EF_ARM_DYNSYMSUSESEGIDX)
15185 	fprintf (file, _(" [dynamic symbols use segment index]"));
15186 
15187       if (flags & EF_ARM_MAPSYMSFIRST)
15188 	fprintf (file, _(" [mapping symbols precede others]"));
15189 
15190       flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
15191 		 | EF_ARM_MAPSYMSFIRST);
15192       break;
15193 
15194     case EF_ARM_EABI_VER3:
15195       fprintf (file, _(" [Version3 EABI]"));
15196       break;
15197 
15198     case EF_ARM_EABI_VER4:
15199       fprintf (file, _(" [Version4 EABI]"));
15200       goto eabi;
15201 
15202     case EF_ARM_EABI_VER5:
15203       fprintf (file, _(" [Version5 EABI]"));
15204 
15205       if (flags & EF_ARM_ABI_FLOAT_SOFT)
15206 	fprintf (file, _(" [soft-float ABI]"));
15207 
15208       if (flags & EF_ARM_ABI_FLOAT_HARD)
15209 	fprintf (file, _(" [hard-float ABI]"));
15210 
15211       flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
15212 
15213     eabi:
15214       if (flags & EF_ARM_BE8)
15215 	fprintf (file, _(" [BE8]"));
15216 
15217       if (flags & EF_ARM_LE8)
15218 	fprintf (file, _(" [LE8]"));
15219 
15220       flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
15221       break;
15222 
15223     default:
15224       fprintf (file, _(" <EABI version unrecognised>"));
15225       break;
15226     }
15227 
15228   flags &= ~ EF_ARM_EABIMASK;
15229 
15230   if (flags & EF_ARM_RELEXEC)
15231     fprintf (file, _(" [relocatable executable]"));
15232 
15233   if (flags & EF_ARM_PIC)
15234     fprintf (file, _(" [position independent]"));
15235 
15236   if (elf_elfheader (abfd)->e_ident[EI_OSABI] == ELFOSABI_ARM_FDPIC)
15237     fprintf (file, _(" [FDPIC ABI supplement]"));
15238 
15239   flags &= ~ (EF_ARM_RELEXEC | EF_ARM_PIC);
15240 
15241   if (flags)
15242     fprintf (file, _("<Unrecognised flag bits set>"));
15243 
15244   fputc ('\n', file);
15245 
15246   return TRUE;
15247 }
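
/* Illustrative only: for a typical EABI v5 hard-float object the function
   above prints something like
       private flags = 5000400: [Version5 EABI] [hard-float ABI]
   assuming EF_ARM_EABI_VER5 is 0x05000000 and EF_ARM_ABI_FLOAT_HARD is
   0x400; the exact bit values come from elf/arm.h.  */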
15248 
15249 static int
15250 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
15251 {
15252   switch (ELF_ST_TYPE (elf_sym->st_info))
15253     {
15254     case STT_ARM_TFUNC:
15255       return ELF_ST_TYPE (elf_sym->st_info);
15256 
15257     case STT_ARM_16BIT:
15258       /* If the symbol is not an object, return the STT_ARM_16BIT flag.
15259 	 This allows us to distinguish between data used by Thumb instructions
15260 	 and non-data (which is probably code) inside Thumb regions of an
15261 	 executable.  */
15262       if (type != STT_OBJECT && type != STT_TLS)
15263 	return ELF_ST_TYPE (elf_sym->st_info);
15264       break;
15265 
15266     default:
15267       break;
15268     }
15269 
15270   return type;
15271 }
15272 
15273 static asection *
15274 elf32_arm_gc_mark_hook (asection *sec,
15275 			struct bfd_link_info *info,
15276 			Elf_Internal_Rela *rel,
15277 			struct elf_link_hash_entry *h,
15278 			Elf_Internal_Sym *sym)
15279 {
15280   if (h != NULL)
15281     switch (ELF32_R_TYPE (rel->r_info))
15282       {
15283       case R_ARM_GNU_VTINHERIT:
15284       case R_ARM_GNU_VTENTRY:
15285 	return NULL;
15286       }
15287 
15288   return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
15289 }
15290 
15291 /* Look through the relocs for a section during the first phase.  */
15292 
15293 static bfd_boolean
15294 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
15295 			asection *sec, const Elf_Internal_Rela *relocs)
15296 {
15297   Elf_Internal_Shdr *symtab_hdr;
15298   struct elf_link_hash_entry **sym_hashes;
15299   const Elf_Internal_Rela *rel;
15300   const Elf_Internal_Rela *rel_end;
15301   bfd *dynobj;
15302   asection *sreloc;
15303   struct elf32_arm_link_hash_table *htab;
15304   bfd_boolean call_reloc_p;
15305   bfd_boolean may_become_dynamic_p;
15306   bfd_boolean may_need_local_target_p;
15307   unsigned long nsyms;
15308 
15309   if (bfd_link_relocatable (info))
15310     return TRUE;
15311 
15312   BFD_ASSERT (is_arm_elf (abfd));
15313 
15314   htab = elf32_arm_hash_table (info);
15315   if (htab == NULL)
15316     return FALSE;
15317 
15318   sreloc = NULL;
15319 
15320   /* Create dynamic sections for relocatable executables so that we can
15321      copy relocations.  */
15322   if (htab->root.is_relocatable_executable
15323       && ! htab->root.dynamic_sections_created)
15324     {
15325       if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
15326 	return FALSE;
15327     }
15328 
15329   if (htab->root.dynobj == NULL)
15330     htab->root.dynobj = abfd;
15331   if (!create_ifunc_sections (info))
15332     return FALSE;
15333 
15334   dynobj = htab->root.dynobj;
15335 
15336   symtab_hdr = & elf_symtab_hdr (abfd);
15337   sym_hashes = elf_sym_hashes (abfd);
15338   nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
15339 
15340   rel_end = relocs + sec->reloc_count;
15341   for (rel = relocs; rel < rel_end; rel++)
15342     {
15343       Elf_Internal_Sym *isym;
15344       struct elf_link_hash_entry *h;
15345       struct elf32_arm_link_hash_entry *eh;
15346       unsigned int r_symndx;
15347       int r_type;
15348 
15349       r_symndx = ELF32_R_SYM (rel->r_info);
15350       r_type = ELF32_R_TYPE (rel->r_info);
15351       r_type = arm_real_reloc_type (htab, r_type);
15352 
15353       if (r_symndx >= nsyms
15354 	  /* PR 9934: It is possible to have relocations that do not
15355 	     refer to symbols, thus it is also possible to have an
15356 	     object file containing relocations but no symbol table.  */
15357 	  && (r_symndx > STN_UNDEF || nsyms > 0))
15358 	{
15359 	  _bfd_error_handler (_("%pB: bad symbol index: %d"), abfd,
15360 			      r_symndx);
15361 	  return FALSE;
15362 	}
15363 
15364       h = NULL;
15365       isym = NULL;
15366       if (nsyms > 0)
15367 	{
15368 	  if (r_symndx < symtab_hdr->sh_info)
15369 	    {
15370 	      /* A local symbol.  */
15371 	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
15372 					    abfd, r_symndx);
15373 	      if (isym == NULL)
15374 		return FALSE;
15375 	    }
15376 	  else
15377 	    {
15378 	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
15379 	      while (h->root.type == bfd_link_hash_indirect
15380 		     || h->root.type == bfd_link_hash_warning)
15381 		h = (struct elf_link_hash_entry *) h->root.u.i.link;
15382 	    }
15383 	}
15384 
15385       eh = (struct elf32_arm_link_hash_entry *) h;
15386 
15387       call_reloc_p = FALSE;
15388       may_become_dynamic_p = FALSE;
15389       may_need_local_target_p = FALSE;
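      /* These per-reloc flags are consumed after the switch below:
	 call_reloc_p marks branch/call relocs that may need a PLT entry
	 (h->needs_plt), may_need_local_target_p drives the (i)PLT
	 refcounting and may set non_got_ref, and may_become_dynamic_p
	 queues a dynamic relocation for the output.  */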
15390 
15391       /* Could be done earlier, if h were already available.  */
15392       r_type = elf32_arm_tls_transition (info, r_type, h);
15393       switch (r_type)
15394 	{
15395 	case R_ARM_GOTOFFFUNCDESC:
15396 	  {
15397 	    if (h == NULL)
15398 	      {
15399 		if (!elf32_arm_allocate_local_sym_info (abfd))
15400 		  return FALSE;
15401 		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].gotofffuncdesc_cnt += 1;
15402 		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15403 	      }
15404 	    else
15405 	      {
15406 		eh->fdpic_cnts.gotofffuncdesc_cnt++;
15407 	      }
15408 	  }
15409 	  break;
15410 
15411 	case R_ARM_GOTFUNCDESC:
15412 	  {
15413 	    if (h == NULL)
15414 	      {
15415 		/* Such a relocation is not supposed to be generated
15416 		   by gcc on a static function.  */
15417 		/* Anyway, if needed, it could be handled.  */
15418 		abort();
15419 	      }
15420 	    else
15421 	      {
15422 		eh->fdpic_cnts.gotfuncdesc_cnt++;
15423 	      }
15424 	  }
15425 	  break;
15426 
15427 	case R_ARM_FUNCDESC:
15428 	  {
15429 	    if (h == NULL)
15430 	      {
15431 		if (!elf32_arm_allocate_local_sym_info (abfd))
15432 		  return FALSE;
15433 		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_cnt += 1;
15434 		elf32_arm_local_fdpic_cnts(abfd)[r_symndx].funcdesc_offset = -1;
15435 	      }
15436 	    else
15437 	      {
15438 		eh->fdpic_cnts.funcdesc_cnt++;
15439 	      }
15440 	  }
15441 	  break;
15442 
15443 	  case R_ARM_GOT32:
15444 	  case R_ARM_GOT_PREL:
15445 	  case R_ARM_TLS_GD32:
15446 	  case R_ARM_TLS_GD32_FDPIC:
15447 	  case R_ARM_TLS_IE32:
15448 	  case R_ARM_TLS_IE32_FDPIC:
15449 	  case R_ARM_TLS_GOTDESC:
15450 	  case R_ARM_TLS_DESCSEQ:
15451 	  case R_ARM_THM_TLS_DESCSEQ:
15452 	  case R_ARM_TLS_CALL:
15453 	  case R_ARM_THM_TLS_CALL:
15454 	    /* This symbol requires a global offset table entry.  */
15455 	    {
15456 	      int tls_type, old_tls_type;
15457 
15458 	      switch (r_type)
15459 		{
15460 		case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
15461 		case R_ARM_TLS_GD32_FDPIC: tls_type = GOT_TLS_GD; break;
15462 
15463 		case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
15464 		case R_ARM_TLS_IE32_FDPIC: tls_type = GOT_TLS_IE; break;
15465 
15466 		case R_ARM_TLS_GOTDESC:
15467 		case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
15468 		case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
15469 		  tls_type = GOT_TLS_GDESC; break;
15470 
15471 		default: tls_type = GOT_NORMAL; break;
15472 		}
15473 
15474 	      if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
15475 		info->flags |= DF_STATIC_TLS;
15476 
15477 	      if (h != NULL)
15478 		{
15479 		  h->got.refcount++;
15480 		  old_tls_type = elf32_arm_hash_entry (h)->tls_type;
15481 		}
15482 	      else
15483 		{
15484 		  /* This is a global offset table entry for a local symbol.  */
15485 		  if (!elf32_arm_allocate_local_sym_info (abfd))
15486 		    return FALSE;
15487 		  elf_local_got_refcounts (abfd)[r_symndx] += 1;
15488 		  old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
15489 		}
15490 
15491 	      /* If a variable is accessed with both tls methods, two
15492 		 slots may be created.  */
15493 	      if (GOT_TLS_GD_ANY_P (old_tls_type)
15494 		  && GOT_TLS_GD_ANY_P (tls_type))
15495 		tls_type |= old_tls_type;
15496 
15497 	      /* We will already have issued an error message if there
15498 		 is a TLS/non-TLS mismatch, based on the symbol
15499 		 type.  So just combine any TLS types needed.  */
15500 	      if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
15501 		  && tls_type != GOT_NORMAL)
15502 		tls_type |= old_tls_type;
15503 
15504 	      /* If the symbol is accessed in both the IE and GDESC
15505 		 methods, we're able to relax.  Turn off the GDESC flag,
15506 		 without messing up any other kinds of TLS types
15507 		 that may be involved.  */
15508 	      if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
15509 		tls_type &= ~GOT_TLS_GDESC;
15510 
15511 	      if (old_tls_type != tls_type)
15512 		{
15513 		  if (h != NULL)
15514 		    elf32_arm_hash_entry (h)->tls_type = tls_type;
15515 		  else
15516 		    elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
15517 		}
15518 	    }
15519 	    /* Fall through.  */
15520 
15521 	  case R_ARM_TLS_LDM32:
15522 	  case R_ARM_TLS_LDM32_FDPIC:
15523 	    if (r_type == R_ARM_TLS_LDM32 || r_type == R_ARM_TLS_LDM32_FDPIC)
15524 		htab->tls_ldm_got.refcount++;
15525 	    /* Fall through.  */
15526 
15527 	  case R_ARM_GOTOFF32:
15528 	  case R_ARM_GOTPC:
15529 	    if (htab->root.sgot == NULL
15530 		&& !create_got_section (htab->root.dynobj, info))
15531 	      return FALSE;
15532 	    break;
15533 
15534 	  case R_ARM_PC24:
15535 	  case R_ARM_PLT32:
15536 	  case R_ARM_CALL:
15537 	  case R_ARM_JUMP24:
15538 	  case R_ARM_PREL31:
15539 	  case R_ARM_THM_CALL:
15540 	  case R_ARM_THM_JUMP24:
15541 	  case R_ARM_THM_JUMP19:
15542 	    call_reloc_p = TRUE;
15543 	    may_need_local_target_p = TRUE;
15544 	    break;
15545 
15546 	  case R_ARM_ABS12:
15547 	    /* VxWorks uses dynamic R_ARM_ABS12 relocations for
15548 	       ldr __GOTT_INDEX__ offsets.  */
15549 	    if (!htab->vxworks_p)
15550 	      {
15551 		may_need_local_target_p = TRUE;
15552 		break;
15553 	      }
15554 	    else goto jump_over;
15555 
15556 	    /* Fall through.  */
15557 
15558 	  case R_ARM_MOVW_ABS_NC:
15559 	  case R_ARM_MOVT_ABS:
15560 	  case R_ARM_THM_MOVW_ABS_NC:
15561 	  case R_ARM_THM_MOVT_ABS:
15562 	    if (bfd_link_pic (info))
15563 	      {
15564 		_bfd_error_handler
15565 		  (_("%pB: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
15566 		   abfd, elf32_arm_howto_table_1[r_type].name,
15567 		   (h) ? h->root.root.string : "a local symbol");
15568 		bfd_set_error (bfd_error_bad_value);
15569 		return FALSE;
15570 	      }
15571 
15572 	    /* Fall through.  */
15573 	  case R_ARM_ABS32:
15574 	  case R_ARM_ABS32_NOI:
15575 	jump_over:
15576 	    if (h != NULL && bfd_link_executable (info))
15577 	      {
15578 		h->pointer_equality_needed = 1;
15579 	      }
15580 	    /* Fall through.  */
15581 	  case R_ARM_REL32:
15582 	  case R_ARM_REL32_NOI:
15583 	  case R_ARM_MOVW_PREL_NC:
15584 	  case R_ARM_MOVT_PREL:
15585 	  case R_ARM_THM_MOVW_PREL_NC:
15586 	  case R_ARM_THM_MOVT_PREL:
15587 
15588 	    /* Should the interworking branches be listed here?  */
15589 	    if ((bfd_link_pic (info) || htab->root.is_relocatable_executable
15590 		 || htab->fdpic_p)
15591 		&& (sec->flags & SEC_ALLOC) != 0)
15592 	      {
15593 		if (h == NULL
15594 		    && elf32_arm_howto_from_type (r_type)->pc_relative)
15595 		  {
15596 		    /* In shared libraries and relocatable executables,
15597 		       we treat local relative references as calls;
15598 		       see the related SYMBOL_CALLS_LOCAL code in
15599 		       allocate_dynrelocs.  */
15600 		    call_reloc_p = TRUE;
15601 		    may_need_local_target_p = TRUE;
15602 		  }
15603 		else
15604 		  /* We are creating a shared library or relocatable
15605 		     executable, and this is a reloc against a global symbol,
15606 		     or a non-PC-relative reloc against a local symbol.
15607 		     We may need to copy the reloc into the output.  */
15608 		  may_become_dynamic_p = TRUE;
15609 	      }
15610 	    else
15611 	      may_need_local_target_p = TRUE;
15612 	    break;
15613 
15614 	/* This relocation describes the C++ object vtable hierarchy.
15615 	   Reconstruct it for later use during GC.  */
15616 	case R_ARM_GNU_VTINHERIT:
15617 	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
15618 	    return FALSE;
15619 	  break;
15620 
15621 	/* This relocation describes which C++ vtable entries are actually
15622 	   used.  Record for later use during GC.  */
15623 	case R_ARM_GNU_VTENTRY:
15624 	  if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
15625 	    return FALSE;
15626 	  break;
15627 	}
15628 
15629       if (h != NULL)
15630 	{
15631 	  if (call_reloc_p)
15632 	    /* We may need a .plt entry if the function this reloc
15633 	       refers to is in a different object, regardless of the
15634 	       symbol's type.  We can't tell for sure yet, because
15635 	       something later might force the symbol local.  */
15636 	    h->needs_plt = 1;
15637 	  else if (may_need_local_target_p)
15638 	    /* If this reloc is in a read-only section, we might
15639 	       need a copy reloc.  We can't check reliably at this
15640 	       stage whether the section is read-only, as input
15641 	       sections have not yet been mapped to output sections.
15642 	       Tentatively set the flag for now, and correct in
15643 	       adjust_dynamic_symbol.  */
15644 	    h->non_got_ref = 1;
15645 	}
15646 
15647       if (may_need_local_target_p
15648 	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
15649 	{
15650 	  union gotplt_union *root_plt;
15651 	  struct arm_plt_info *arm_plt;
15652 	  struct arm_local_iplt_info *local_iplt;
15653 
15654 	  if (h != NULL)
15655 	    {
15656 	      root_plt = &h->plt;
15657 	      arm_plt = &eh->plt;
15658 	    }
15659 	  else
15660 	    {
15661 	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
15662 	      if (local_iplt == NULL)
15663 		return FALSE;
15664 	      root_plt = &local_iplt->root;
15665 	      arm_plt = &local_iplt->arm;
15666 	    }
15667 
15668 	  /* If the symbol is a function that doesn't bind locally,
15669 	     this relocation will need a PLT entry.  */
15670 	  if (root_plt->refcount != -1)
15671 	    root_plt->refcount += 1;
15672 
15673 	  if (!call_reloc_p)
15674 	    arm_plt->noncall_refcount++;
15675 
15676 	  /* It's too early to use htab->use_blx here, so we have to
15677 	     record possible blx references separately from
15678 	     relocs that definitely need a thumb stub.  */
15679 
15680 	  if (r_type == R_ARM_THM_CALL)
15681 	    arm_plt->maybe_thumb_refcount += 1;
15682 
15683 	  if (r_type == R_ARM_THM_JUMP24
15684 	      || r_type == R_ARM_THM_JUMP19)
15685 	    arm_plt->thumb_refcount += 1;
15686 	}
15687 
15688       if (may_become_dynamic_p)
15689 	{
15690 	  struct elf_dyn_relocs *p, **head;
15691 
15692 	  /* Create a reloc section in dynobj.  */
15693 	  if (sreloc == NULL)
15694 	    {
15695 	      sreloc = _bfd_elf_make_dynamic_reloc_section
15696 		(sec, dynobj, 2, abfd, ! htab->use_rel);
15697 
15698 	      if (sreloc == NULL)
15699 		return FALSE;
15700 
15701 	      /* BPABI objects never have dynamic relocations mapped.  */
15702 	      if (htab->symbian_p)
15703 		{
15704 		  flagword flags;
15705 
15706 		  flags = bfd_section_flags (sreloc);
15707 		  flags &= ~(SEC_LOAD | SEC_ALLOC);
15708 		  bfd_set_section_flags (sreloc, flags);
15709 		}
15710 	    }
15711 
15712 	  /* If this is a global symbol, count the number of
15713 	     relocations we need for this symbol.  */
15714 	  if (h != NULL)
15715 	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
15716 	  else
15717 	    {
15718 	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
15719 	      if (head == NULL)
15720 		return FALSE;
15721 	    }
15722 
15723 	  p = *head;
15724 	  if (p == NULL || p->sec != sec)
15725 	    {
15726 	      bfd_size_type amt = sizeof *p;
15727 
15728 	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
15729 	      if (p == NULL)
15730 		return FALSE;
15731 	      p->next = *head;
15732 	      *head = p;
15733 	      p->sec = sec;
15734 	      p->count = 0;
15735 	      p->pc_count = 0;
15736 	    }
15737 
15738 	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
15739 	    p->pc_count += 1;
15740 	  p->count += 1;
15741 	  if (h == NULL && htab->fdpic_p && !bfd_link_pic(info)
15742 	      && r_type != R_ARM_ABS32 && r_type != R_ARM_ABS32_NOI) {
15743 	    /* Here we only support R_ARM_ABS32 and R_ARM_ABS32_NOI
15744 	       relocations that will become rofixups.  */
15745 	    /* This is because we assume all of them will become rofixups.  */
15746 	    fprintf(stderr, "FDPIC does not yet support %d relocation to become dynamic for executable\n", r_type);
15747 	    _bfd_error_handler
15748 	      (_("FDPIC does not yet support %s relocation"
15749 		 " to become dynamic for executable"),
15750 	       elf32_arm_howto_table_1[r_type].name);
15751 	    abort();
15752 	  }
15753 	}
15754     }
15755 
15756   return TRUE;
15757 }
15758 
15759 static void
15760 elf32_arm_update_relocs (asection *o,
15761 			 struct bfd_elf_section_reloc_data *reldata)
15762 {
15763   void (*swap_in) (bfd *, const bfd_byte *, Elf_Internal_Rela *);
15764   void (*swap_out) (bfd *, const Elf_Internal_Rela *, bfd_byte *);
15765   const struct elf_backend_data *bed;
15766   _arm_elf_section_data *eado;
15767   struct bfd_link_order *p;
15768   bfd_byte *erela_head, *erela;
15769   Elf_Internal_Rela *irela_head, *irela;
15770   Elf_Internal_Shdr *rel_hdr;
15771   bfd *abfd;
15772   unsigned int count;
15773 
15774   eado = get_arm_elf_section_data (o);
15775 
15776   if (!eado || eado->elf.this_hdr.sh_type != SHT_ARM_EXIDX)
15777     return;
15778 
15779   abfd = o->owner;
15780   bed = get_elf_backend_data (abfd);
15781   rel_hdr = reldata->hdr;
15782 
15783   if (rel_hdr->sh_entsize == bed->s->sizeof_rel)
15784     {
15785       swap_in = bed->s->swap_reloc_in;
15786       swap_out = bed->s->swap_reloc_out;
15787     }
15788   else if (rel_hdr->sh_entsize == bed->s->sizeof_rela)
15789     {
15790       swap_in = bed->s->swap_reloca_in;
15791       swap_out = bed->s->swap_reloca_out;
15792     }
15793   else
15794     abort ();
15795 
15796   erela_head = rel_hdr->contents;
15797   irela_head = (Elf_Internal_Rela *) bfd_zmalloc
15798     ((NUM_SHDR_ENTRIES (rel_hdr) + 1) * sizeof (*irela_head));
15799 
15800   erela = erela_head;
15801   irela = irela_head;
15802   count = 0;
15803 
15804   for (p = o->map_head.link_order; p; p = p->next)
15805     {
15806       if (p->type == bfd_section_reloc_link_order
15807 	  || p->type == bfd_symbol_reloc_link_order)
15808 	{
15809 	  (*swap_in) (abfd, erela, irela);
15810 	  erela += rel_hdr->sh_entsize;
15811 	  irela++;
15812 	  count++;
15813 	}
15814       else if (p->type == bfd_indirect_link_order)
15815 	{
15816 	  struct bfd_elf_section_reloc_data *input_reldata;
15817 	  arm_unwind_table_edit *edit_list, *edit_tail;
15818 	  _arm_elf_section_data *eadi;
15819 	  bfd_size_type j;
15820 	  bfd_vma offset;
15821 	  asection *i;
15822 
15823 	  i = p->u.indirect.section;
15824 
15825 	  eadi = get_arm_elf_section_data (i);
15826 	  edit_list = eadi->u.exidx.unwind_edit_list;
15827 	  edit_tail = eadi->u.exidx.unwind_edit_tail;
15828 	  offset = i->output_offset;
15829 
15830 	  if (eadi->elf.rel.hdr &&
15831 	      eadi->elf.rel.hdr->sh_entsize == rel_hdr->sh_entsize)
15832 	    input_reldata = &eadi->elf.rel;
15833 	  else if (eadi->elf.rela.hdr &&
15834 		   eadi->elf.rela.hdr->sh_entsize == rel_hdr->sh_entsize)
15835 	    input_reldata = &eadi->elf.rela;
15836 	  else
15837 	    abort ();
15838 
15839 	  if (edit_list)
15840 	    {
15841 	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15842 		{
15843 		  arm_unwind_table_edit *edit_node, *edit_next;
15844 		  bfd_vma bias;
15845 		  bfd_vma reloc_index;
15846 
15847 		  (*swap_in) (abfd, erela, irela);
15848 		  reloc_index = (irela->r_offset - offset) / 8;
15849 
15850 		  bias = 0;
15851 		  edit_node = edit_list;
15852 		  for (edit_next = edit_list;
15853 		       edit_next && edit_next->index <= reloc_index;
15854 		       edit_next = edit_node->next)
15855 		    {
15856 		      bias++;
15857 		      edit_node = edit_next;
15858 		    }
15859 
15860 		  if (edit_node->type != DELETE_EXIDX_ENTRY
15861 		      || edit_node->index != reloc_index)
15862 		    {
15863 		      irela->r_offset -= bias * 8;
15864 		      irela++;
15865 		      count++;
15866 		    }
15867 
15868 		  erela += rel_hdr->sh_entsize;
15869 		}
15870 
15871 	      if (edit_tail->type == INSERT_EXIDX_CANTUNWIND_AT_END)
15872 		{
15873 		  /* New relocation entity.  */
15874 		  asection *text_sec = edit_tail->linked_section;
15875 		  asection *text_out = text_sec->output_section;
15876 		  bfd_vma exidx_offset = offset + i->size - 8;
15877 
15878 		  irela->r_addend = 0;
15879 		  irela->r_offset = exidx_offset;
15880 		  irela->r_info = ELF32_R_INFO
15881 		    (text_out->target_index, R_ARM_PREL31);
15882 		  irela++;
15883 		  count++;
15884 		}
15885 	    }
15886 	  else
15887 	    {
15888 	      for (j = 0; j < NUM_SHDR_ENTRIES (input_reldata->hdr); j++)
15889 		{
15890 		  (*swap_in) (abfd, erela, irela);
15891 		  erela += rel_hdr->sh_entsize;
15892 		  irela++;
15893 		}
15894 
15895 	      count += NUM_SHDR_ENTRIES (input_reldata->hdr);
15896 	    }
15897 	}
15898     }
15899 
15900   reldata->count = count;
15901   rel_hdr->sh_size = count * rel_hdr->sh_entsize;
15902 
15903   erela = erela_head;
15904   irela = irela_head;
15905   while (count > 0)
15906     {
15907       (*swap_out) (abfd, irela, erela);
15908       erela += rel_hdr->sh_entsize;
15909       irela++;
15910       count--;
15911     }
15912 
15913   free (irela_head);
15914 
15915   /* Hashes are no longer valid.  */
15916   free (reldata->hashes);
15917   reldata->hashes = NULL;
15918 }
15919 
15920 /* Unwinding tables are not referenced directly.  This pass marks them as
15921    required if the corresponding code section is marked.  Similarly, ARMv8-M
15922    secure entry functions can only be referenced by SG veneers which are
15923    created after the GC process. They need to be marked in case they reside in
15924    their own section (as would be the case if code was compiled with
15925    -ffunction-sections).  */
15926 
15927 static bfd_boolean
15928 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
15929 				  elf_gc_mark_hook_fn gc_mark_hook)
15930 {
15931   bfd *sub;
15932   Elf_Internal_Shdr **elf_shdrp;
15933   asection *cmse_sec;
15934   obj_attribute *out_attr;
15935   Elf_Internal_Shdr *symtab_hdr;
15936   unsigned i, sym_count, ext_start;
15937   const struct elf_backend_data *bed;
15938   struct elf_link_hash_entry **sym_hashes;
15939   struct elf32_arm_link_hash_entry *cmse_hash;
15940   bfd_boolean again, is_v8m, first_bfd_browse = TRUE;
15941   bfd_boolean debug_sec_need_to_be_marked = FALSE;
15942   asection *isec;
15943 
15944   _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
15945 
15946   out_attr = elf_known_obj_attributes_proc (info->output_bfd);
15947   is_v8m = out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V8M_BASE
15948 	   && out_attr[Tag_CPU_arch_profile].i == 'M';
15949 
15950   /* Marking EH data may cause additional code sections to be marked,
15951      requiring multiple passes.  */
15952   again = TRUE;
15953   while (again)
15954     {
15955       again = FALSE;
15956       for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
15957 	{
15958 	  asection *o;
15959 
15960 	  if (! is_arm_elf (sub))
15961 	    continue;
15962 
15963 	  elf_shdrp = elf_elfsections (sub);
15964 	  for (o = sub->sections; o != NULL; o = o->next)
15965 	    {
15966 	      Elf_Internal_Shdr *hdr;
15967 
15968 	      hdr = &elf_section_data (o)->this_hdr;
15969 	      if (hdr->sh_type == SHT_ARM_EXIDX
15970 		  && hdr->sh_link
15971 		  && hdr->sh_link < elf_numsections (sub)
15972 		  && !o->gc_mark
15973 		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
15974 		{
15975 		  again = TRUE;
15976 		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
15977 		    return FALSE;
15978 		}
15979 	    }
15980 
15981 	  /* Mark sections holding ARMv8-M secure entry functions.  We mark all
15982 	     of them, so there is no need for a second pass.  */
15983 	  if (is_v8m && first_bfd_browse)
15984 	    {
15985 	      sym_hashes = elf_sym_hashes (sub);
15986 	      bed = get_elf_backend_data (sub);
15987 	      symtab_hdr = &elf_tdata (sub)->symtab_hdr;
15988 	      sym_count = symtab_hdr->sh_size / bed->s->sizeof_sym;
15989 	      ext_start = symtab_hdr->sh_info;
15990 
15991 	      /* Scan symbols.  */
15992 	      for (i = ext_start; i < sym_count; i++)
15993 		{
15994 		  cmse_hash = elf32_arm_hash_entry (sym_hashes[i - ext_start]);
15995 
15996 		  /* Assume it is a special symbol.  If not, cmse_scan will
15997 		     warn about it and the user can do something about it.  */
15998 		  if (CONST_STRNEQ (cmse_hash->root.root.root.string,
15999 				    CMSE_PREFIX))
16000 		    {
16001 		      cmse_sec = cmse_hash->root.root.u.def.section;
16002 		      if (!cmse_sec->gc_mark
16003 			  && !_bfd_elf_gc_mark (info, cmse_sec, gc_mark_hook))
16004 			return FALSE;
16005 		      /* The debug sections related to these secure entry
16006 			 functions are marked when the flag below is enabled.  */
16007 		      debug_sec_need_to_be_marked = TRUE;
16008 		    }
16009 		}
16010 
16011 	      if (debug_sec_need_to_be_marked)
16012 		{
16013 		  /* Loop over all the sections of the object file containing
16014 		     Armv8-M secure entry functions and mark all the debug
16015 		     sections.  */
16016 		  for (isec = sub->sections; isec != NULL; isec = isec->next)
16017 		    {
16018 		      /* If it is a debug section that is not yet marked,
16019 			 mark it.  */
16020 		      if (!isec->gc_mark && (isec->flags & SEC_DEBUGGING))
16021 			isec->gc_mark = 1;
16021 		    }
16022 		  debug_sec_need_to_be_marked = FALSE;
16023 		}
16024 	    }
16025 	}
16026       first_bfd_browse = FALSE;
16027     }
16028 
16029   return TRUE;
16030 }
16031 
16032 /* Treat mapping symbols as special target symbols.  */
16033 
16034 static bfd_boolean
16035 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
16036 {
16037   return bfd_is_arm_special_symbol_name (sym->name,
16038 					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
16039 }
16040 
16041 /* If the ELF symbol SYM might be a function in SEC, return the
16042    function size and set *CODE_OFF to the function's entry point,
16043    otherwise return zero.  */
16044 
16045 static bfd_size_type
16046 elf32_arm_maybe_function_sym (const asymbol *sym, asection *sec,
16047 			      bfd_vma *code_off)
16048 {
16049   bfd_size_type size;
16050 
16051   if ((sym->flags & (BSF_SECTION_SYM | BSF_FILE | BSF_OBJECT
16052 		     | BSF_THREAD_LOCAL | BSF_RELC | BSF_SRELC)) != 0
16053       || sym->section != sec)
16054     return 0;
16055 
16056   if (!(sym->flags & BSF_SYNTHETIC))
16057     switch (ELF_ST_TYPE (((elf_symbol_type *) sym)->internal_elf_sym.st_info))
16058       {
16059 	case STT_FUNC:
16060 	case STT_ARM_TFUNC:
16061 	case STT_NOTYPE:
16062 	  break;
16063 	default:
16064 	  return 0;
16065       }
16066 
16067   if ((sym->flags & BSF_LOCAL)
16068       && bfd_is_arm_special_symbol_name (sym->name,
16069 					 BFD_ARM_SPECIAL_SYM_TYPE_ANY))
16070     return 0;
16071 
16072   *code_off = sym->value;
16073   size = 0;
16074   if (!(sym->flags & BSF_SYNTHETIC))
16075     size = ((elf_symbol_type *) sym)->internal_elf_sym.st_size;
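  /* A return value of zero means "not a function", so report at least 1
     for zero-sized function symbols.  */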
16076   if (size == 0)
16077     size = 1;
16078   return size;
16079 }
16080 
16081 static bfd_boolean
16082 elf32_arm_find_inliner_info (bfd *	    abfd,
16083 			     const char **  filename_ptr,
16084 			     const char **  functionname_ptr,
16085 			     unsigned int * line_ptr)
16086 {
16087   bfd_boolean found;
16088   found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
16089 					 functionname_ptr, line_ptr,
16090 					 & elf_tdata (abfd)->dwarf2_find_line_info);
16091   return found;
16092 }
16093 
16094 /* Find dynamic relocs for H that apply to read-only sections.  */
16095 
16096 static asection *
16097 readonly_dynrelocs (struct elf_link_hash_entry *h)
16098 {
16099   struct elf_dyn_relocs *p;
16100 
16101   for (p = elf32_arm_hash_entry (h)->dyn_relocs; p != NULL; p = p->next)
16102     {
16103       asection *s = p->sec->output_section;
16104 
16105       if (s != NULL && (s->flags & SEC_READONLY) != 0)
16106 	return p->sec;
16107     }
16108   return NULL;
16109 }
16110 
16111 /* Adjust a symbol defined by a dynamic object and referenced by a
16112    regular object.  The current definition is in some section of the
16113    dynamic object, but we're not including those sections.  We have to
16114    change the definition to something the rest of the link can
16115    understand.  */
16116 
16117 static bfd_boolean
16118 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
16119 				 struct elf_link_hash_entry * h)
16120 {
16121   bfd * dynobj;
16122   asection *s, *srel;
16123   struct elf32_arm_link_hash_entry * eh;
16124   struct elf32_arm_link_hash_table *globals;
16125 
16126   globals = elf32_arm_hash_table (info);
16127   if (globals == NULL)
16128     return FALSE;
16129 
16130   dynobj = elf_hash_table (info)->dynobj;
16131 
16132   /* Make sure we know what is going on here.  */
16133   BFD_ASSERT (dynobj != NULL
16134 	      && (h->needs_plt
16135 		  || h->type == STT_GNU_IFUNC
16136 		  || h->is_weakalias
16137 		  || (h->def_dynamic
16138 		      && h->ref_regular
16139 		      && !h->def_regular)));
16140 
16141   eh = (struct elf32_arm_link_hash_entry *) h;
16142 
16143   /* If this is a function, put it in the procedure linkage table.  We
16144      will fill in the contents of the procedure linkage table later,
16145      when we know the address of the .got section.  */
16146   if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
16147     {
16148       /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
16149 	 symbol binds locally.  */
16150       if (h->plt.refcount <= 0
16151 	  || (h->type != STT_GNU_IFUNC
16152 	      && (SYMBOL_CALLS_LOCAL (info, h)
16153 		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16154 		      && h->root.type == bfd_link_hash_undefweak))))
16155 	{
16156 	  /* This case can occur if we saw a PLT32 reloc in an input
16157 	     file, but the symbol was never referred to by a dynamic
16158 	     object, or if all references were garbage collected.  In
16159 	     such a case, we don't actually need to build a procedure
16160 	     linkage table, and we can just do a PC24 reloc instead.  */
16161 	  h->plt.offset = (bfd_vma) -1;
16162 	  eh->plt.thumb_refcount = 0;
16163 	  eh->plt.maybe_thumb_refcount = 0;
16164 	  eh->plt.noncall_refcount = 0;
16165 	  h->needs_plt = 0;
16166 	}
16167 
16168       return TRUE;
16169     }
16170   else
16171     {
16172       /* It's possible that we incorrectly decided a .plt reloc was
16173 	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
16174 	 in check_relocs.  We can't decide accurately between function
16175 	 and non-function syms in check_relocs; objects loaded later in
16176 	 the link may change h->type.  So fix it now.  */
16177       h->plt.offset = (bfd_vma) -1;
16178       eh->plt.thumb_refcount = 0;
16179       eh->plt.maybe_thumb_refcount = 0;
16180       eh->plt.noncall_refcount = 0;
16181     }
16182 
16183   /* If this is a weak symbol, and there is a real definition, the
16184      processor independent code will have arranged for us to see the
16185      real definition first, and we can just use the same value.  */
16186   if (h->is_weakalias)
16187     {
16188       struct elf_link_hash_entry *def = weakdef (h);
16189       BFD_ASSERT (def->root.type == bfd_link_hash_defined);
16190       h->root.u.def.section = def->root.u.def.section;
16191       h->root.u.def.value = def->root.u.def.value;
16192       return TRUE;
16193     }
16194 
16195   /* If there are no non-GOT references, we do not need a copy
16196      relocation.  */
16197   if (!h->non_got_ref)
16198     return TRUE;
16199 
16200   /* This is a reference to a symbol defined by a dynamic object which
16201      is not a function.  */
16202 
16203   /* If we are creating a shared library, we must presume that the
16204      only references to the symbol are via the global offset table.
16205      For such cases we need not do anything here; the relocations will
16206      be handled correctly by relocate_section.  Relocatable executables
16207      can reference data in shared objects directly, so we don't need to
16208      do anything here.  */
16209   if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
16210     return TRUE;
16211 
16212   /* We must allocate the symbol in our .dynbss section, which will
16213      become part of the .bss section of the executable.  There will be
16214      an entry for this symbol in the .dynsym section.  The dynamic
16215      object will contain position independent code, so all references
16216      from the dynamic object to this symbol will go through the global
16217      offset table.  The dynamic linker will use the .dynsym entry to
16218      determine the address it must put in the global offset table, so
16219      both the dynamic object and the regular object will refer to the
16220      same memory location for the variable.  */
16221   /* If allowed, we must generate an R_ARM_COPY reloc to tell the dynamic
16222      linker to copy the initial value out of the dynamic object and into
16223      the runtime process image.  We need to remember the offset into the
16224      .rel(a).bss section we are going to use.  */
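        /* For example, if an executable takes the address of a variable
           defined only in a shared object, space is reserved for it here,
           an R_ARM_COPY relocation is emitted, and both the executable and
           the shared object then resolve to the executable's copy.  */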
16225   if ((h->root.u.def.section->flags & SEC_READONLY) != 0)
16226     {
16227       s = globals->root.sdynrelro;
16228       srel = globals->root.sreldynrelro;
16229     }
16230   else
16231     {
16232       s = globals->root.sdynbss;
16233       srel = globals->root.srelbss;
16234     }
16235   if (info->nocopyreloc == 0
16236       && (h->root.u.def.section->flags & SEC_ALLOC) != 0
16237       && h->size != 0)
16238     {
16239       elf32_arm_allocate_dynrelocs (info, srel, 1);
16240       h->needs_copy = 1;
16241     }
16242 
16243   return _bfd_elf_adjust_dynamic_copy (info, h, s);
16244 }
16245 
16246 /* Allocate space in .plt, .got and associated reloc sections for
16247    dynamic relocs.  */
16248 
16249 static bfd_boolean
16250 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
16251 {
16252   struct bfd_link_info *info;
16253   struct elf32_arm_link_hash_table *htab;
16254   struct elf32_arm_link_hash_entry *eh;
16255   struct elf_dyn_relocs *p;
16256 
16257   if (h->root.type == bfd_link_hash_indirect)
16258     return TRUE;
16259 
16260   eh = (struct elf32_arm_link_hash_entry *) h;
16261 
16262   info = (struct bfd_link_info *) inf;
16263   htab = elf32_arm_hash_table (info);
16264   if (htab == NULL)
16265     return FALSE;
16266 
16267   if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
16268       && h->plt.refcount > 0)
16269     {
16270       /* Make sure this symbol is output as a dynamic symbol.
16271 	 Undefined weak syms won't yet be marked as dynamic.  */
16272       if (h->dynindx == -1 && !h->forced_local
16273 	  && h->root.type == bfd_link_hash_undefweak)
16274 	{
16275 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16276 	    return FALSE;
16277 	}
16278 
16279       /* If the call in the PLT entry binds locally, the associated
16280 	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
16281 	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
16282 	 than the .plt section.  */
16283       if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
16284 	{
16285 	  eh->is_iplt = 1;
16286 	  if (eh->plt.noncall_refcount == 0
16287 	      && SYMBOL_REFERENCES_LOCAL (info, h))
16288 	    /* All non-call references can be resolved directly.
16289 	       This means that they can (and in some cases, must)
16290 	       resolve directly to the run-time target, rather than
16291 	       to the PLT.  That in turn means that any .got entry
16292 	       would be equal to the .igot.plt entry, so there's
16293 	       no point having both.  */
16294 	    h->got.refcount = 0;
16295 	}
16296 
16297       if (bfd_link_pic (info)
16298 	  || eh->is_iplt
16299 	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
16300 	{
16301 	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
16302 
16303 	  /* If this symbol is not defined in a regular file, and we are
16304 	     not generating a shared library, then set the symbol to this
16305 	     location in the .plt.  This is required to make function
16306 	     pointers compare as equal between the normal executable and
16307 	     the shared library.  */
16308 	  if (! bfd_link_pic (info)
16309 	      && !h->def_regular)
16310 	    {
16311 	      h->root.u.def.section = htab->root.splt;
16312 	      h->root.u.def.value = h->plt.offset;
16313 
16314 	      /* Make sure the function is not marked as Thumb, in case
16315 		 it is the target of an ABS32 relocation, which will
16316 		 point to the PLT entry.  */
16317 	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16318 	    }
16319 
16320 	  /* VxWorks executables have a second set of relocations for
16321 	     each PLT entry.  They go in a separate relocation section,
16322 	     which is processed by the kernel loader.  */
16323 	  if (htab->vxworks_p && !bfd_link_pic (info))
16324 	    {
16325 	      /* There is a relocation for the initial PLT entry:
16326 		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
16327 	      if (h->plt.offset == htab->plt_header_size)
16328 		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
16329 
16330 	      /* There are two extra relocations for each subsequent
16331 		 PLT entry: an R_ARM_32 relocation for the GOT entry,
16332 		 and an R_ARM_32 relocation for the PLT entry.  */
16333 	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
16334 	    }
16335 	}
16336       else
16337 	{
16338 	  h->plt.offset = (bfd_vma) -1;
16339 	  h->needs_plt = 0;
16340 	}
16341     }
16342   else
16343     {
16344       h->plt.offset = (bfd_vma) -1;
16345       h->needs_plt = 0;
16346     }
16347 
16348   eh = (struct elf32_arm_link_hash_entry *) h;
16349   eh->tlsdesc_got = (bfd_vma) -1;
16350 
16351   if (h->got.refcount > 0)
16352     {
16353       asection *s;
16354       bfd_boolean dyn;
16355       int tls_type = elf32_arm_hash_entry (h)->tls_type;
16356       int indx;
16357 
16358       /* Make sure this symbol is output as a dynamic symbol.
16359 	 Undefined weak syms won't yet be marked as dynamic.  */
16360       if (htab->root.dynamic_sections_created && h->dynindx == -1 && !h->forced_local
16361 	  && h->root.type == bfd_link_hash_undefweak)
16362 	{
16363 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16364 	    return FALSE;
16365 	}
16366 
16367       if (!htab->symbian_p)
16368 	{
16369 	  s = htab->root.sgot;
16370 	  h->got.offset = s->size;
16371 
16372 	  if (tls_type == GOT_UNKNOWN)
16373 	    abort ();
16374 
16375 	  if (tls_type == GOT_NORMAL)
16376 	    /* Non-TLS symbols need one GOT slot.  */
16377 	    s->size += 4;
16378 	  else
16379 	    {
16380 	      if (tls_type & GOT_TLS_GDESC)
16381 		{
16382 		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
16383 		  eh->tlsdesc_got
16384 		    = (htab->root.sgotplt->size
16385 		       - elf32_arm_compute_jump_table_size (htab));
16386 		  htab->root.sgotplt->size += 8;
16387 		  h->got.offset = (bfd_vma) -2;
16388 		  /* plt.got_offset needs to know there's a TLS_DESC
16389 		     reloc in the middle of .got.plt.  */
16390 		  htab->num_tls_desc++;
16391 		}
16392 
16393 	      if (tls_type & GOT_TLS_GD)
16394 		{
16395 		  /* R_ARM_TLS_GD32 and R_ARM_TLS_GD32_FDPIC need two
16396 		     consecutive GOT slots.  If the symbol is both GD
16397 		     and GDESC, got.offset may have been
16398 		     overwritten.  */
16399 		  h->got.offset = s->size;
16400 		  s->size += 8;
16401 		}
16402 
16403 	      if (tls_type & GOT_TLS_IE)
16404 		/* R_ARM_TLS_IE32/R_ARM_TLS_IE32_FDPIC need one GOT
16405 		   slot.  */
16406 		s->size += 4;
16407 	    }
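                /* Roughly, the accounting above reserves one 4-byte .got
                   slot for a GOT_NORMAL or GOT_TLS_IE reference, an 8-byte
                   pair for GOT_TLS_GD, and an 8-byte .got.plt pair for
                   GOT_TLS_GDESC.  A symbol that is both GD and GDESC keeps
                   both pairs, with got.offset addressing the GD pair since
                   it is assigned last.  */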
16408 
16409 	  dyn = htab->root.dynamic_sections_created;
16410 
16411 	  indx = 0;
16412 	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
16413 					       bfd_link_pic (info),
16414 					       h)
16415 	      && (!bfd_link_pic (info)
16416 		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
16417 	    indx = h->dynindx;
16418 
16419 	  if (tls_type != GOT_NORMAL
16420 	      && (bfd_link_dll (info) || indx != 0)
16421 	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
16422 		  || h->root.type != bfd_link_hash_undefweak))
16423 	    {
16424 	      if (tls_type & GOT_TLS_IE)
16425 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16426 
16427 	      if (tls_type & GOT_TLS_GD)
16428 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16429 
16430 	      if (tls_type & GOT_TLS_GDESC)
16431 		{
16432 		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
16433 		  /* GDESC needs a trampoline to jump to.  */
16434 		  htab->tls_trampoline = -1;
16435 		}
16436 
16437 	      /* Only GD needs it.  GDESC just emits one relocation per
16438 		 2 entries.  */
16439 	      if ((tls_type & GOT_TLS_GD) && indx != 0)
16440 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16441 	    }
16442 	  else if (((indx != -1) || htab->fdpic_p)
16443 		   && !SYMBOL_REFERENCES_LOCAL (info, h))
16444 	    {
16445 	      if (htab->root.dynamic_sections_created)
16446 		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
16447 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16448 	    }
16449 	  else if (h->type == STT_GNU_IFUNC
16450 		   && eh->plt.noncall_refcount == 0)
16451 	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
16452 	       they all resolve dynamically instead.  Reserve room for the
16453 	       GOT entry's R_ARM_IRELATIVE relocation.  */
16454 	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
16455 	  else if (bfd_link_pic (info)
16456 		   && !UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16457 	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
16458 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16459 	  else if (htab->fdpic_p && tls_type == GOT_NORMAL)
16460 	    /* Reserve room for rofixup for FDPIC executable.  */
16461 	    /* TLS relocs do not need space since they are completely
16462 	       resolved.  */
16463 	    htab->srofixup->size += 4;
16464 	}
16465     }
16466   else
16467     h->got.offset = (bfd_vma) -1;
16468 
16469   /* FDPIC support.  */
16470   if (eh->fdpic_cnts.gotofffuncdesc_cnt > 0)
16471     {
16472       /* Symbol mustn't be exported.  */
16473       if (h->dynindx != -1)
16474 	abort();
16475 
16476       /* We only allocate one function descriptor with its associated relocation.  */
16477       if (eh->fdpic_cnts.funcdesc_offset == -1)
16478 	{
16479 	  asection *s = htab->root.sgot;
16480 
16481 	  eh->fdpic_cnts.funcdesc_offset = s->size;
16482 	  s->size += 8;
16483 	  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16484 	  if (bfd_link_pic(info))
16485 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16486 	  else
16487 	    htab->srofixup->size += 8;
16488 	}
16489     }
16490 
16491   if (eh->fdpic_cnts.gotfuncdesc_cnt > 0)
16492     {
16493       asection *s = htab->root.sgot;
16494 
16495       if (htab->root.dynamic_sections_created && h->dynindx == -1
16496 	  && !h->forced_local)
16497 	if (! bfd_elf_link_record_dynamic_symbol (info, h))
16498 	  return FALSE;
16499 
16500       if (h->dynindx == -1)
16501 	{
16502 	  /* We only allocate one function descriptor with its associated relocation.  */
16503 	  if (eh->fdpic_cnts.funcdesc_offset == -1)
16504 	    {
16505 
16506 	      eh->fdpic_cnts.funcdesc_offset = s->size;
16507 	      s->size += 8;
16508 	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16509 	      if (bfd_link_pic(info))
16510 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16511 	      else
16512 		htab->srofixup->size += 8;
16513 	    }
16514 	}
16515 
16516       /* Add one entry into the GOT and an R_ARM_FUNCDESC or
16517 	 R_ARM_RELATIVE/rofixup relocation on it.  */
16518       eh->fdpic_cnts.gotfuncdesc_offset = s->size;
16519       s->size += 4;
16520       if (h->dynindx == -1 && !bfd_link_pic(info))
16521 	htab->srofixup->size += 4;
16522       else
16523 	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16524     }
16525 
16526   if (eh->fdpic_cnts.funcdesc_cnt > 0)
16527     {
16528       if (htab->root.dynamic_sections_created && h->dynindx == -1
16529 	  && !h->forced_local)
16530 	if (! bfd_elf_link_record_dynamic_symbol (info, h))
16531 	  return FALSE;
16532 
16533       if (h->dynindx == -1)
16534 	{
16535 	  /* We only allocate one function descriptor with its associated relocation.  */
16536 	  if (eh->fdpic_cnts.funcdesc_offset == -1)
16537 	    {
16538 	      asection *s = htab->root.sgot;
16539 
16540 	      eh->fdpic_cnts.funcdesc_offset = s->size;
16541 	      s->size += 8;
16542 	      /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16543 	      if (bfd_link_pic(info))
16544 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
16545 	      else
16546 		htab->srofixup->size += 8;
16547 	    }
16548 	}
16549       if (h->dynindx == -1 && !bfd_link_pic(info))
16550 	{
16551 	  /* For FDPIC executable we replace R_ARM_RELATIVE with a rofixup.  */
16552 	  htab->srofixup->size += 4 * eh->fdpic_cnts.funcdesc_cnt;
16553 	}
16554       else
16555 	{
16556 	  /* We will need one dynamic reloc per reference; it will be either
16557 	     R_ARM_FUNCDESC or R_ARM_RELATIVE for hidden symbols.  */
16558 	  elf32_arm_allocate_dynrelocs (info, htab->root.srelgot,
16559 					eh->fdpic_cnts.funcdesc_cnt);
16560 	}
16561     }
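
        /* In each of the FDPIC cases above, a function descriptor is an
           8-byte slot reserved in .got; with PIC it is initialized at run
           time via a single R_ARM_FUNCDESC_VALUE dynamic relocation, while
           a static FDPIC executable records two 4-byte rofixups instead.  */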
16562 
16563   /* Allocate stubs for exported Thumb functions on v4t.  */
16564   if (!htab->use_blx && h->dynindx != -1
16565       && h->def_regular
16566       && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
16567       && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
16568     {
16569       struct elf_link_hash_entry * th;
16570       struct bfd_link_hash_entry * bh;
16571       struct elf_link_hash_entry * myh;
16572       char name[1024];
16573       asection *s;
16574       bh = NULL;
16575       /* Create a new symbol to register the real location of the function.  */
16576       s = h->root.u.def.section;
16577       sprintf (name, "__real_%s", h->root.root.string);
16578       _bfd_generic_link_add_one_symbol (info, s->owner,
16579 					name, BSF_GLOBAL, s,
16580 					h->root.u.def.value,
16581 					NULL, TRUE, FALSE, &bh);
16582 
16583       myh = (struct elf_link_hash_entry *) bh;
16584       myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16585       myh->forced_local = 1;
16586       ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
16587       eh->export_glue = myh;
16588       th = record_arm_to_thumb_glue (info, h);
16589       /* Point the symbol at the stub.  */
16590       h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
16591       ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
16592       h->root.u.def.section = th->root.u.def.section;
16593       h->root.u.def.value = th->root.u.def.value & ~1;
16594     }
16595 
16596   if (eh->dyn_relocs == NULL)
16597     return TRUE;
16598 
16599   /* In the shared -Bsymbolic case, discard space allocated for
16600      dynamic pc-relative relocs against symbols which turn out to be
16601      defined in regular objects.  For the normal shared case, discard
16602      space for pc-relative relocs that have become local due to symbol
16603      visibility changes.  */
16604 
16605   if (bfd_link_pic (info) || htab->root.is_relocatable_executable || htab->fdpic_p)
16606     {
16607       /* Relocs that use pc_count are PC-relative forms, which will appear
16608 	 on something like ".long foo - ." or "movw REG, foo - .".  We want
16609 	 calls to protected symbols to resolve directly to the function
16610 	 rather than going via the plt.  If people want function pointer
16611 	 comparisons to work as expected then they should avoid writing
16612 	 assembly like ".long foo - .".  */
16613       if (SYMBOL_CALLS_LOCAL (info, h))
16614 	{
16615 	  struct elf_dyn_relocs **pp;
16616 
16617 	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16618 	    {
16619 	      p->count -= p->pc_count;
16620 	      p->pc_count = 0;
16621 	      if (p->count == 0)
16622 		*pp = p->next;
16623 	      else
16624 		pp = &p->next;
16625 	    }
16626 	}
16627 
16628       if (htab->vxworks_p)
16629 	{
16630 	  struct elf_dyn_relocs **pp;
16631 
16632 	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
16633 	    {
16634 	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
16635 		*pp = p->next;
16636 	      else
16637 		pp = &p->next;
16638 	    }
16639 	}
16640 
16641       /* Also discard relocs on undefined weak syms with non-default
16642 	 visibility.  */
16643       if (eh->dyn_relocs != NULL
16644 	  && h->root.type == bfd_link_hash_undefweak)
16645 	{
16646 	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
16647 	      || UNDEFWEAK_NO_DYNAMIC_RELOC (info, h))
16648 	    eh->dyn_relocs = NULL;
16649 
16650 	  /* Make sure undefined weak symbols are output as a dynamic
16651 	     symbol in PIEs.  */
16652 	  else if (htab->root.dynamic_sections_created && h->dynindx == -1
16653 		   && !h->forced_local)
16654 	    {
16655 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
16656 		return FALSE;
16657 	    }
16658 	}
16659 
16660       else if (htab->root.is_relocatable_executable && h->dynindx == -1
16661 	       && h->root.type == bfd_link_hash_new)
16662 	{
16663 	  /* Output absolute symbols so that we can create relocations
16664 	     against them.  For normal symbols we output a relocation
16665 	     against the section that contains them.  */
16666 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
16667 	    return FALSE;
16668 	}
16669 
16670     }
16671   else
16672     {
16673       /* For the non-shared case, discard space for relocs against
16674 	 symbols which turn out to need copy relocs or are not
16675 	 dynamic.  */
16676 
16677       if (!h->non_got_ref
16678 	  && ((h->def_dynamic
16679 	       && !h->def_regular)
16680 	      || (htab->root.dynamic_sections_created
16681 		  && (h->root.type == bfd_link_hash_undefweak
16682 		      || h->root.type == bfd_link_hash_undefined))))
16683 	{
16684 	  /* Make sure this symbol is output as a dynamic symbol.
16685 	     Undefined weak syms won't yet be marked as dynamic.  */
16686 	  if (h->dynindx == -1 && !h->forced_local
16687 	      && h->root.type == bfd_link_hash_undefweak)
16688 	    {
16689 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
16690 		return FALSE;
16691 	    }
16692 
16693 	  /* If that succeeded, we know we'll be keeping all the
16694 	     relocs.  */
16695 	  if (h->dynindx != -1)
16696 	    goto keep;
16697 	}
16698 
16699       eh->dyn_relocs = NULL;
16700 
16701     keep: ;
16702     }
16703 
16704   /* Finally, allocate space.  */
16705   for (p = eh->dyn_relocs; p != NULL; p = p->next)
16706     {
16707       asection *sreloc = elf_section_data (p->sec)->sreloc;
16708 
16709       if (h->type == STT_GNU_IFUNC
16710 	  && eh->plt.noncall_refcount == 0
16711 	  && SYMBOL_REFERENCES_LOCAL (info, h))
16712 	elf32_arm_allocate_irelocs (info, sreloc, p->count);
16713       else if (h->dynindx != -1 && (!bfd_link_pic(info) || !info->symbolic || !h->def_regular))
16714 	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16715       else if (htab->fdpic_p && !bfd_link_pic(info))
16716 	htab->srofixup->size += 4 * p->count;
16717       else
16718 	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
16719     }
16720 
16721   return TRUE;
16722 }
16723 
16724 /* Set DF_TEXTREL if we find any dynamic relocs that apply to
16725    read-only sections.  */
16726 
16727 static bfd_boolean
16728 maybe_set_textrel (struct elf_link_hash_entry *h, void *info_p)
16729 {
16730   asection *sec;
16731 
16732   if (h->root.type == bfd_link_hash_indirect)
16733     return TRUE;
16734 
16735   sec = readonly_dynrelocs (h);
16736   if (sec != NULL)
16737     {
16738       struct bfd_link_info *info = (struct bfd_link_info *) info_p;
16739 
16740       info->flags |= DF_TEXTREL;
16741       info->callbacks->minfo
16742 	(_("%pB: dynamic relocation against `%pT' in read-only section `%pA'\n"),
16743 	 sec->owner, h->root.root.string, sec);
16744 
16745       /* Not an error, just cut short the traversal.  */
16746       return FALSE;
16747     }
16748 
16749   return TRUE;
16750 }
16751 
16752 void
16753 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
16754 				 int byteswap_code)
16755 {
16756   struct elf32_arm_link_hash_table *globals;
16757 
16758   globals = elf32_arm_hash_table (info);
16759   if (globals == NULL)
16760     return;
16761 
16762   globals->byteswap_code = byteswap_code;
16763 }
16764 
16765 /* Set the sizes of the dynamic sections.  */
16766 
16767 static bfd_boolean
16768 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
16769 				 struct bfd_link_info * info)
16770 {
16771   bfd * dynobj;
16772   asection * s;
16773   bfd_boolean plt;
16774   bfd_boolean relocs;
16775   bfd *ibfd;
16776   struct elf32_arm_link_hash_table *htab;
16777 
16778   htab = elf32_arm_hash_table (info);
16779   if (htab == NULL)
16780     return FALSE;
16781 
16782   dynobj = elf_hash_table (info)->dynobj;
16783   BFD_ASSERT (dynobj != NULL);
16784   check_use_blx (htab);
16785 
16786   if (elf_hash_table (info)->dynamic_sections_created)
16787     {
16788       /* Set the contents of the .interp section to the interpreter.  */
16789       if (bfd_link_executable (info) && !info->nointerp)
16790 	{
16791 	  s = bfd_get_linker_section (dynobj, ".interp");
16792 	  BFD_ASSERT (s != NULL);
16793 	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
16794 	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
16795 	}
16796     }
16797 
16798   /* Set up .got offsets for local syms, and space for local dynamic
16799      relocs.  */
16800   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
16801     {
16802       bfd_signed_vma *local_got;
16803       bfd_signed_vma *end_local_got;
16804       struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
16805       char *local_tls_type;
16806       bfd_vma *local_tlsdesc_gotent;
16807       bfd_size_type locsymcount;
16808       Elf_Internal_Shdr *symtab_hdr;
16809       asection *srel;
16810       bfd_boolean is_vxworks = htab->vxworks_p;
16811       unsigned int symndx;
16812       struct fdpic_local *local_fdpic_cnts;
16813 
16814       if (! is_arm_elf (ibfd))
16815 	continue;
16816 
16817       for (s = ibfd->sections; s != NULL; s = s->next)
16818 	{
16819 	  struct elf_dyn_relocs *p;
16820 
16821 	  for (p = (struct elf_dyn_relocs *)
16822 		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
16823 	    {
16824 	      if (!bfd_is_abs_section (p->sec)
16825 		  && bfd_is_abs_section (p->sec->output_section))
16826 		{
16827 		  /* Input section has been discarded, either because
16828 		     it is a copy of a linkonce section or due to
16829 		     linker script /DISCARD/, so we'll be discarding
16830 		     the relocs too.  */
16831 		}
16832 	      else if (is_vxworks
16833 		       && strcmp (p->sec->output_section->name,
16834 				  ".tls_vars") == 0)
16835 		{
16836 		  /* Relocations in vxworks .tls_vars sections are
16837 		     handled specially by the loader.  */
16838 		}
16839 	      else if (p->count != 0)
16840 		{
16841 		  srel = elf_section_data (p->sec)->sreloc;
16842 		  if (htab->fdpic_p && !bfd_link_pic(info))
16843 		    htab->srofixup->size += 4 * p->count;
16844 		  else
16845 		    elf32_arm_allocate_dynrelocs (info, srel, p->count);
16846 		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
16847 		    info->flags |= DF_TEXTREL;
16848 		}
16849 	    }
16850 	}
16851 
16852       local_got = elf_local_got_refcounts (ibfd);
16853       if (!local_got)
16854 	continue;
16855 
16856       symtab_hdr = & elf_symtab_hdr (ibfd);
16857       locsymcount = symtab_hdr->sh_info;
16858       end_local_got = local_got + locsymcount;
16859       local_iplt_ptr = elf32_arm_local_iplt (ibfd);
16860       local_tls_type = elf32_arm_local_got_tls_type (ibfd);
16861       local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
16862       local_fdpic_cnts = elf32_arm_local_fdpic_cnts (ibfd);
16863       symndx = 0;
16864       s = htab->root.sgot;
16865       srel = htab->root.srelgot;
16866       for (; local_got < end_local_got;
16867 	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
16868 	   ++local_tlsdesc_gotent, ++symndx, ++local_fdpic_cnts)
16869 	{
16870 	  *local_tlsdesc_gotent = (bfd_vma) -1;
16871 	  local_iplt = *local_iplt_ptr;
16872 
16873 	  /* FDPIC support.  */
16874 	  if (local_fdpic_cnts->gotofffuncdesc_cnt > 0)
16875 	    {
16876 	      if (local_fdpic_cnts->funcdesc_offset == -1)
16877 		{
16878 		  local_fdpic_cnts->funcdesc_offset = s->size;
16879 		  s->size += 8;
16880 
16881 		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16882 		  if (bfd_link_pic(info))
16883 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16884 		  else
16885 		    htab->srofixup->size += 8;
16886 		}
16887 	    }
16888 
16889 	  if (local_fdpic_cnts->funcdesc_cnt > 0)
16890 	    {
16891 	      if (local_fdpic_cnts->funcdesc_offset == -1)
16892 		{
16893 		  local_fdpic_cnts->funcdesc_offset = s->size;
16894 		  s->size += 8;
16895 
16896 		  /* We will add an R_ARM_FUNCDESC_VALUE relocation or two rofixups.  */
16897 		  if (bfd_link_pic(info))
16898 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16899 		  else
16900 		    htab->srofixup->size += 8;
16901 		}
16902 
16903 	      /* We will add n R_ARM_RELATIVE relocations or n rofixups.  */
16904 	      if (bfd_link_pic(info))
16905 		elf32_arm_allocate_dynrelocs (info, srel, local_fdpic_cnts->funcdesc_cnt);
16906 	      else
16907 		htab->srofixup->size += 4 * local_fdpic_cnts->funcdesc_cnt;
16908 	    }
16909 
16910 	  if (local_iplt != NULL)
16911 	    {
16912 	      struct elf_dyn_relocs *p;
16913 
16914 	      if (local_iplt->root.refcount > 0)
16915 		{
16916 		  elf32_arm_allocate_plt_entry (info, TRUE,
16917 						&local_iplt->root,
16918 						&local_iplt->arm);
16919 		  if (local_iplt->arm.noncall_refcount == 0)
16920 		    /* All references to the PLT are calls, so all
16921 		       non-call references can resolve directly to the
16922 		       run-time target.  This means that the .got entry
16923 		       would be the same as the .igot.plt entry, so there's
16924 		       no point creating both.  */
16925 		    *local_got = 0;
16926 		}
16927 	      else
16928 		{
16929 		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
16930 		  local_iplt->root.offset = (bfd_vma) -1;
16931 		}
16932 
16933 	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
16934 		{
16935 		  asection *psrel;
16936 
16937 		  psrel = elf_section_data (p->sec)->sreloc;
16938 		  if (local_iplt->arm.noncall_refcount == 0)
16939 		    elf32_arm_allocate_irelocs (info, psrel, p->count);
16940 		  else
16941 		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
16942 		}
16943 	    }
16944 	  if (*local_got > 0)
16945 	    {
16946 	      Elf_Internal_Sym *isym;
16947 
16948 	      *local_got = s->size;
16949 	      if (*local_tls_type & GOT_TLS_GD)
16950 		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
16951 		s->size += 8;
16952 	      if (*local_tls_type & GOT_TLS_GDESC)
16953 		{
16954 		  *local_tlsdesc_gotent = htab->root.sgotplt->size
16955 		    - elf32_arm_compute_jump_table_size (htab);
16956 		  htab->root.sgotplt->size += 8;
16957 		  *local_got = (bfd_vma) -2;
16958 		  /* plt.got_offset needs to know there's a TLS_DESC
16959 		     reloc in the middle of .got.plt.  */
16960 		  htab->num_tls_desc++;
16961 		}
16962 	      if (*local_tls_type & GOT_TLS_IE)
16963 		s->size += 4;
16964 
16965 	      if (*local_tls_type & GOT_NORMAL)
16966 		{
16967 		  /* If the symbol is both GD and GDESC, *local_got
16968 		     may have been overwritten.  */
16969 		  *local_got = s->size;
16970 		  s->size += 4;
16971 		}
16972 
16973 	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
16974 	      if (isym == NULL)
16975 		return FALSE;
16976 
16977 	      /* If all references to an STT_GNU_IFUNC PLT are calls,
16978 		 then all non-call references, including this GOT entry,
16979 		 resolve directly to the run-time target.  */
16980 	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
16981 		  && (local_iplt == NULL
16982 		      || local_iplt->arm.noncall_refcount == 0))
16983 		elf32_arm_allocate_irelocs (info, srel, 1);
16984 	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC || htab->fdpic_p)
16985 		{
16986 		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC)))
16987 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
16988 		  else if (htab->fdpic_p && *local_tls_type & GOT_NORMAL)
16989 		    htab->srofixup->size += 4;
16990 
16991 		  if ((bfd_link_pic (info) || htab->fdpic_p)
16992 		      && *local_tls_type & GOT_TLS_GDESC)
16993 		    {
16994 		      elf32_arm_allocate_dynrelocs (info,
16995 						    htab->root.srelplt, 1);
16996 		      htab->tls_trampoline = -1;
16997 		    }
16998 		}
16999 	    }
17000 	  else
17001 	    *local_got = (bfd_vma) -1;
17002 	}
17003     }
17004 
17005   if (htab->tls_ldm_got.refcount > 0)
17006     {
17007       /* Allocate two GOT entries and one dynamic relocation (if necessary)
17008 	 for R_ARM_TLS_LDM32/R_ARM_TLS_LDM32_FDPIC relocations.  */
17009       htab->tls_ldm_got.offset = htab->root.sgot->size;
17010       htab->root.sgot->size += 8;
17011       if (bfd_link_pic (info))
17012 	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
17013     }
17014   else
17015     htab->tls_ldm_got.offset = -1;
17016 
17017   /* At the very end of the .rofixup section is a pointer to the GOT;
17018      reserve space for it.  */
17019   if (htab->fdpic_p && htab->srofixup != NULL)
17020     htab->srofixup->size += 4;
17021 
17022   /* Allocate global sym .plt and .got entries, and space for global
17023      sym dynamic relocs.  */
17024   elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
17025 
17026   /* Here we rummage through the found bfds to collect glue information.  */
17027   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
17028     {
17029       if (! is_arm_elf (ibfd))
17030 	continue;
17031 
17032       /* Initialise mapping tables for code/data.  */
17033       bfd_elf32_arm_init_maps (ibfd);
17034 
17035       if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
17036 	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
17037 	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
17038 	_bfd_error_handler (_("errors encountered processing file %pB"), ibfd);
17039     }
17040 
17041   /* Allocate space for the glue sections now that we've sized them.  */
17042   bfd_elf32_arm_allocate_interworking_sections (info);
17043 
17044   /* For every jump slot reserved in the sgotplt, reloc_count is
17045      incremented.  However, when we reserve space for TLS descriptors,
17046      it's not incremented, so in order to compute the space reserved
17047      for them, it suffices to multiply the reloc count by the jump
17048      slot size.  */
17049   if (htab->root.srelplt)
17050     htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
17051 
17052   if (htab->tls_trampoline)
17053     {
17054       if (htab->root.splt->size == 0)
17055 	htab->root.splt->size += htab->plt_header_size;
17056 
17057       htab->tls_trampoline = htab->root.splt->size;
17058       htab->root.splt->size += htab->plt_entry_size;
17059 
17060       /* If we're not using lazy TLS relocations, don't generate the
17061 	 PLT and GOT entries they require.  */
17062       if (!(info->flags & DF_BIND_NOW))
17063 	{
17064 	  htab->dt_tlsdesc_got = htab->root.sgot->size;
17065 	  htab->root.sgot->size += 4;
17066 
17067 	  htab->dt_tlsdesc_plt = htab->root.splt->size;
17068 	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
17069 	}
17070     }
17071 
17072   /* The check_relocs and adjust_dynamic_symbol entry points have
17073      determined the sizes of the various dynamic sections.  Allocate
17074      memory for them.  */
17075   plt = FALSE;
17076   relocs = FALSE;
17077   for (s = dynobj->sections; s != NULL; s = s->next)
17078     {
17079       const char * name;
17080 
17081       if ((s->flags & SEC_LINKER_CREATED) == 0)
17082 	continue;
17083 
17084       /* It's OK to base decisions on the section name, because none
17085 	 of the dynobj section names depend upon the input files.  */
17086       name = bfd_section_name (s);
17087 
17088       if (s == htab->root.splt)
17089 	{
17090 	  /* Remember whether there is a PLT.  */
17091 	  plt = s->size != 0;
17092 	}
17093       else if (CONST_STRNEQ (name, ".rel"))
17094 	{
17095 	  if (s->size != 0)
17096 	    {
17097 	      /* Remember whether there are any reloc sections other
17098 		 than .rel(a).plt and .rela.plt.unloaded.  */
17099 	      if (s != htab->root.srelplt && s != htab->srelplt2)
17100 		relocs = TRUE;
17101 
17102 	      /* We use the reloc_count field as a counter if we need
17103 		 to copy relocs into the output file.  */
17104 	      s->reloc_count = 0;
17105 	    }
17106 	}
17107       else if (s != htab->root.sgot
17108 	       && s != htab->root.sgotplt
17109 	       && s != htab->root.iplt
17110 	       && s != htab->root.igotplt
17111 	       && s != htab->root.sdynbss
17112 	       && s != htab->root.sdynrelro
17113 	       && s != htab->srofixup)
17114 	{
17115 	  /* It's not one of our sections, so don't allocate space.  */
17116 	  continue;
17117 	}
17118 
17119       if (s->size == 0)
17120 	{
17121 	  /* If we don't need this section, strip it from the
17122 	     output file.  This is mostly to handle .rel(a).bss and
17123 	     .rel(a).plt.  We must create both sections in
17124 	     create_dynamic_sections, because they must be created
17125 	     before the linker maps input sections to output
17126 	     sections.  The linker does that before
17127 	     adjust_dynamic_symbol is called, and it is that
17128 	     function which decides whether anything needs to go
17129 	     into these sections.  */
17130 	  s->flags |= SEC_EXCLUDE;
17131 	  continue;
17132 	}
17133 
17134       if ((s->flags & SEC_HAS_CONTENTS) == 0)
17135 	continue;
17136 
17137       /* Allocate memory for the section contents.  */
17138       s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
17139       if (s->contents == NULL)
17140 	return FALSE;
17141     }
17142 
17143   if (elf_hash_table (info)->dynamic_sections_created)
17144     {
17145       /* Add some entries to the .dynamic section.  We fill in the
17146 	 values later, in elf32_arm_finish_dynamic_sections, but we
17147 	 must add the entries now so that we get the correct size for
17148 	 the .dynamic section.  The DT_DEBUG entry is filled in by the
17149 	 dynamic linker and used by the debugger.  */
17150 #define add_dynamic_entry(TAG, VAL) \
17151   _bfd_elf_add_dynamic_entry (info, TAG, VAL)
17152 
17153       if (bfd_link_executable (info))
17154 	{
17155 	  if (!add_dynamic_entry (DT_DEBUG, 0))
17156 	    return FALSE;
17157 	}
17158 
17159       if (plt)
17160 	{
17161 	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
17162 	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
17163 	      || !add_dynamic_entry (DT_PLTREL,
17164 				     htab->use_rel ? DT_REL : DT_RELA)
17165 	      || !add_dynamic_entry (DT_JMPREL, 0))
17166 	    return FALSE;
17167 
17168 	  if (htab->dt_tlsdesc_plt
17169 	      && (!add_dynamic_entry (DT_TLSDESC_PLT,0)
17170 		  || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
17171 	    return FALSE;
17172 	}
17173 
17174       if (relocs)
17175 	{
17176 	  if (htab->use_rel)
17177 	    {
17178 	      if (!add_dynamic_entry (DT_REL, 0)
17179 		  || !add_dynamic_entry (DT_RELSZ, 0)
17180 		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
17181 		return FALSE;
17182 	    }
17183 	  else
17184 	    {
17185 	      if (!add_dynamic_entry (DT_RELA, 0)
17186 		  || !add_dynamic_entry (DT_RELASZ, 0)
17187 		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
17188 		return FALSE;
17189 	    }
17190 	}
17191 
17192       /* If any dynamic relocs apply to a read-only section,
17193 	 then we need a DT_TEXTREL entry.  */
17194       if ((info->flags & DF_TEXTREL) == 0)
17195 	elf_link_hash_traverse (&htab->root, maybe_set_textrel, info);
17196 
17197       if ((info->flags & DF_TEXTREL) != 0)
17198 	{
17199 	  if (!add_dynamic_entry (DT_TEXTREL, 0))
17200 	    return FALSE;
17201 	}
17202       if (htab->vxworks_p
17203 	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
17204 	return FALSE;
17205     }
17206 #undef add_dynamic_entry
17207 
17208   return TRUE;
17209 }
17210 
17211 /* Size sections even though they're not dynamic.  We use this hook to
17212    set up _TLS_MODULE_BASE_, if needed.  */
17213 
17214 static bfd_boolean
17215 elf32_arm_always_size_sections (bfd *output_bfd,
17216 				struct bfd_link_info *info)
17217 {
17218   asection *tls_sec;
17219   struct elf32_arm_link_hash_table *htab;
17220 
17221   htab = elf32_arm_hash_table (info);
17222 
17223   if (bfd_link_relocatable (info))
17224     return TRUE;
17225 
17226   tls_sec = elf_hash_table (info)->tls_sec;
17227 
17228   if (tls_sec)
17229     {
17230       struct elf_link_hash_entry *tlsbase;
17231 
17232       tlsbase = elf_link_hash_lookup
17233 	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
17234 
17235       if (tlsbase)
17236 	{
17237 	  struct bfd_link_hash_entry *bh = NULL;
17238 	  const struct elf_backend_data *bed
17239 	    = get_elf_backend_data (output_bfd);
17240 
17241 	  if (!(_bfd_generic_link_add_one_symbol
17242 		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
17243 		 tls_sec, 0, NULL, FALSE,
17244 		 bed->collect, &bh)))
17245 	    return FALSE;
17246 
17247 	  tlsbase->type = STT_TLS;
17248 	  tlsbase = (struct elf_link_hash_entry *)bh;
17249 	  tlsbase->def_regular = 1;
17250 	  tlsbase->other = STV_HIDDEN;
17251 	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
17252 	}
17253     }
17254 
17255   if (htab->fdpic_p && !bfd_link_relocatable (info)
17256       && !bfd_elf_stack_segment_size (output_bfd, info,
17257 				      "__stacksize", DEFAULT_STACK_SIZE))
17258     return FALSE;
17259 
17260   return TRUE;
17261 }
17262 
17263 /* Finish up dynamic symbol handling.  We set the contents of various
17264    dynamic sections here.  */
17265 
17266 static bfd_boolean
17267 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
17268 				 struct bfd_link_info * info,
17269 				 struct elf_link_hash_entry * h,
17270 				 Elf_Internal_Sym * sym)
17271 {
17272   struct elf32_arm_link_hash_table *htab;
17273   struct elf32_arm_link_hash_entry *eh;
17274 
17275   htab = elf32_arm_hash_table (info);
17276   if (htab == NULL)
17277     return FALSE;
17278 
17279   eh = (struct elf32_arm_link_hash_entry *) h;
17280 
17281   if (h->plt.offset != (bfd_vma) -1)
17282     {
17283       if (!eh->is_iplt)
17284 	{
17285 	  BFD_ASSERT (h->dynindx != -1);
17286 	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
17287 					      h->dynindx, 0))
17288 	    return FALSE;
17289 	}
17290 
17291       if (!h->def_regular)
17292 	{
17293 	  /* Mark the symbol as undefined, rather than as defined in
17294 	     the .plt section.  */
17295 	  sym->st_shndx = SHN_UNDEF;
17296 	  /* If the symbol is weak we need to clear the value.
17297 	     Otherwise, the PLT entry would provide a definition for
17298 	     the symbol even if the symbol wasn't defined anywhere,
17299 	     and so the symbol would never be NULL.  Leave the value if
17300 	     there were any relocations where pointer equality matters
17301 	     (this is a clue for the dynamic linker, to make function
17302 	     pointer comparisons work between an application and shared
17303 	     library).  */
17304 	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
17305 	    sym->st_value = 0;
17306 	}
17307       else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
17308 	{
17309 	  /* At least one non-call relocation references this .iplt entry,
17310 	     so the .iplt entry is the function's canonical address.  */
17311 	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
17312 	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
17313 	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
17314 			   (output_bfd, htab->root.iplt->output_section));
17315 	  sym->st_value = (h->plt.offset
17316 			   + htab->root.iplt->output_section->vma
17317 			   + htab->root.iplt->output_offset);
17318 	}
17319     }
17320 
17321   if (h->needs_copy)
17322     {
17323       asection * s;
17324       Elf_Internal_Rela rel;
17325 
17326       /* This symbol needs a copy reloc.  Set it up.  */
17327       BFD_ASSERT (h->dynindx != -1
17328 		  && (h->root.type == bfd_link_hash_defined
17329 		      || h->root.type == bfd_link_hash_defweak));
17330 
17331       rel.r_addend = 0;
17332       rel.r_offset = (h->root.u.def.value
17333 		      + h->root.u.def.section->output_section->vma
17334 		      + h->root.u.def.section->output_offset);
17335       rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
17336       if (h->root.u.def.section == htab->root.sdynrelro)
17337 	s = htab->root.sreldynrelro;
17338       else
17339 	s = htab->root.srelbss;
17340       elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
17341     }
17342 
17343   /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
17344      and for FDPIC, the _GLOBAL_OFFSET_TABLE_ symbol is not absolute:
17345      it is relative to the ".got" section.  */
17346   if (h == htab->root.hdynamic
17347       || (!htab->fdpic_p && !htab->vxworks_p && h == htab->root.hgot))
17348     sym->st_shndx = SHN_ABS;
17349 
17350   return TRUE;
17351 }
17352 
17353 static void
17354 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17355 		    void *contents,
17356 		    const unsigned long *template, unsigned count)
17357 {
17358   unsigned ix;
17359 
17360   for (ix = 0; ix != count; ix++)
17361     {
17362       unsigned long insn = template[ix];
17363 
17364       /* Emit mov pc,rx if bx is not permitted.  */
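            /* For example, 0xe12fff1c (bx ip) becomes 0xe1a0f00c
               (mov pc, ip).  */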
17365       if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
17366 	insn = (insn & 0xf000000f) | 0x01a0f000;
17367       put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
17368     }
17369 }
17370 
17371 /* Install the special first PLT entry for elf32-arm-nacl.  Unlike
17372    other variants, NaCl needs this entry in a static executable's
17373    .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
17374    zero.  For .iplt really only the last bundle is useful, and .iplt
17375    could have a shorter first entry, with each individual PLT entry's
17376    relative branch calculated differently so it targets the last
17377    bundle instead of the instruction before it (labelled .Lplt_tail
17378    above).  But it's simpler to keep the size and layout of PLT0
17379    consistent with the dynamic case, at the cost of some dead code at
17380    the start of .iplt and the one dead store to the stack at the start
17381    of .Lplt_tail.  */
17382 static void
17383 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
17384 		   asection *plt, bfd_vma got_displacement)
17385 {
17386   unsigned int i;
17387 
17388   put_arm_insn (htab, output_bfd,
17389 		elf32_arm_nacl_plt0_entry[0]
17390 		| arm_movw_immediate (got_displacement),
17391 		plt->contents + 0);
17392   put_arm_insn (htab, output_bfd,
17393 		elf32_arm_nacl_plt0_entry[1]
17394 		| arm_movt_immediate (got_displacement),
17395 		plt->contents + 4);
17396 
17397   for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
17398     put_arm_insn (htab, output_bfd,
17399 		  elf32_arm_nacl_plt0_entry[i],
17400 		  plt->contents + (i * 4));
17401 }
17402 
17403 /* Finish up the dynamic sections.  */
17404 
17405 static bfd_boolean
17406 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
17407 {
17408   bfd * dynobj;
17409   asection * sgot;
17410   asection * sdyn;
17411   struct elf32_arm_link_hash_table *htab;
17412 
17413   htab = elf32_arm_hash_table (info);
17414   if (htab == NULL)
17415     return FALSE;
17416 
17417   dynobj = elf_hash_table (info)->dynobj;
17418 
17419   sgot = htab->root.sgotplt;
17420   /* A broken linker script might have discarded the dynamic sections.
17421      Catch this here so that we do not seg-fault later on.  */
17422   if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
17423     return FALSE;
17424   sdyn = bfd_get_linker_section (dynobj, ".dynamic");
17425 
17426   if (elf_hash_table (info)->dynamic_sections_created)
17427     {
17428       asection *splt;
17429       Elf32_External_Dyn *dyncon, *dynconend;
17430 
17431       splt = htab->root.splt;
17432       BFD_ASSERT (splt != NULL && sdyn != NULL);
17433       BFD_ASSERT (htab->symbian_p || sgot != NULL);
17434 
17435       dyncon = (Elf32_External_Dyn *) sdyn->contents;
17436       dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
17437 
17438       for (; dyncon < dynconend; dyncon++)
17439 	{
17440 	  Elf_Internal_Dyn dyn;
17441 	  const char * name;
17442 	  asection * s;
17443 
17444 	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
17445 
17446 	  switch (dyn.d_tag)
17447 	    {
17448 	      unsigned int type;
17449 
17450 	    default:
17451 	      if (htab->vxworks_p
17452 		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
17453 		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17454 	      break;
17455 
17456 	    case DT_HASH:
17457 	      name = ".hash";
17458 	      goto get_vma_if_bpabi;
17459 	    case DT_STRTAB:
17460 	      name = ".dynstr";
17461 	      goto get_vma_if_bpabi;
17462 	    case DT_SYMTAB:
17463 	      name = ".dynsym";
17464 	      goto get_vma_if_bpabi;
17465 	    case DT_VERSYM:
17466 	      name = ".gnu.version";
17467 	      goto get_vma_if_bpabi;
17468 	    case DT_VERDEF:
17469 	      name = ".gnu.version_d";
17470 	      goto get_vma_if_bpabi;
17471 	    case DT_VERNEED:
17472 	      name = ".gnu.version_r";
17473 	      goto get_vma_if_bpabi;
17474 
17475 	    case DT_PLTGOT:
17476 	      name = htab->symbian_p ? ".got" : ".got.plt";
17477 	      goto get_vma;
17478 	    case DT_JMPREL:
17479 	      name = RELOC_SECTION (htab, ".plt");
17480 	    get_vma:
17481 	      s = bfd_get_linker_section (dynobj, name);
17482 	      if (s == NULL)
17483 		{
17484 		  _bfd_error_handler
17485 		    (_("could not find section %s"), name);
17486 		  bfd_set_error (bfd_error_invalid_operation);
17487 		  return FALSE;
17488 		}
17489 	      if (!htab->symbian_p)
17490 		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
17491 	      else
17492 		/* In the BPABI, tags in the PT_DYNAMIC section point
17493 		   at the file offset, not the memory address, for the
17494 		   convenience of the post linker.  */
17495 		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
17496 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17497 	      break;
17498 
17499 	    get_vma_if_bpabi:
17500 	      if (htab->symbian_p)
17501 		goto get_vma;
17502 	      break;
17503 
17504 	    case DT_PLTRELSZ:
17505 	      s = htab->root.srelplt;
17506 	      BFD_ASSERT (s != NULL);
17507 	      dyn.d_un.d_val = s->size;
17508 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17509 	      break;
17510 
17511 	    case DT_RELSZ:
17512 	    case DT_RELASZ:
17513 	    case DT_REL:
17514 	    case DT_RELA:
17515 	      /* In the BPABI, the DT_REL tag must point at the file
17516 		 offset, not the VMA, of the first relocation
17517 		 section.  So, we use code similar to that in
17518 		 elflink.c, but do not check for SHF_ALLOC on the
17519 		 relocation section, since relocation sections are
17520 		 never allocated under the BPABI.  PLT relocs are also
17521 		 included.  */
17522 	      if (htab->symbian_p)
17523 		{
17524 		  unsigned int i;
17525 		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
17526 			  ? SHT_REL : SHT_RELA);
17527 		  dyn.d_un.d_val = 0;
17528 		  for (i = 1; i < elf_numsections (output_bfd); i++)
17529 		    {
17530 		      Elf_Internal_Shdr *hdr
17531 			= elf_elfsections (output_bfd)[i];
17532 		      if (hdr->sh_type == type)
17533 			{
17534 			  if (dyn.d_tag == DT_RELSZ
17535 			      || dyn.d_tag == DT_RELASZ)
17536 			    dyn.d_un.d_val += hdr->sh_size;
17537 			  else if ((ufile_ptr) hdr->sh_offset
17538 				   <= dyn.d_un.d_val - 1)
17539 			    dyn.d_un.d_val = hdr->sh_offset;
17540 			}
17541 		    }
17542 		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17543 		}
17544 	      break;
17545 
17546 	    case DT_TLSDESC_PLT:
17547 	      s = htab->root.splt;
17548 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17549 				+ htab->dt_tlsdesc_plt);
17550 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17551 	      break;
17552 
17553 	    case DT_TLSDESC_GOT:
17554 	      s = htab->root.sgot;
17555 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
17556 				+ htab->dt_tlsdesc_got);
17557 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17558 	      break;
17559 
17560 	      /* Set the bottom bit of DT_INIT/FINI if the
17561 		 corresponding function is Thumb.  */
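                    /* For instance, a Thumb _init at address 0x8000 would
                       be recorded as 0x8001, so that an interworking branch
                       from the dynamic linker enters it in Thumb state.  */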
17562 	    case DT_INIT:
17563 	      name = info->init_function;
17564 	      goto get_sym;
17565 	    case DT_FINI:
17566 	      name = info->fini_function;
17567 	    get_sym:
17568 	      /* If it wasn't set by elf_bfd_final_link
17569 		 then there is nothing to adjust.  */
17570 	      if (dyn.d_un.d_val != 0)
17571 		{
17572 		  struct elf_link_hash_entry * eh;
17573 
17574 		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
17575 					     FALSE, FALSE, TRUE);
17576 		  if (eh != NULL
17577 		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
17578 			 == ST_BRANCH_TO_THUMB)
17579 		    {
17580 		      dyn.d_un.d_val |= 1;
17581 		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
17582 		    }
17583 		}
17584 	      break;
17585 	    }
17586 	}
17587 
17588       /* Fill in the first entry in the procedure linkage table.  */
17589       if (splt->size > 0 && htab->plt_header_size)
17590 	{
17591 	  const bfd_vma *plt0_entry;
17592 	  bfd_vma got_address, plt_address, got_displacement;
17593 
17594 	  /* Calculate the addresses of the GOT and PLT.  */
17595 	  got_address = sgot->output_section->vma + sgot->output_offset;
17596 	  plt_address = splt->output_section->vma + splt->output_offset;
17597 
17598 	  if (htab->vxworks_p)
17599 	    {
17600 	      /* The VxWorks GOT is relocated by the dynamic linker.
17601 		 Therefore, we must emit relocations rather than simply
17602 		 computing the values now.  */
17603 	      Elf_Internal_Rela rel;
17604 
17605 	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
17606 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17607 			    splt->contents + 0);
17608 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17609 			    splt->contents + 4);
17610 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17611 			    splt->contents + 8);
17612 	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);
17613 
17614 	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
17615 	      rel.r_offset = plt_address + 12;
17616 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17617 	      rel.r_addend = 0;
17618 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
17619 				     htab->srelplt2->contents);
17620 	    }
17621 	  else if (htab->nacl_p)
17622 	    arm_nacl_put_plt0 (htab, output_bfd, splt,
17623 			       got_address + 8 - (plt_address + 16));
17624 	  else if (using_thumb_only (htab))
17625 	    {
17626 	      got_displacement = got_address - (plt_address + 12);
17627 
17628 	      plt0_entry = elf32_thumb2_plt0_entry;
17629 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17630 			    splt->contents + 0);
17631 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17632 			    splt->contents + 4);
17633 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17634 			    splt->contents + 8);
17635 
17636 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
17637 	    }
17638 	  else
17639 	    {
17640 	      got_displacement = got_address - (plt_address + 16);
17641 
17642 	      plt0_entry = elf32_arm_plt0_entry;
17643 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
17644 			    splt->contents + 0);
17645 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
17646 			    splt->contents + 4);
17647 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
17648 			    splt->contents + 8);
17649 	      put_arm_insn (htab, output_bfd, plt0_entry[3],
17650 			    splt->contents + 12);
17651 
17652 #ifdef FOUR_WORD_PLT
17653 	      /* The displacement value goes in the otherwise-unused
17654 		 last word of the second entry.  */
17655 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
17656 #else
17657 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
17658 #endif
17659 	    }
17660 	}
17661 
17662       /* UnixWare sets the entsize of .plt to 4, although that doesn't
17663 	 really seem like the right value.  */
17664       if (splt->output_section->owner == output_bfd)
17665 	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
17666 
17667       if (htab->dt_tlsdesc_plt)
17668 	{
17669 	  bfd_vma got_address
17670 	    = sgot->output_section->vma + sgot->output_offset;
17671 	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
17672 				    + htab->root.sgot->output_offset);
17673 	  bfd_vma plt_address
17674 	    = splt->output_section->vma + splt->output_offset;
17675 
17676 	  arm_put_trampoline (htab, output_bfd,
17677 			      splt->contents + htab->dt_tlsdesc_plt,
17678 			      dl_tlsdesc_lazy_trampoline, 6);
17679 
17680 	  bfd_put_32 (output_bfd,
17681 		      gotplt_address + htab->dt_tlsdesc_got
17682 		      - (plt_address + htab->dt_tlsdesc_plt)
17683 		      - dl_tlsdesc_lazy_trampoline[6],
17684 		      splt->contents + htab->dt_tlsdesc_plt + 24);
17685 	  bfd_put_32 (output_bfd,
17686 		      got_address - (plt_address + htab->dt_tlsdesc_plt)
17687 		      - dl_tlsdesc_lazy_trampoline[7],
17688 		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
17689 	}
17690 
17691       if (htab->tls_trampoline)
17692 	{
17693 	  arm_put_trampoline (htab, output_bfd,
17694 			      splt->contents + htab->tls_trampoline,
17695 			      tls_trampoline, 3);
17696 #ifdef FOUR_WORD_PLT
17697 	  bfd_put_32 (output_bfd, 0x00000000,
17698 		      splt->contents + htab->tls_trampoline + 12);
17699 #endif
17700 	}
17701 
17702       if (htab->vxworks_p
17703 	  && !bfd_link_pic (info)
17704 	  && htab->root.splt->size > 0)
17705 	{
17706 	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
17707 	     incorrect symbol indexes.  */
17708 	  int num_plts;
17709 	  unsigned char *p;
17710 
17711 	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
17712 		      / htab->plt_entry_size);
17713 	  p = htab->srelplt2->contents + RELOC_SIZE (htab);
17714 
17715 	  for (; num_plts; num_plts--)
17716 	    {
17717 	      Elf_Internal_Rela rel;
17718 
17719 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17720 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
17721 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17722 	      p += RELOC_SIZE (htab);
17723 
17724 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
17725 	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
17726 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
17727 	      p += RELOC_SIZE (htab);
17728 	    }
17729 	}
17730     }
17731 
17732   if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
17733     /* NaCl uses a special first entry in .iplt too.  */
17734     arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
17735 
17736   /* Fill in the first three entries in the global offset table.  */
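        /* The first word receives the address of the .dynamic section (or
           zero if there is none); the next two words are left as zero here
           and are conventionally filled in at run time by the dynamic
           linker.  */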
17737   if (sgot)
17738     {
17739       if (sgot->size > 0)
17740 	{
17741 	  if (sdyn == NULL)
17742 	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
17743 	  else
17744 	    bfd_put_32 (output_bfd,
17745 			sdyn->output_section->vma + sdyn->output_offset,
17746 			sgot->contents);
17747 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
17748 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
17749 	}
17750 
17751       elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
17752     }
17753 
17754   /* At the very end of the .rofixup section is a pointer to the GOT.  */
17755   if (htab->fdpic_p && htab->srofixup != NULL)
17756     {
17757       struct elf_link_hash_entry *hgot = htab->root.hgot;
17758 
17759       bfd_vma got_value = hgot->root.u.def.value
17760 	+ hgot->root.u.def.section->output_section->vma
17761 	+ hgot->root.u.def.section->output_offset;
17762 
17763       arm_elf_add_rofixup (output_bfd, htab->srofixup, got_value);
17764 
17765       /* Make sure we allocated and generated the same number of fixups.  */
17766       BFD_ASSERT (htab->srofixup->reloc_count * 4 == htab->srofixup->size);
17767     }
17768 
17769   return TRUE;
17770 }
17771 
17772 static bfd_boolean
17773 elf32_arm_init_file_header (bfd *abfd, struct bfd_link_info *link_info)
17774 {
17775   Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
17776   struct elf32_arm_link_hash_table *globals;
17777   struct elf_segment_map *m;
17778 
17779   if (!_bfd_elf_init_file_header (abfd, link_info))
17780     return FALSE;
17781 
17782   i_ehdrp = elf_elfheader (abfd);
17783 
17784   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
17785     i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
17786   i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
17787 
17788   if (link_info)
17789     {
17790       globals = elf32_arm_hash_table (link_info);
17791       if (globals != NULL && globals->byteswap_code)
17792 	i_ehdrp->e_flags |= EF_ARM_BE8;
17793 
17794       if (globals != NULL && globals->fdpic_p)
17795 	i_ehdrp->e_ident[EI_OSABI] |= ELFOSABI_ARM_FDPIC;
17796     }
17797 
17798   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
17799       && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
17800     {
17801       int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
17802       if (abi == AEABI_VFP_args_vfp)
17803 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
17804       else
17805 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
17806     }
17807 
17808   /* Scan each segment and set its p_flags attribute if it contains only
17809      sections with the SHF_ARM_PURECODE flag.  */
17810   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
17811     {
17812       unsigned int j;
17813 
17814       if (m->count == 0)
17815 	continue;
17816       for (j = 0; j < m->count; j++)
17817 	{
17818 	  if (!(elf_section_flags (m->sections[j]) & SHF_ARM_PURECODE))
17819 	    break;
17820 	}
17821       if (j == m->count)
17822 	{
17823 	  m->p_flags = PF_X;
17824 	  m->p_flags_valid = 1;
17825 	}
17826     }
17827   return TRUE;
17828 }
17829 
17830 static enum elf_reloc_type_class
17831 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
17832 			    const asection *rel_sec ATTRIBUTE_UNUSED,
17833 			    const Elf_Internal_Rela *rela)
17834 {
17835   switch ((int) ELF32_R_TYPE (rela->r_info))
17836     {
17837     case R_ARM_RELATIVE:
17838       return reloc_class_relative;
17839     case R_ARM_JUMP_SLOT:
17840       return reloc_class_plt;
17841     case R_ARM_COPY:
17842       return reloc_class_copy;
17843     case R_ARM_IRELATIVE:
17844       return reloc_class_ifunc;
17845     default:
17846       return reloc_class_normal;
17847     }
17848 }
17849 
17850 static void
17851 arm_final_write_processing (bfd *abfd)
17852 {
17853   bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
17854 }
17855 
17856 static bfd_boolean
17857 elf32_arm_final_write_processing (bfd *abfd)
17858 {
17859   arm_final_write_processing (abfd);
17860   return _bfd_elf_final_write_processing (abfd);
17861 }
17862 
17863 /* Return TRUE if NAME is the name of an ARM unwind table section.  */
17864 
17865 static bfd_boolean
17866 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
17867 {
17868   return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
17869 	  || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
17870 }
17871 
17872 
17873 /* Set the type and flags for an ARM section.  We do this based on
17874    the section name, which is a hack, but ought to work.  */
17875 
17876 static bfd_boolean
17877 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
17878 {
17879   const char * name;
17880 
17881   name = bfd_section_name (sec);
17882 
17883   if (is_arm_elf_unwind_section_name (abfd, name))
17884     {
17885       hdr->sh_type = SHT_ARM_EXIDX;
17886       hdr->sh_flags |= SHF_LINK_ORDER;
17887     }
17888 
17889   if (sec->flags & SEC_ELF_PURECODE)
17890     hdr->sh_flags |= SHF_ARM_PURECODE;
17891 
17892   return TRUE;
17893 }
17894 
17895 /* Handle an ARM specific section when reading an object file.  This is
17896    called when bfd_section_from_shdr finds a section with an unknown
17897    type.  */
17898 
17899 static bfd_boolean
17900 elf32_arm_section_from_shdr (bfd *abfd,
17901 			     Elf_Internal_Shdr * hdr,
17902 			     const char *name,
17903 			     int shindex)
17904 {
17905   /* There ought to be a place to keep ELF backend specific flags, but
17906      at the moment there isn't one.  We just keep track of the
17907      sections by their name, instead.  Fortunately, the ABI gives
17908      names for all the ARM specific sections, so we will probably get
17909      away with this.  */
17910   switch (hdr->sh_type)
17911     {
17912     case SHT_ARM_EXIDX:
17913     case SHT_ARM_PREEMPTMAP:
17914     case SHT_ARM_ATTRIBUTES:
17915       break;
17916 
17917     default:
17918       return FALSE;
17919     }
17920 
17921   if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
17922     return FALSE;
17923 
17924   return TRUE;
17925 }
17926 
17927 static _arm_elf_section_data *
17928 get_arm_elf_section_data (asection * sec)
17929 {
17930   if (sec && sec->owner && is_arm_elf (sec->owner))
17931     return elf32_arm_section_data (sec);
17932   else
17933     return NULL;
17934 }
17935 
17936 typedef struct
17937 {
17938   void *flaginfo;
17939   struct bfd_link_info *info;
17940   asection *sec;
17941   int sec_shndx;
17942   int (*func) (void *, const char *, Elf_Internal_Sym *,
17943 	       asection *, struct elf_link_hash_entry *);
17944 } output_arch_syminfo;
17945 
17946 enum map_symbol_type
17947 {
17948   ARM_MAP_ARM,
17949   ARM_MAP_THUMB,
17950   ARM_MAP_DATA
17951 };
17952 
17953 
17954 /* Output a single mapping symbol.  */
17955 
17956 static bfd_boolean
17957 elf32_arm_output_map_sym (output_arch_syminfo *osi,
17958 			  enum map_symbol_type type,
17959 			  bfd_vma offset)
17960 {
17961   static const char *names[3] = {"$a", "$t", "$d"};
17962   Elf_Internal_Sym sym;
17963 
17964   sym.st_value = osi->sec->output_section->vma
17965 		 + osi->sec->output_offset
17966 		 + offset;
17967   sym.st_size = 0;
17968   sym.st_other = 0;
17969   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
17970   sym.st_shndx = osi->sec_shndx;
17971   sym.st_target_internal = 0;
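  /* Also record the bare mapping class character ('a', 't' or 'd') in the
     section's own mapping table.  */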
17972   elf32_arm_section_map_add (osi->sec, names[type][1], offset);
17973   return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
17974 }
17975 
17976 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
17977    IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */
17978 
17979 static bfd_boolean
17980 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
17981 			    bfd_boolean is_iplt_entry_p,
17982 			    union gotplt_union *root_plt,
17983 			    struct arm_plt_info *arm_plt)
17984 {
17985   struct elf32_arm_link_hash_table *htab;
17986   bfd_vma addr, plt_header_size;
17987 
17988   if (root_plt->offset == (bfd_vma) -1)
17989     return TRUE;
17990 
17991   htab = elf32_arm_hash_table (osi->info);
17992   if (htab == NULL)
17993     return FALSE;
17994 
17995   if (is_iplt_entry_p)
17996     {
17997       osi->sec = htab->root.iplt;
17998       plt_header_size = 0;
17999     }
18000   else
18001     {
18002       osi->sec = htab->root.splt;
18003       plt_header_size = htab->plt_header_size;
18004     }
18005   osi->sec_shndx = (_bfd_elf_section_from_bfd_section
18006 		    (osi->info->output_bfd, osi->sec->output_section));
18007 
18008   addr = root_plt->offset & -2;
18009   if (htab->symbian_p)
18010     {
18011       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18012 	return FALSE;
18013       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
18014 	return FALSE;
18015     }
18016   else if (htab->vxworks_p)
18017     {
18018       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18019 	return FALSE;
18020       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
18021 	return FALSE;
18022       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
18023 	return FALSE;
18024       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
18025 	return FALSE;
18026     }
18027   else if (htab->nacl_p)
18028     {
18029       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18030 	return FALSE;
18031     }
18032   else if (htab->fdpic_p)
18033     {
18034       enum map_symbol_type type = using_thumb_only (htab)
18035 	? ARM_MAP_THUMB
18036 	: ARM_MAP_ARM;
18037 
18038       if (elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt))
18039 	if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
18040 	  return FALSE;
18041       if (!elf32_arm_output_map_sym (osi, type, addr))
18042 	return FALSE;
18043       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 16))
18044 	return FALSE;
18045       if (htab->plt_entry_size == 4 * ARRAY_SIZE (elf32_arm_fdpic_plt_entry))
18046 	if (!elf32_arm_output_map_sym (osi, type, addr + 24))
18047 	  return FALSE;
18048     }
18049   else if (using_thumb_only (htab))
18050     {
18051       if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
18052 	return FALSE;
18053     }
18054   else
18055     {
18056       bfd_boolean thumb_stub_p;
18057 
18058       thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
18059       if (thumb_stub_p)
18060 	{
18061 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
18062 	    return FALSE;
18063 	}
18064 #ifdef FOUR_WORD_PLT
18065       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18066 	return FALSE;
18067       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
18068 	return FALSE;
18069 #else
18070       /* A three-word PLT with no Thumb thunk contains only Arm code, so
18071 	 we only need to output a mapping symbol for the first PLT entry
18072 	 and for entries with Thumb thunks.  */
18073       if (thumb_stub_p || addr == plt_header_size)
18074 	{
18075 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
18076 	    return FALSE;
18077 	}
18078 #endif
18079     }
18080 
18081   return TRUE;
18082 }
18083 
18084 /* Output mapping symbols for PLT entries associated with H.  */
18085 
18086 static bfd_boolean
18087 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
18088 {
18089   output_arch_syminfo *osi = (output_arch_syminfo *) inf;
18090   struct elf32_arm_link_hash_entry *eh;
18091 
18092   if (h->root.type == bfd_link_hash_indirect)
18093     return TRUE;
18094 
18095   if (h->root.type == bfd_link_hash_warning)
18096     /* When warning symbols are created, they **replace** the "real"
18097        entry in the hash table, thus we never get to see the real
18098        symbol in a hash traversal.  So look at it now.  */
18099     h = (struct elf_link_hash_entry *) h->root.u.i.link;
18100 
18101   eh = (struct elf32_arm_link_hash_entry *) h;
18102   return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
18103 				     &h->plt, &eh->plt);
18104 }
18105 
18106 /* Bind a veneered symbol to its veneer, identified by its hash entry
18107    STUB_ENTRY.  The veneered location thus loses its symbol.  */
18108 
18109 static void
18110 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
18111 {
18112   struct elf32_arm_link_hash_entry *hash = stub_entry->h;
18113 
18114   BFD_ASSERT (hash);
18115   hash->root.root.u.def.section = stub_entry->stub_sec;
18116   hash->root.root.u.def.value = stub_entry->stub_offset;
18117   hash->root.size = stub_entry->stub_size;
18118 }
18119 
18120 /* Output a single local symbol for a generated stub.  */
18121 
18122 static bfd_boolean
18123 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
18124 			   bfd_vma offset, bfd_vma size)
18125 {
18126   Elf_Internal_Sym sym;
18127 
18128   sym.st_value = osi->sec->output_section->vma
18129 		 + osi->sec->output_offset
18130 		 + offset;
18131   sym.st_size = size;
18132   sym.st_other = 0;
18133   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
18134   sym.st_shndx = osi->sec_shndx;
18135   sym.st_target_internal = 0;
18136   return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
18137 }
18138 
18139 static bfd_boolean
18140 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
18141 		  void * in_arg)
18142 {
18143   struct elf32_arm_stub_hash_entry *stub_entry;
18144   asection *stub_sec;
18145   bfd_vma addr;
18146   char *stub_name;
18147   output_arch_syminfo *osi;
18148   const insn_sequence *template_sequence;
18149   enum stub_insn_type prev_type;
18150   int size;
18151   int i;
18152   enum map_symbol_type sym_type;
18153 
18154   /* Massage our args to the form they really have.  */
18155   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18156   osi = (output_arch_syminfo *) in_arg;
18157 
18158   stub_sec = stub_entry->stub_sec;
18159 
18160   /* Ensure this stub is attached to the current section being
18161      processed.  */
18162   if (stub_sec != osi->sec)
18163     return TRUE;
18164 
18165   addr = (bfd_vma) stub_entry->stub_offset;
18166   template_sequence = stub_entry->stub_template;
18167 
18168   if (arm_stub_sym_claimed (stub_entry->stub_type))
18169     arm_stub_claim_sym (stub_entry);
18170   else
18171     {
18172       stub_name = stub_entry->output_name;
18173       switch (template_sequence[0].type)
18174 	{
18175 	case ARM_TYPE:
18176 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
18177 					  stub_entry->stub_size))
18178 	    return FALSE;
18179 	  break;
18180 	case THUMB16_TYPE:
18181 	case THUMB32_TYPE:
18182 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
18183 					  stub_entry->stub_size))
18184 	    return FALSE;
18185 	  break;
18186 	default:
18187 	  BFD_FAIL ();
18188 	  return FALSE;
18189 	}
18190     }
18191 
18192   prev_type = DATA_TYPE;
18193   size = 0;
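  /* Walk the stub template and emit a new mapping symbol each time the
     instruction class (Arm, Thumb or data) changes.  */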
18194   for (i = 0; i < stub_entry->stub_template_size; i++)
18195     {
18196       switch (template_sequence[i].type)
18197 	{
18198 	case ARM_TYPE:
18199 	  sym_type = ARM_MAP_ARM;
18200 	  break;
18201 
18202 	case THUMB16_TYPE:
18203 	case THUMB32_TYPE:
18204 	  sym_type = ARM_MAP_THUMB;
18205 	  break;
18206 
18207 	case DATA_TYPE:
18208 	  sym_type = ARM_MAP_DATA;
18209 	  break;
18210 
18211 	default:
18212 	  BFD_FAIL ();
18213 	  return FALSE;
18214 	}
18215 
18216       if (template_sequence[i].type != prev_type)
18217 	{
18218 	  prev_type = template_sequence[i].type;
18219 	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
18220 	    return FALSE;
18221 	}
18222 
18223       switch (template_sequence[i].type)
18224 	{
18225 	case ARM_TYPE:
18226 	case THUMB32_TYPE:
18227 	  size += 4;
18228 	  break;
18229 
18230 	case THUMB16_TYPE:
18231 	  size += 2;
18232 	  break;
18233 
18234 	case DATA_TYPE:
18235 	  size += 4;
18236 	  break;
18237 
18238 	default:
18239 	  BFD_FAIL ();
18240 	  return FALSE;
18241 	}
18242     }
18243 
18244   return TRUE;
18245 }
18246 
18247 /* Output mapping symbols for linker generated sections,
18248    and for those data-only sections that do not have a
18249    $d.  */
18250 
18251 static bfd_boolean
18252 elf32_arm_output_arch_local_syms (bfd *output_bfd,
18253 				  struct bfd_link_info *info,
18254 				  void *flaginfo,
18255 				  int (*func) (void *, const char *,
18256 					       Elf_Internal_Sym *,
18257 					       asection *,
18258 					       struct elf_link_hash_entry *))
18259 {
18260   output_arch_syminfo osi;
18261   struct elf32_arm_link_hash_table *htab;
18262   bfd_vma offset;
18263   bfd_size_type size;
18264   bfd *input_bfd;
18265 
18266   htab = elf32_arm_hash_table (info);
18267   if (htab == NULL)
18268     return FALSE;
18269 
18270   check_use_blx (htab);
18271 
18272   osi.flaginfo = flaginfo;
18273   osi.info = info;
18274   osi.func = func;
18275 
18276   /* Add a $d mapping symbol to data-only sections that
18277      don't have any mapping symbol.  This may result in (harmless) redundant
18278      mapping symbols.  */
18279   for (input_bfd = info->input_bfds;
18280        input_bfd != NULL;
18281        input_bfd = input_bfd->link.next)
18282     {
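      /* Skip BFDs created by the linker itself and those without symbols.  */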
18283       if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
18284 	for (osi.sec = input_bfd->sections;
18285 	     osi.sec != NULL;
18286 	     osi.sec = osi.sec->next)
18287 	  {
18288 	    if (osi.sec->output_section != NULL
18289 		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
18290 		    != 0)
18291 		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
18292 		   == SEC_HAS_CONTENTS
18293 		&& get_arm_elf_section_data (osi.sec) != NULL
18294 		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
18295 		&& osi.sec->size > 0
18296 		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
18297 	      {
18298 		osi.sec_shndx = _bfd_elf_section_from_bfd_section
18299 		  (output_bfd, osi.sec->output_section);
18300 		if (osi.sec_shndx != (int)SHN_BAD)
18301 		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
18302 	      }
18303 	  }
18304     }
18305 
18306   /* ARM->Thumb glue.  */
18307   if (htab->arm_glue_size > 0)
18308     {
18309       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18310 					ARM2THUMB_GLUE_SECTION_NAME);
18311 
18312       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18313 	  (output_bfd, osi.sec->output_section);
18314       if (bfd_link_pic (info) || htab->root.is_relocatable_executable
18315 	  || htab->pic_veneer)
18316 	size = ARM2THUMB_PIC_GLUE_SIZE;
18317       else if (htab->use_blx)
18318 	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
18319       else
18320 	size = ARM2THUMB_STATIC_GLUE_SIZE;
18321 
18322       for (offset = 0; offset < htab->arm_glue_size; offset += size)
18323 	{
18324 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
18325 	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
18326 	}
18327     }
18328 
18329   /* Thumb->ARM glue.  */
18330   if (htab->thumb_glue_size > 0)
18331     {
18332       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18333 					THUMB2ARM_GLUE_SECTION_NAME);
18334 
18335       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18336 	  (output_bfd, osi.sec->output_section);
18337       size = THUMB2ARM_GLUE_SIZE;
18338 
18339       for (offset = 0; offset < htab->thumb_glue_size; offset += size)
18340 	{
18341 	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
18342 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
18343 	}
18344     }
18345 
18346   /* ARMv4 BX veneers.  */
18347   if (htab->bx_glue_size > 0)
18348     {
18349       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
18350 					ARM_BX_GLUE_SECTION_NAME);
18351 
18352       osi.sec_shndx = _bfd_elf_section_from_bfd_section
18353 	  (output_bfd, osi.sec->output_section);
18354 
18355       elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
18356     }
18357 
18358   /* Long-call stubs.  */
18359   if (htab->stub_bfd && htab->stub_bfd->sections)
18360     {
18361       asection* stub_sec;
18362 
18363       for (stub_sec = htab->stub_bfd->sections;
18364 	   stub_sec != NULL;
18365 	   stub_sec = stub_sec->next)
18366 	{
18367 	  /* Ignore non-stub sections.  */
18368 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
18369 	    continue;
18370 
18371 	  osi.sec = stub_sec;
18372 
18373 	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
18374 	    (output_bfd, osi.sec->output_section);
18375 
18376 	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
18377 	}
18378     }
18379 
18380   /* Finally, output mapping symbols for the PLT.  */
18381   if (htab->root.splt && htab->root.splt->size > 0)
18382     {
18383       osi.sec = htab->root.splt;
18384       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18385 		       (output_bfd, osi.sec->output_section));
18386 
18387       /* Output mapping symbols for the plt header.  SymbianOS does not have a
18388 	 plt header.  */
18389       if (htab->vxworks_p)
18390 	{
18391 	  /* VxWorks shared libraries have no PLT header.  */
18392 	  if (!bfd_link_pic (info))
18393 	    {
18394 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18395 		return FALSE;
18396 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18397 		return FALSE;
18398 	    }
18399 	}
18400       else if (htab->nacl_p)
18401 	{
18402 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18403 	    return FALSE;
18404 	}
18405       else if (using_thumb_only (htab) && !htab->fdpic_p)
18406 	{
18407 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
18408 	    return FALSE;
18409 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
18410 	    return FALSE;
18411 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
18412 	    return FALSE;
18413 	}
18414       else if (!htab->symbian_p && !htab->fdpic_p)
18415 	{
18416 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18417 	    return FALSE;
18418 #ifndef FOUR_WORD_PLT
18419 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
18420 	    return FALSE;
18421 #endif
18422 	}
18423     }
18424   if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
18425     {
18426       /* NaCl uses a special first entry in .iplt too.  */
18427       osi.sec = htab->root.iplt;
18428       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
18429 		       (output_bfd, osi.sec->output_section));
18430       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
18431 	return FALSE;
18432     }
18433   if ((htab->root.splt && htab->root.splt->size > 0)
18434       || (htab->root.iplt && htab->root.iplt->size > 0))
18435     {
18436       elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
18437       for (input_bfd = info->input_bfds;
18438 	   input_bfd != NULL;
18439 	   input_bfd = input_bfd->link.next)
18440 	{
18441 	  struct arm_local_iplt_info **local_iplt;
18442 	  unsigned int i, num_syms;
18443 
18444 	  local_iplt = elf32_arm_local_iplt (input_bfd);
18445 	  if (local_iplt != NULL)
18446 	    {
18447 	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
18448 	      for (i = 0; i < num_syms; i++)
18449 		if (local_iplt[i] != NULL
18450 		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
18451 						    &local_iplt[i]->root,
18452 						    &local_iplt[i]->arm))
18453 		  return FALSE;
18454 	    }
18455 	}
18456     }
18457   if (htab->dt_tlsdesc_plt != 0)
18458     {
18459       /* Mapping symbols for the lazy tls trampoline.  */
18460       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
18461 	return FALSE;
18462 
18463       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18464 				     htab->dt_tlsdesc_plt + 24))
18465 	return FALSE;
18466     }
18467   if (htab->tls_trampoline != 0)
18468     {
18469       /* Mapping symbols for the tls trampoline.  */
18470       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
18471 	return FALSE;
18472 #ifdef FOUR_WORD_PLT
18473       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
18474 				     htab->tls_trampoline + 12))
18475 	return FALSE;
18476 #endif
18477     }
18478 
18479   return TRUE;
18480 }
18481 
18482 /* Filter normal symbols of CMSE entry functions of ABFD to include in
18483    the import library.  All SYMCOUNT symbols of ABFD can be examined
18484    from their pointers in SYMS.  Pointers of symbols to keep should be
18485    stored contiguously at the beginning of that array.
18486 
18487    Returns the number of symbols to keep.  */
18488 
18489 static unsigned int
18490 elf32_arm_filter_cmse_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18491 			       struct bfd_link_info *info,
18492 			       asymbol **syms, long symcount)
18493 {
18494   size_t maxnamelen;
18495   char *cmse_name;
18496   long src_count, dst_count = 0;
18497   struct elf32_arm_link_hash_table *htab;
18498 
18499   htab = elf32_arm_hash_table (info);
18500   if (!htab->stub_bfd || !htab->stub_bfd->sections)
18501     symcount = 0;
18502 
18503   maxnamelen = 128;
18504   cmse_name = (char *) bfd_malloc (maxnamelen);
18505   BFD_ASSERT (cmse_name);
18506 
18507   for (src_count = 0; src_count < symcount; src_count++)
18508     {
18509       struct elf32_arm_link_hash_entry *cmse_hash;
18510       asymbol *sym;
18511       flagword flags;
18512       char *name;
18513       size_t namelen;
18514 
18515       sym = syms[src_count];
18516       flags = sym->flags;
18517       name = (char *) bfd_asymbol_name (sym);
18518 
18519       if ((flags & BSF_FUNCTION) != BSF_FUNCTION)
18520 	continue;
18521       if (!(flags & (BSF_GLOBAL | BSF_WEAK)))
18522 	continue;
18523 
18524       namelen = strlen (name) + sizeof (CMSE_PREFIX) + 1;
18525       if (namelen > maxnamelen)
18526 	{
18527 	  cmse_name = (char *)
18528 	    bfd_realloc (cmse_name, namelen);
18529 	  maxnamelen = namelen;
18530 	}
18531       snprintf (cmse_name, maxnamelen, "%s%s", CMSE_PREFIX, name);
18532       cmse_hash = (struct elf32_arm_link_hash_entry *)
18533 	elf_link_hash_lookup (&(htab)->root, cmse_name, FALSE, FALSE, TRUE);
18534 
18535       if (!cmse_hash
18536 	  || (cmse_hash->root.root.type != bfd_link_hash_defined
18537 	      && cmse_hash->root.root.type != bfd_link_hash_defweak)
18538 	  || cmse_hash->root.type != STT_FUNC)
18539 	continue;
18540 
18541       syms[dst_count++] = sym;
18542     }
18543   free (cmse_name);
18544 
18545   syms[dst_count] = NULL;
18546 
18547   return dst_count;
18548 }
18549 
18550 /* Filter symbols of ABFD to include in the import library.  All
18551    SYMCOUNT symbols of ABFD can be examined from their pointers in
18552    SYMS.  Pointers of symbols to keep should be stored contiguously at
18553    the beginning of that array.
18554 
18555    Returns the number of symbols to keep.  */
18556 
18557 static unsigned int
18558 elf32_arm_filter_implib_symbols (bfd *abfd ATTRIBUTE_UNUSED,
18559 				 struct bfd_link_info *info,
18560 				 asymbol **syms, long symcount)
18561 {
18562   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
18563 
18564   /* Requirement 8 of "ARM v8-M Security Extensions: Requirements on
18565      Development Tools" (ARM-ECM-0359818) mandates that the Secure Gateway
18566      import library be a relocatable object file.  */
18567   BFD_ASSERT (!(bfd_get_file_flags (info->out_implib_bfd) & EXEC_P));
18568   if (globals->cmse_implib)
18569     return elf32_arm_filter_cmse_symbols (abfd, info, syms, symcount);
18570   else
18571     return _bfd_elf_filter_global_symbols (abfd, info, syms, symcount);
18572 }
18573 
18574 /* Allocate target specific section data.  */
18575 
18576 static bfd_boolean
18577 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
18578 {
18579   if (!sec->used_by_bfd)
18580     {
18581       _arm_elf_section_data *sdata;
18582       bfd_size_type amt = sizeof (*sdata);
18583 
18584       sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
18585       if (sdata == NULL)
18586 	return FALSE;
18587       sec->used_by_bfd = sdata;
18588     }
18589 
18590   return _bfd_elf_new_section_hook (abfd, sec);
18591 }
18592 
18593 
18594 /* Used to order a list of mapping symbols by address.  */
18595 
18596 static int
18597 elf32_arm_compare_mapping (const void * a, const void * b)
18598 {
18599   const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
18600   const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
18601 
18602   if (amap->vma > bmap->vma)
18603     return 1;
18604   else if (amap->vma < bmap->vma)
18605     return -1;
18606   else if (amap->type > bmap->type)
18607     /* Ensure results do not depend on the host qsort for objects with
18608        multiple mapping symbols at the same address by sorting on type
18609        after vma.  */
18610     return 1;
18611   else if (amap->type < bmap->type)
18612     return -1;
18613   else
18614     return 0;
18615 }
18616 
18617 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */
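/* For example, offset_prel31 (0x7ffffffc, 8) yields 0x00000004: the low
   31 bits wrap around while bit 31 is left untouched.  */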
18618 
18619 static unsigned long
18620 offset_prel31 (unsigned long addr, bfd_vma offset)
18621 {
18622   return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
18623 }
18624 
18625 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
18626    relocations.  */
18627 
18628 static void
18629 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
18630 {
18631   unsigned long first_word = bfd_get_32 (output_bfd, from);
18632   unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
18633 
18634   /* High bit of first word is supposed to be zero.  */
18635   if ((first_word & 0x80000000ul) == 0)
18636     first_word = offset_prel31 (first_word, offset);
18637 
18638   /* If the high bit of the second word is clear, and the word is not 0x1
18639      (EXIDX_CANTUNWIND), it is an offset to an .ARM.extab entry.  */
18640   if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
18641     second_word = offset_prel31 (second_word, offset);
18642 
18643   bfd_put_32 (output_bfd, first_word, to);
18644   bfd_put_32 (output_bfd, second_word, to + 4);
18645 }
18646 
18647 /* Data for make_branch_to_a8_stub().  */
18648 
18649 struct a8_branch_to_stub_data
18650 {
18651   asection *writing_section;
18652   bfd_byte *contents;
18653 };
18654 
18655 
18656 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
18657    places for a particular section.  */
18658 
18659 static bfd_boolean
18660 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
18661 		       void *in_arg)
18662 {
18663   struct elf32_arm_stub_hash_entry *stub_entry;
18664   struct a8_branch_to_stub_data *data;
18665   bfd_byte *contents;
18666   unsigned long branch_insn;
18667   bfd_vma veneered_insn_loc, veneer_entry_loc;
18668   bfd_signed_vma branch_offset;
18669   bfd *abfd;
18670   unsigned int loc;
18671 
18672   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
18673   data = (struct a8_branch_to_stub_data *) in_arg;
18674 
18675   if (stub_entry->target_section != data->writing_section
18676       || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
18677     return TRUE;
18678 
18679   contents = data->contents;
18680 
18681   /* We use target_section as Cortex-A8 erratum workaround stubs are only
18682      generated when both source and target are in the same section.  */
18683   veneered_insn_loc = stub_entry->target_section->output_section->vma
18684 		      + stub_entry->target_section->output_offset
18685 		      + stub_entry->source_value;
18686 
18687   veneer_entry_loc = stub_entry->stub_sec->output_section->vma
18688 		     + stub_entry->stub_sec->output_offset
18689 		     + stub_entry->stub_offset;
18690 
18691   if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
18692     veneered_insn_loc &= ~3u;
18693 
18694   branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
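  /* The -4 accounts for the PC of a Thumb branch reading as the address of
     the branch instruction plus 4.  */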
18695 
18696   abfd = stub_entry->target_section->owner;
18697   loc = stub_entry->source_value;
18698 
18699   /* We attempt to avoid this condition by setting stubs_always_after_branch
18700      in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
18701      This check is just to be on the safe side...  */
18702   if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
18703     {
18704       _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub is "
18705 			    "allocated in unsafe location"), abfd);
18706       return FALSE;
18707     }
18708 
18709   switch (stub_entry->stub_type)
18710     {
18711     case arm_stub_a8_veneer_b:
18712     case arm_stub_a8_veneer_b_cond:
18713       branch_insn = 0xf0009000;
18714       goto jump24;
18715 
18716     case arm_stub_a8_veneer_blx:
18717       branch_insn = 0xf000e800;
18718       goto jump24;
18719 
18720     case arm_stub_a8_veneer_bl:
18721       {
18722 	unsigned int i1, j1, i2, j2, s;
18723 
18724 	branch_insn = 0xf000d000;
18725 
18726       jump24:
18727 	if (branch_offset < -16777216 || branch_offset > 16777214)
18728 	  {
18729 	    /* There's not much we can do apart from complain if this
18730 	       happens.  */
18731 	    _bfd_error_handler (_("%pB: error: Cortex-A8 erratum stub out "
18732 				  "of range (input file too large)"), abfd);
18733 	    return FALSE;
18734 	  }
18735 
18736 	/* i1 = not(j1 eor s), so:
18737 	   not i1 = j1 eor s
18738 	   j1 = (not i1) eor s.  */
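	/* The variable fields are packed as S (bit 26), imm10 (bits 25-16),
	   J1 (bit 13), J2 (bit 11) and imm11 (bits 10-0), encoding a branch
	   offset of S:I1:I2:imm10:imm11:0.  */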
18739 
18740 	branch_insn |= (branch_offset >> 1) & 0x7ff;
18741 	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
18742 	i2 = (branch_offset >> 22) & 1;
18743 	i1 = (branch_offset >> 23) & 1;
18744 	s = (branch_offset >> 24) & 1;
18745 	j1 = (!i1) ^ s;
18746 	j2 = (!i2) ^ s;
18747 	branch_insn |= j2 << 11;
18748 	branch_insn |= j1 << 13;
18749 	branch_insn |= s << 26;
18750       }
18751       break;
18752 
18753     default:
18754       BFD_FAIL ();
18755       return FALSE;
18756     }
18757 
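  /* A 32-bit Thumb-2 instruction is stored as two halfwords, with the
     halfword holding the most significant bits of the encoding at the lower
     address.  */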
18758   bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
18759   bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
18760 
18761   return TRUE;
18762 }
18763 
18764 /* Beginning of stm32l4xx work-around.  */
18765 
18766 /* Functions encoding the instructions needed to emit the
18767    fix-stm32l4xx-629360 work-around.
18768    Encodings are taken from the
18769    ARM (C) Architecture Reference Manual,
18770    ARMv7-A and ARMv7-R edition,
18771    ARM DDI 0406C.b (ID072512).  */
18772 
18773 static inline bfd_vma
18774 create_instruction_branch_absolute (int branch_offset)
18775 {
18776   /* A8.8.18 B (A8-334)
18777      B target_address (Encoding T4).  */
18778   /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
18779   /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
18780   /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */
18781 
18782   int s = ((branch_offset & 0x1000000) >> 24);
18783   int j1 = s ^ !((branch_offset & 0x800000) >> 23);
18784   int j2 = s ^ !((branch_offset & 0x400000) >> 22);
18785 
18786   if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
18787     BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");
18788 
18789   bfd_vma patched_inst = 0xf0009000
18790     | s << 26 /* S.  */
18791     | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
18792     | j1 << 13 /* J1.  */
18793     | j2 << 11 /* J2.  */
18794     | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */
18795 
18796   return patched_inst;
18797 }
18798 
18799 static inline bfd_vma
18800 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
18801 {
18802   /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
18803      LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
18804   bfd_vma patched_inst = 0xe8900000
18805     | (/*W=*/wback << 21)
18806     | (base_reg << 16)
18807     | (reg_mask & 0x0000ffff);
18808 
18809   return patched_inst;
18810 }
18811 
18812 static inline bfd_vma
18813 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
18814 {
18815   /* A8.8.60 LDMDB/LDMEA (A8-402)
18816      LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
18817   bfd_vma patched_inst = 0xe9100000
18818     | (/*W=*/wback << 21)
18819     | (base_reg << 16)
18820     | (reg_mask & 0x0000ffff);
18821 
18822   return patched_inst;
18823 }
18824 
18825 static inline bfd_vma
18826 create_instruction_mov (int target_reg, int source_reg)
18827 {
18828   /* A8.8.103 MOV (register) (A8-486)
18829      MOV Rd, Rm (Encoding T1).  */
18830   bfd_vma patched_inst = 0x4600
18831     | (target_reg & 0x7)
18832     | ((target_reg & 0x8) >> 3) << 7
18833     | (source_reg << 3);
18834 
18835   return patched_inst;
18836 }
18837 
18838 static inline bfd_vma
18839 create_instruction_sub (int target_reg, int source_reg, int value)
18840 {
18841   /* A8.8.221 SUB (immediate) (A8-708)
18842      SUB Rd, Rn, #value (Encoding T3).  */
18843   bfd_vma patched_inst = 0xf1a00000
18844     | (target_reg << 8)
18845     | (source_reg << 16)
18846     | (/*S=*/0 << 20)
18847     | ((value & 0x800) >> 11) << 26
18848     | ((value & 0x700) >>  8) << 12
18849     | (value & 0x0ff);
18850 
18851   return patched_inst;
18852 }
18853 
18854 static inline bfd_vma
18855 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
18856 			   int first_reg)
18857 {
18858   /* A8.8.332 VLDM (A8-922)
18859      VLDM{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
18860   bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
18861     | (/*W=*/wback << 21)
18862     | (base_reg << 16)
18863     | (num_words & 0x000000ff)
18864     | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
18865     | (first_reg & 0x00000001) << 22;
18866 
18867   return patched_inst;
18868 }
18869 
18870 static inline bfd_vma
18871 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
18872 			   int first_reg)
18873 {
18874   /* A8.8.332 VLDM (A8-922)
18875      VLDM{MODE} Rn!, {list} (Encoding T1 or T2).  */
18876   bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
18877     | (base_reg << 16)
18878     | (num_words & 0x000000ff)
18879     | (((unsigned) first_reg >> 1) & 0x0000000f) << 12
18880     | (first_reg & 0x00000001) << 22;
18881 
18882   return patched_inst;
18883 }
18884 
18885 static inline bfd_vma
18886 create_instruction_udf_w (int value)
18887 {
18888   /* A8.8.247 UDF (A8-758)
18889      Undefined (Encoding T2).  */
18890   bfd_vma patched_inst = 0xf7f0a000
18891     | (value & 0x00000fff)
18892     | (value & 0x000f0000) << 16;
18893 
18894   return patched_inst;
18895 }
18896 
18897 static inline bfd_vma
18898 create_instruction_udf (int value)
18899 {
18900   /* A8.8.247 UDF (A8-758)
18901      Undefined (Encoding T1).  */
18902   bfd_vma patched_inst = 0xde00
18903     | (value & 0xff);
18904 
18905   return patched_inst;
18906 }
18907 
18908 /* Functions that write an instruction into memory and return the next
18909    memory position to write to.  */
18910 
18911 static inline bfd_byte *
18912 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
18913 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
18914 {
18915   put_thumb2_insn (htab, output_bfd, insn, pt);
18916   return pt + 4;
18917 }
18918 
18919 static inline bfd_byte *
18920 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
18921 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
18922 {
18923   put_thumb_insn (htab, output_bfd, insn, pt);
18924   return pt + 2;
18925 }
18926 
18927 /* Function that fills a memory region with T1 and T2 UDF instructions,
18928    taking care of alignment.  */
18929 
18930 static bfd_byte *
18931 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
18932 			 bfd *			 output_bfd,
18933 			 const bfd_byte * const	 base_stub_contents,
18934 			 bfd_byte * const	 from_stub_contents,
18935 			 const bfd_byte * const	 end_stub_contents)
18936 {
18937   bfd_byte *current_stub_contents = from_stub_contents;
18938 
18939   /* Fill the remainder of the stub with deterministic contents: UDF
18940      instructions.
18941      Check whether a T1 UDF is needed first to realign to a 4-byte
18942      boundary, so that T2 UDFs can be used from then on.  */
18943   if ((current_stub_contents < end_stub_contents)
18944       && !((current_stub_contents - base_stub_contents) % 2)
18945       && ((current_stub_contents - base_stub_contents) % 4))
18946     current_stub_contents =
18947       push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
18948 			  create_instruction_udf (0));
18949 
18950   for (; current_stub_contents < end_stub_contents;)
18951     current_stub_contents =
18952       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18953 			  create_instruction_udf_w (0));
18954 
18955   return current_stub_contents;
18956 }
18957 
18958 /* Functions writing the stream of instructions that makes up the
18959    replacement sequence for LDMIA, LDMDB and VLDM respectively.  */
18960 
18961 static void
18962 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
18963 				       bfd * output_bfd,
18964 				       const insn32 initial_insn,
18965 				       const bfd_byte *const initial_insn_addr,
18966 				       bfd_byte *const base_stub_contents)
18967 {
18968   int wback = (initial_insn & 0x00200000) >> 21;
18969   int ri, rn = (initial_insn & 0x000F0000) >> 16;
18970   int insn_all_registers = initial_insn & 0x0000ffff;
18971   int insn_low_registers, insn_high_registers;
18972   int usable_register_mask;
18973   int nb_registers = elf32_arm_popcount (insn_all_registers);
18974   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
18975   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
18976   bfd_byte *current_stub_contents = base_stub_contents;
18977 
18978   BFD_ASSERT (is_thumb2_ldmia (initial_insn));
18979 
18980   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
18981      sequences of fewer than 8 registers, which do not trigger the
18982      hardware issue.  */
18983   if (nb_registers <= 8)
18984     {
18985       /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
18986       current_stub_contents =
18987 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18988 			    initial_insn);
18989 
18990       /* B initial_insn_addr+4.  */
18991       if (!restore_pc)
18992 	current_stub_contents =
18993 	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
18994 			      create_instruction_branch_absolute
18995 			      (initial_insn_addr - current_stub_contents));
18996 
18997       /* Fill the remainder of the stub with deterministic contents.  */
18998       current_stub_contents =
18999 	stm32l4xx_fill_stub_udf (htab, output_bfd,
19000 				 base_stub_contents, current_stub_contents,
19001 				 base_stub_contents +
19002 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19003 
19004       return;
19005     }
19006 
19007   /* - reg_list[13] == 0.  */
19008   BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19009 
19010   /* - reg_list[14] and reg_list[15] are not both set.  */
19011   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19012 
19013   /* - if (wback==1) reg_list[rn] == 0.  */
19014   BFD_ASSERT (!wback || !restore_rn);
19015 
19016   /* - nb_registers > 8.  */
19017   BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19018 
19019   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
19020 
19021   /* In the following algorithm, we split this wide LDM using 2 LDM insns:
19022     - One with the 7 lowest registers (register mask 0x007F).
19023       This LDM will end up containing between 2 and 7 registers.
19024     - One with the 7 highest registers (register mask 0xDF80).
19025       This LDM will end up containing between 2 and 7 registers.  */
19026   insn_low_registers = insn_all_registers & 0x007F;
19027   insn_high_registers = insn_all_registers & 0xDF80;
19028 
19029   /* A spare register may be needed during this veneer to temporarily
19030      handle the base register.  This register will be restored with the
19031      last LDM operation.
19032      The usable register may be any general purpose register (that
19033      excludes PC, SP, LR : register mask is 0x1FFF).  */
19034   usable_register_mask = 0x1FFF;
19035 
19036   /* Generate the stub function.  */
19037   if (wback)
19038     {
19039       /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
19040       current_stub_contents =
19041 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19042 			    create_instruction_ldmia
19043 			    (rn, /*wback=*/1, insn_low_registers));
19044 
19045       /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
19046       current_stub_contents =
19047 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19048 			    create_instruction_ldmia
19049 			    (rn, /*wback=*/1, insn_high_registers));
19050       if (!restore_pc)
19051 	{
19052 	  /* B initial_insn_addr+4.  */
19053 	  current_stub_contents =
19054 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19055 				create_instruction_branch_absolute
19056 				(initial_insn_addr - current_stub_contents));
19057        }
19058     }
19059   else /* if (!wback).  */
19060     {
19061       ri = rn;
19062 
19063       /* If Rn is not part of the high-register-list, move it there.  */
19064       if (!(insn_high_registers & (1 << rn)))
19065 	{
19066 	  /* Choose a Ri in the high-register-list that will be restored.  */
19067 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19068 
19069 	  /* MOV Ri, Rn.  */
19070 	  current_stub_contents =
19071 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19072 				create_instruction_mov (ri, rn));
19073 	}
19074 
19075       /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
19076       current_stub_contents =
19077 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19078 			    create_instruction_ldmia
19079 			    (ri, /*wback=*/1, insn_low_registers));
19080 
19081       /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
19082       current_stub_contents =
19083 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19084 			    create_instruction_ldmia
19085 			    (ri, /*wback=*/0, insn_high_registers));
19086 
19087       if (!restore_pc)
19088 	{
19089 	  /* B initial_insn_addr+4.  */
19090 	  current_stub_contents =
19091 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19092 				create_instruction_branch_absolute
19093 				(initial_insn_addr - current_stub_contents));
19094 	}
19095     }
19096 
19097   /* Fill the remainder of the stub with deterministic contents.  */
19098   current_stub_contents =
19099     stm32l4xx_fill_stub_udf (htab, output_bfd,
19100 			     base_stub_contents, current_stub_contents,
19101 			     base_stub_contents +
19102 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19103 }
19104 
19105 static void
19106 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
19107 				       bfd * output_bfd,
19108 				       const insn32 initial_insn,
19109 				       const bfd_byte *const initial_insn_addr,
19110 				       bfd_byte *const base_stub_contents)
19111 {
19112   int wback = (initial_insn & 0x00200000) >> 21;
19113   int ri, rn = (initial_insn & 0x000f0000) >> 16;
19114   int insn_all_registers = initial_insn & 0x0000ffff;
19115   int insn_low_registers, insn_high_registers;
19116   int usable_register_mask;
19117   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
19118   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
19119   int nb_registers = elf32_arm_popcount (insn_all_registers);
19120   bfd_byte *current_stub_contents = base_stub_contents;
19121 
19122   BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
19123 
19124   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with load
19125      sequences of fewer than 8 registers, which do not trigger the
19126      hardware issue.  */
19127   if (nb_registers <= 8)
19128     {
19129       /* UNTOUCHED : LDMDB Rn{!}, {R-all-register-list}.  */
19130       current_stub_contents =
19131 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19132 			    initial_insn);
19133 
19134       /* B initial_insn_addr+4.  */
19135       current_stub_contents =
19136 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19137 			    create_instruction_branch_absolute
19138 			    (initial_insn_addr - current_stub_contents));
19139 
19140       /* Fill the remainder of the stub with deterministic contents.  */
19141       current_stub_contents =
19142 	stm32l4xx_fill_stub_udf (htab, output_bfd,
19143 				 base_stub_contents, current_stub_contents,
19144 				 base_stub_contents +
19145 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19146 
19147       return;
19148     }
19149 
19150   /* - reg_list[13] == 0.  */
19151   BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
19152 
19153   /* - reg_list[14] and reg_list[15] are not both set.  */
19154   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
19155 
19156   /* - if (wback==1) reg_list[rn] == 0.  */
19157   BFD_ASSERT (!wback || !restore_rn);
19158 
19159   /* - nb_registers > 8.  */
19160   BFD_ASSERT (elf32_arm_popcount (insn_all_registers) > 8);
19161 
19162   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
19163 
19164   /* In the following algorithm, we split this wide LDM using 2 LDM insns:
19165     - One with the 7 lowest registers (register mask 0x007F).
19166       This LDM will end up containing between 2 and 7 registers.
19167     - One with the 7 highest registers (register mask 0xDF80).
19168       This LDM will end up containing between 2 and 7 registers.  */
19169   insn_low_registers = insn_all_registers & 0x007F;
19170   insn_high_registers = insn_all_registers & 0xDF80;
19171 
19172   /* A spare register may be needed during this veneer to temporarily
19173      handle the base register.  This register will be restored with
19174      the last LDM operation.
19175      The usable register may be any general purpose register (that excludes
19176      PC, SP, LR : register mask is 0x1FFF).  */
19177   usable_register_mask = 0x1FFF;
19178 
19179   /* Generate the stub function.  */
19180   if (!wback && !restore_pc && !restore_rn)
19181     {
19182       /* Choose a Ri in the low-register-list that will be restored.  */
19183       ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19184 
19185       /* MOV Ri, Rn.  */
19186       current_stub_contents =
19187 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19188 			    create_instruction_mov (ri, rn));
19189 
19190       /* LDMDB Ri!, {R-high-register-list}.  */
19191       current_stub_contents =
19192 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19193 			    create_instruction_ldmdb
19194 			    (ri, /*wback=*/1, insn_high_registers));
19195 
19196       /* LDMDB Ri, {R-low-register-list}.  */
19197       current_stub_contents =
19198 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19199 			    create_instruction_ldmdb
19200 			    (ri, /*wback=*/0, insn_low_registers));
19201 
19202       /* B initial_insn_addr+4.  */
19203       current_stub_contents =
19204 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19205 			    create_instruction_branch_absolute
19206 			    (initial_insn_addr - current_stub_contents));
19207     }
19208   else if (wback && !restore_pc && !restore_rn)
19209     {
19210       /* LDMDB Rn!, {R-high-register-list}.  */
19211       current_stub_contents =
19212 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19213 			    create_instruction_ldmdb
19214 			    (rn, /*wback=*/1, insn_high_registers));
19215 
19216       /* LDMDB Rn!, {R-low-register-list}.  */
19217       current_stub_contents =
19218 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19219 			    create_instruction_ldmdb
19220 			    (rn, /*wback=*/1, insn_low_registers));
19221 
19222       /* B initial_insn_addr+4.  */
19223       current_stub_contents =
19224 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19225 			    create_instruction_branch_absolute
19226 			    (initial_insn_addr - current_stub_contents));
19227     }
19228   else if (!wback && restore_pc && !restore_rn)
19229     {
19230       /* Choose a Ri in the high-register-list that will be restored.  */
19231       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19232 
19233       /* SUB Ri, Rn, #(4*nb_registers).  */
19234       current_stub_contents =
19235 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19236 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
19237 
19238       /* LDMIA Ri!, {R-low-register-list}.  */
19239       current_stub_contents =
19240 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19241 			    create_instruction_ldmia
19242 			    (ri, /*wback=*/1, insn_low_registers));
19243 
19244       /* LDMIA Ri, {R-high-register-list}.  */
19245       current_stub_contents =
19246 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19247 			    create_instruction_ldmia
19248 			    (ri, /*wback=*/0, insn_high_registers));
19249     }
19250   else if (wback && restore_pc && !restore_rn)
19251     {
19252       /* Choose a Ri in the high-register-list that will be restored.  */
19253       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19254 
19255       /* SUB Rn, Rn, #(4*nb_registers)  */
19256       current_stub_contents =
19257 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19258 			    create_instruction_sub (rn, rn, (4 * nb_registers)));
19259 
19260       /* MOV Ri, Rn.  */
19261       current_stub_contents =
19262 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19263 			    create_instruction_mov (ri, rn));
19264 
19265       /* LDMIA Ri!, {R-low-register-list}.  */
19266       current_stub_contents =
19267 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19268 			    create_instruction_ldmia
19269 			    (ri, /*wback=*/1, insn_low_registers));
19270 
19271       /* LDMIA Ri, {R-high-register-list}.  */
19272       current_stub_contents =
19273 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19274 			    create_instruction_ldmia
19275 			    (ri, /*wback=*/0, insn_high_registers));
19276     }
19277   else if (!wback && !restore_pc && restore_rn)
19278     {
19279       ri = rn;
19280       if (!(insn_low_registers & (1 << rn)))
19281 	{
19282 	  /* Choose a Ri in the low-register-list that will be restored.  */
19283 	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
19284 
19285 	  /* MOV Ri, Rn.  */
19286 	  current_stub_contents =
19287 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
19288 				create_instruction_mov (ri, rn));
19289 	}
19290 
19291       /* LDMDB Ri!, {R-high-register-list}.  */
19292       current_stub_contents =
19293 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19294 			    create_instruction_ldmdb
19295 			    (ri, /*wback=*/1, insn_high_registers));
19296 
19297       /* LDMDB Ri, {R-low-register-list}.  */
19298       current_stub_contents =
19299 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19300 			    create_instruction_ldmdb
19301 			    (ri, /*wback=*/0, insn_low_registers));
19302 
19303       /* B initial_insn_addr+4.  */
19304       current_stub_contents =
19305 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19306 			    create_instruction_branch_absolute
19307 			    (initial_insn_addr - current_stub_contents));
19308     }
19309   else if (!wback && restore_pc && restore_rn)
19310     {
19311       ri = rn;
19312       if (!(insn_high_registers & (1 << rn)))
19313 	{
19314 	  /* Choose a Ri in the high-register-list that will be restored.  */
19315 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
19316 	}
19317 
19318       /* SUB Ri, Rn, #(4*nb_registers).  */
19319       current_stub_contents =
19320 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19321 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
19322 
19323       /* LDMIA Ri!, {R-low-register-list}.  */
19324       current_stub_contents =
19325 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19326 			    create_instruction_ldmia
19327 			    (ri, /*wback=*/1, insn_low_registers));
19328 
19329       /* LDMIA Ri, {R-high-register-list}.  */
19330       current_stub_contents =
19331 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19332 			    create_instruction_ldmia
19333 			    (ri, /*wback=*/0, insn_high_registers));
19334     }
19335   else if (wback && restore_rn)
19336     {
19337       /* The assembler should not have accepted this encoding.  */
19338       BFD_ASSERT (0 && "Cannot patch an instruction that has "
19339 	"undefined behavior.\n");
19340     }
19341 
19342   /* Fill the remainder of the stub with deterministic contents.  */
19343   current_stub_contents =
19344     stm32l4xx_fill_stub_udf (htab, output_bfd,
19345 			     base_stub_contents, current_stub_contents,
19346 			     base_stub_contents +
19347 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
19348 
19349 }
19350 
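/* Create a stub replacing the Thumb-2 VLDM instruction INITIAL_INSN found at
   INITIAL_INSN_ADDR.  Transfers of eight words or fewer are copied unchanged;
   larger transfers are split into chunks of at most eight words so that the
   STM32L4XX erratum cannot trigger.  The stub ends with a branch back to the
   instruction following the original VLDM.  */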
19351 static void
19352 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
19353 				      bfd * output_bfd,
19354 				      const insn32 initial_insn,
19355 				      const bfd_byte *const initial_insn_addr,
19356 				      bfd_byte *const base_stub_contents)
19357 {
19358   int num_words = initial_insn & 0xff;
19359   bfd_byte *current_stub_contents = base_stub_contents;
19360 
19361   BFD_ASSERT (is_thumb2_vldm (initial_insn));
19362 
19363   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
19364      load sequences smaller than 8 words that do not trigger the
19365      hardware issue.  */
19366   if (num_words <= 8)
19367     {
19368       /* Untouched instruction.  */
19369       current_stub_contents =
19370 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19371 			    initial_insn);
19372 
19373       /* B initial_insn_addr+4.  */
19374       current_stub_contents =
19375 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19376 			    create_instruction_branch_absolute
19377 			    (initial_insn_addr - current_stub_contents));
19378     }
19379   else
19380     {
19381       bfd_boolean is_dp = /* DP encoding.  */
19382 	(initial_insn & 0xfe100f00) == 0xec100b00;
19383       bfd_boolean is_ia_nobang = /* (IA without !).  */
19384 	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
19385       bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
19386 	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
19387       bfd_boolean is_db_bang = /* (DB with !).  */
19388 	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
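      /* n = UInt (Rn): bits [19:16] of the encoding.  */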
19389       int base_reg = ((unsigned int) initial_insn << 12) >> 28;
19390       /* d = UInt (Vd:D);.  */
19391       int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
19392 	| (((unsigned int)initial_insn << 9) >> 31);
19393 
19394       /* Compute the number of 8-word chunks needed for the split.  */
19395       int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
19396       int chunk;
19397 
19398       /* The test coverage has been done assuming that exactly one
19399 	 of the previous is_ predicates is true; the assertion below
19400 	 checks this hypothesis.  */
19401       BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
19402 		  && !(is_ia_nobang & is_ia_bang & is_db_bang));
19403 
19404       /* We split the words in one pass for all cases, then we emit
19405 	 the adjustments:
19406 
19407 	 vldm rx, {...}
19408 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
19409 	 -> sub rx, rx, #size (list)
19410 
19411 	 vldm rx!, {...}
19412 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
19413 	 This also handles the vpop instruction (when rx is sp)
19414 
19415 	 vldmdb rx!, {...}
19416 	 -> vldmdb rx!, {8_words_or_less} for each needed 8_word.  */
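      /* For example a 13-word transfer (num_words == 13) gives chunks == 2:
	 an 8-word VLDM followed by a 5-word VLDM covering the remaining
	 num_words - chunk * 8 words.  */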
19417       for (chunk = 0; chunk < chunks; ++chunk)
19418 	{
19419 	  bfd_vma new_insn = 0;
19420 
19421 	  if (is_ia_nobang || is_ia_bang)
19422 	    {
19423 	      new_insn = create_instruction_vldmia
19424 		(base_reg,
19425 		 is_dp,
19426 		 /*wback=*/1,
19427 		 chunks - (chunk + 1) ?
19428 		 8 : num_words - chunk * 8,
19429 		 first_reg + chunk * 8);
19430 	    }
19431 	  else if (is_db_bang)
19432 	    {
19433 	      new_insn = create_instruction_vldmdb
19434 		(base_reg,
19435 		 is_dp,
19436 		 chunks - (chunk + 1) ?
19437 		 8 : num_words - chunk * 8,
19438 		 first_reg + chunk * 8);
19439 	    }
19440 
19441 	  if (new_insn)
19442 	    current_stub_contents =
19443 	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19444 				  new_insn);
19445 	}
19446 
19447       /* Only this case requires the base register compensation
19448 	 subtract.  */
19449       if (is_ia_nobang)
19450 	{
19451 	  current_stub_contents =
19452 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19453 				create_instruction_sub
19454 				(base_reg, base_reg, 4*num_words));
19455 	}
19456 
19457       /* B initial_insn_addr+4.  */
19458       current_stub_contents =
19459 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
19460 			    create_instruction_branch_absolute
19461 			    (initial_insn_addr - current_stub_contents));
19462     }
19463 
19464   /* Fill the remainder of the stub with deterministic contents.  */
19465   current_stub_contents =
19466     stm32l4xx_fill_stub_udf (htab, output_bfd,
19467 			     base_stub_contents, current_stub_contents,
19468 			     base_stub_contents +
19469 			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
19470 }
19471 
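/* Emit the replacing stub for WRONG_INSN at WRONG_INSN_ADDR into
   STUB_CONTENTS, dispatching on whether the offending instruction is an
   LDMIA, an LDMDB or a VLDM.  */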
19472 static void
19473 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
19474 				 bfd * output_bfd,
19475 				 const insn32 wrong_insn,
19476 				 const bfd_byte *const wrong_insn_addr,
19477 				 bfd_byte *const stub_contents)
19478 {
19479   if (is_thumb2_ldmia (wrong_insn))
19480     stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
19481 					   wrong_insn, wrong_insn_addr,
19482 					   stub_contents);
19483   else if (is_thumb2_ldmdb (wrong_insn))
19484     stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
19485 					   wrong_insn, wrong_insn_addr,
19486 					   stub_contents);
19487   else if (is_thumb2_vldm (wrong_insn))
19488     stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
19489 					  wrong_insn, wrong_insn_addr,
19490 					  stub_contents);
19491 }
19492 
19493 /* End of stm32l4xx work-around.  */
19494 
19495 
19496 /* Do code byteswapping, apply erratum fixes and edit EXIDX tables.  Return
19497    FALSE afterwards so that the section is written out as normal.  */
19498 
19499 static bfd_boolean
19500 elf32_arm_write_section (bfd *output_bfd,
19501 			 struct bfd_link_info *link_info,
19502 			 asection *sec,
19503 			 bfd_byte *contents)
19504 {
19505   unsigned int mapcount, errcount;
19506   _arm_elf_section_data *arm_data;
19507   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
19508   elf32_arm_section_map *map;
19509   elf32_vfp11_erratum_list *errnode;
19510   elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
19511   bfd_vma ptr;
19512   bfd_vma end;
19513   bfd_vma offset = sec->output_section->vma + sec->output_offset;
19514   bfd_byte tmp;
19515   unsigned int i;
19516 
19517   if (globals == NULL)
19518     return FALSE;
19519 
19520   /* If this section has not been allocated an _arm_elf_section_data
19521      structure then we cannot record anything.  */
19522   arm_data = get_arm_elf_section_data (sec);
19523   if (arm_data == NULL)
19524     return FALSE;
19525 
19526   mapcount = arm_data->mapcount;
19527   map = arm_data->map;
19528   errcount = arm_data->erratumcount;
19529 
19530   if (errcount != 0)
19531     {
19532       unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
19533 
19534       for (errnode = arm_data->erratumlist; errnode != 0;
19535 	   errnode = errnode->next)
19536 	{
19537 	  bfd_vma target = errnode->vma - offset;
19538 
19539 	  switch (errnode->type)
19540 	    {
19541 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
19542 	      {
19543 		bfd_vma branch_to_veneer;
19544 		/* Original condition code of instruction, plus bit mask for
19545 		   ARM B instruction.  */
19546 		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
19547 				  | 0x0a000000;
19548 
19549 		/* The instruction is before the label.  */
19550 		target -= 4;
19551 
19552 		/* Above offset included in -4 below.  */
19553 		branch_to_veneer = errnode->u.b.veneer->vma
19554 				   - errnode->vma - 4;
19555 
19556 		if ((signed) branch_to_veneer < -(1 << 25)
19557 		    || (signed) branch_to_veneer >= (1 << 25))
19558 		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19559 					"range"), output_bfd);
19560 
19561 		insn |= (branch_to_veneer >> 2) & 0xffffff;
19562 		contents[endianflip ^ target] = insn & 0xff;
19563 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19564 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19565 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19566 	      }
19567 	      break;
19568 
19569 	    case VFP11_ERRATUM_ARM_VENEER:
19570 	      {
19571 		bfd_vma branch_from_veneer;
19572 		unsigned int insn;
19573 
19574 		/* Take size of veneer into account.  */
19575 		branch_from_veneer = errnode->u.v.branch->vma
19576 				     - errnode->vma - 12;
19577 
19578 		if ((signed) branch_from_veneer < -(1 << 25)
19579 		    || (signed) branch_from_veneer >= (1 << 25))
19580 		  _bfd_error_handler (_("%pB: error: VFP11 veneer out of "
19581 					"range"), output_bfd);
19582 
19583 		/* Original instruction.  */
19584 		insn = errnode->u.v.branch->u.b.vfp_insn;
19585 		contents[endianflip ^ target] = insn & 0xff;
19586 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
19587 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
19588 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
19589 
19590 		/* Branch back to insn after original insn.  */
19591 		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
19592 		contents[endianflip ^ (target + 4)] = insn & 0xff;
19593 		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
19594 		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
19595 		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
19596 	      }
19597 	      break;
19598 
19599 	    default:
19600 	      abort ();
19601 	    }
19602 	}
19603     }
19604 
19605   if (arm_data->stm32l4xx_erratumcount != 0)
19606     {
19607       for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
19608 	   stm32l4xx_errnode != 0;
19609 	   stm32l4xx_errnode = stm32l4xx_errnode->next)
19610 	{
19611 	  bfd_vma target = stm32l4xx_errnode->vma - offset;
19612 
19613 	  switch (stm32l4xx_errnode->type)
19614 	    {
19615 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
19616 	      {
19617 		unsigned int insn;
19618 		bfd_vma branch_to_veneer =
19619 		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
19620 
19621 		if ((signed) branch_to_veneer < -(1 << 24)
19622 		    || (signed) branch_to_veneer >= (1 << 24))
19623 		  {
19624 		    bfd_vma out_of_range =
19625 		      ((signed) branch_to_veneer < -(1 << 24)) ?
19626 		      - branch_to_veneer - (1 << 24) :
19627 		      ((signed) branch_to_veneer >= (1 << 24)) ?
19628 		      branch_to_veneer - (1 << 24) : 0;
19629 
19630 		    _bfd_error_handler
19631 		      (_("%pB(%#" PRIx64 "): error: "
19632 			 "cannot create STM32L4XX veneer; "
19633 			 "jump out of range by %" PRId64 " bytes; "
19634 			 "cannot encode branch instruction"),
19635 		       output_bfd,
19636 		       (uint64_t) (stm32l4xx_errnode->vma - 4),
19637 		       (int64_t) out_of_range);
19638 		    continue;
19639 		  }
19640 
19641 		insn = create_instruction_branch_absolute
19642 		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
19643 
19644 		/* The instruction is before the label.  */
19645 		target -= 4;
19646 
19647 		put_thumb2_insn (globals, output_bfd,
19648 				 (bfd_vma) insn, contents + target);
19649 	      }
19650 	      break;
19651 
19652 	    case STM32L4XX_ERRATUM_VENEER:
19653 	      {
19654 		bfd_byte * veneer;
19655 		bfd_byte * veneer_r;
19656 		unsigned int insn;
19657 
19658 		veneer = contents + target;
19659 		veneer_r = veneer
19660 		  + stm32l4xx_errnode->u.b.veneer->vma
19661 		  - stm32l4xx_errnode->vma - 4;
19662 
19663 		if ((signed) (veneer_r - veneer -
19664 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
19665 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
19666 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
19667 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
19668 		    || (signed) (veneer_r - veneer) >= (1 << 24))
19669 		  {
19670 		    _bfd_error_handler (_("%pB: error: cannot create STM32L4XX "
19671 					  "veneer"), output_bfd);
19672 		     continue;
19673 		  }
19674 
19675 		/* Original instruction.  */
19676 		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
19677 
19678 		stm32l4xx_create_replacing_stub
19679 		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
19680 	      }
19681 	      break;
19682 
19683 	    default:
19684 	      abort ();
19685 	    }
19686 	}
19687     }
19688 
19689   if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
19690     {
19691       arm_unwind_table_edit *edit_node
19692 	= arm_data->u.exidx.unwind_edit_list;
19693       /* Now, sec->size is the size of the section we will write.  The original
19694 	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
19695 	 markers) was sec->rawsize.  (This isn't the case if we performed no
19696 	 edits; then rawsize will be zero and we should use size.)  */
19697       bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
19698       unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
19699       unsigned int in_index, out_index;
19700       bfd_vma add_to_offsets = 0;
19701 
19702       if (edited_contents == NULL)
19703 	return FALSE;
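      /* Each .ARM.exidx entry is a pair of 32-bit words: a prel31 offset to
	 the code it covers, followed by either an inline unwind description,
	 a prel31 pointer into .ARM.extab, or the EXIDX_CANTUNWIND marker (1).  */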
19704       for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
19705 	{
19706 	  if (edit_node)
19707 	    {
19708 	      unsigned int edit_index = edit_node->index;
19709 
19710 	      if (in_index < edit_index && in_index * 8 < input_size)
19711 		{
19712 		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19713 				    contents + in_index * 8, add_to_offsets);
19714 		  out_index++;
19715 		  in_index++;
19716 		}
19717 	      else if (in_index == edit_index
19718 		       || (in_index * 8 >= input_size
19719 			   && edit_index == UINT_MAX))
19720 		{
19721 		  switch (edit_node->type)
19722 		    {
19723 		    case DELETE_EXIDX_ENTRY:
19724 		      in_index++;
19725 		      add_to_offsets += 8;
19726 		      break;
19727 
19728 		    case INSERT_EXIDX_CANTUNWIND_AT_END:
19729 		      {
19730 			asection *text_sec = edit_node->linked_section;
19731 			bfd_vma text_offset = text_sec->output_section->vma
19732 					      + text_sec->output_offset
19733 					      + text_sec->size;
19734 			bfd_vma exidx_offset = offset + out_index * 8;
19735 			unsigned long prel31_offset;
19736 
19737 			/* Note: this is meant to be equivalent to an
19738 			   R_ARM_PREL31 relocation.  These synthetic
19739 			   EXIDX_CANTUNWIND markers are not relocated by the
19740 			   usual BFD method.  */
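			/* A prel31 value is a signed 31-bit place-relative
			   offset stored in the low 31 bits of the word, with
			   bit 31 left clear.  */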
19741 			prel31_offset = (text_offset - exidx_offset)
19742 					& 0x7ffffffful;
19743 			if (bfd_link_relocatable (link_info))
19744 			  {
19745 			    /* Here a relocation for the new EXIDX_CANTUNWIND
19746 			       entry is created, so there is no need to
19747 			       adjust the offset by hand.  */
19748 			    prel31_offset = text_sec->output_offset
19749 					    + text_sec->size;
19750 			  }
19751 
19752 			/* First address we can't unwind.  */
19753 			bfd_put_32 (output_bfd, prel31_offset,
19754 				    &edited_contents[out_index * 8]);
19755 
19756 			/* Code for EXIDX_CANTUNWIND.  */
19757 			bfd_put_32 (output_bfd, 0x1,
19758 				    &edited_contents[out_index * 8 + 4]);
19759 
19760 			out_index++;
19761 			add_to_offsets -= 8;
19762 		      }
19763 		      break;
19764 		    }
19765 
19766 		  edit_node = edit_node->next;
19767 		}
19768 	    }
19769 	  else
19770 	    {
19771 	      /* No more edits, copy remaining entries verbatim.  */
19772 	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
19773 				contents + in_index * 8, add_to_offsets);
19774 	      out_index++;
19775 	      in_index++;
19776 	    }
19777 	}
19778 
19779       if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
19780 	bfd_set_section_contents (output_bfd, sec->output_section,
19781 				  edited_contents,
19782 				  (file_ptr) sec->output_offset, sec->size);
19783 
19784       return TRUE;
19785     }
19786 
19787   /* Fix code to point to Cortex-A8 erratum stubs.  */
19788   if (globals->fix_cortex_a8)
19789     {
19790       struct a8_branch_to_stub_data data;
19791 
19792       data.writing_section = sec;
19793       data.contents = contents;
19794 
19795       bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
19796 			 & data);
19797     }
19798 
19799   if (mapcount == 0)
19800     return FALSE;
19801 
19802   if (globals->byteswap_code)
19803     {
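      /* Mapping symbols partition the section into ARM code ('a'), Thumb
	 code ('t') and data ('d') regions: ARM code is byte swapped as
	 32-bit words, Thumb code as 16-bit halfwords, and data is left
	 untouched.  */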
19804       qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
19805 
19806       ptr = map[0].vma;
19807       for (i = 0; i < mapcount; i++)
19808 	{
19809 	  if (i == mapcount - 1)
19810 	    end = sec->size;
19811 	  else
19812 	    end = map[i + 1].vma;
19813 
19814 	  switch (map[i].type)
19815 	    {
19816 	    case 'a':
19817 	      /* Byte swap code words.  */
19818 	      while (ptr + 3 < end)
19819 		{
19820 		  tmp = contents[ptr];
19821 		  contents[ptr] = contents[ptr + 3];
19822 		  contents[ptr + 3] = tmp;
19823 		  tmp = contents[ptr + 1];
19824 		  contents[ptr + 1] = contents[ptr + 2];
19825 		  contents[ptr + 2] = tmp;
19826 		  ptr += 4;
19827 		}
19828 	      break;
19829 
19830 	    case 't':
19831 	      /* Byte swap code halfwords.  */
19832 	      while (ptr + 1 < end)
19833 		{
19834 		  tmp = contents[ptr];
19835 		  contents[ptr] = contents[ptr + 1];
19836 		  contents[ptr + 1] = tmp;
19837 		  ptr += 2;
19838 		}
19839 	      break;
19840 
19841 	    case 'd':
19842 	      /* Leave data alone.  */
19843 	      break;
19844 	    }
19845 	  ptr = end;
19846 	}
19847     }
19848 
19849   free (map);
19850   arm_data->mapcount = -1;
19851   arm_data->mapsize = 0;
19852   arm_data->map = NULL;
19853 
19854   return FALSE;
19855 }
19856 
19857 /* Mangle thumb function symbols as we read them in.  */
19858 
19859 static bfd_boolean
19860 elf32_arm_swap_symbol_in (bfd * abfd,
19861 			  const void *psrc,
19862 			  const void *pshn,
19863 			  Elf_Internal_Sym *dst)
19864 {
19865   if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
19866     return FALSE;
19867   dst->st_target_internal = 0;
19868 
19869   /* New EABI objects mark thumb function symbols by setting the low bit of
19870      the address.  */
19871   if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
19872       || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
19873     {
19874       if (dst->st_value & 1)
19875 	{
19876 	  dst->st_value &= ~(bfd_vma) 1;
19877 	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
19878 				   ST_BRANCH_TO_THUMB);
19879 	}
19880       else
19881 	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
19882     }
19883   else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
19884     {
19885       dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
19886       ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
19887     }
19888   else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
19889     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
19890   else
19891     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
19892 
19893   return TRUE;
19894 }
19895 
19896 
19897 /* Mangle thumb function symbols as we write them out.  */
19898 
19899 static void
19900 elf32_arm_swap_symbol_out (bfd *abfd,
19901 			   const Elf_Internal_Sym *src,
19902 			   void *cdst,
19903 			   void *shndx)
19904 {
19905   Elf_Internal_Sym newsym;
19906 
19907   /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
19908      of the address set, as per the new EABI.  We do this unconditionally
19909      because objcopy does not set the elf header flags until after
19910      it writes out the symbol table.  */
19911   if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
19912     {
19913       newsym = *src;
19914       if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
19915 	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
19916       if (newsym.st_shndx != SHN_UNDEF)
19917 	{
19918 	  /* Do this only for defined symbols.  At link time, the static
19919 	     linker simulates the work of the dynamic linker by resolving
19920 	     symbols and carries over the Thumb-ness of the symbols it
19921 	     finds to the output symbol table.  It is not clear exactly
19922 	     how this happens, but the Thumb-ness of undefined symbols
19923 	     may well be different at runtime, and writing '1' for them
19924 	     would be confusing for users and possibly for the dynamic
19925 	     linker itself.  */
19926 	  newsym.st_value |= 1;
19927 	}
19928 
19929       src = &newsym;
19930     }
19931   bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
19932 }
19933 
19934 /* Add the PT_ARM_EXIDX program header.  */
19935 
19936 static bfd_boolean
19937 elf32_arm_modify_segment_map (bfd *abfd,
19938 			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
19939 {
19940   struct elf_segment_map *m;
19941   asection *sec;
19942 
19943   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19944   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19945     {
19946       /* If there is already a PT_ARM_EXIDX header, then we do not
19947 	 want to add another one.  This situation arises when running
19948 	 "strip"; the input binary already has the header.  */
19949       m = elf_seg_map (abfd);
19950       while (m && m->p_type != PT_ARM_EXIDX)
19951 	m = m->next;
19952       if (!m)
19953 	{
19954 	  m = (struct elf_segment_map *)
19955 	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
19956 	  if (m == NULL)
19957 	    return FALSE;
19958 	  m->p_type = PT_ARM_EXIDX;
19959 	  m->count = 1;
19960 	  m->sections[0] = sec;
19961 
19962 	  m->next = elf_seg_map (abfd);
19963 	  elf_seg_map (abfd) = m;
19964 	}
19965     }
19966 
19967   return TRUE;
19968 }
19969 
19970 /* We may add a PT_ARM_EXIDX program header.  */
19971 
19972 static int
19973 elf32_arm_additional_program_headers (bfd *abfd,
19974 				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
19975 {
19976   asection *sec;
19977 
19978   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
19979   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
19980     return 1;
19981   else
19982     return 0;
19983 }
19984 
19985 /* Hook called by the linker routine which adds symbols from an object
19986    file.  */
19987 
19988 static bfd_boolean
19989 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
19990 			   Elf_Internal_Sym *sym, const char **namep,
19991 			   flagword *flagsp, asection **secp, bfd_vma *valp)
19992 {
19993   if (elf32_arm_hash_table (info) == NULL)
19994     return FALSE;
19995 
19996   if (elf32_arm_hash_table (info)->vxworks_p
19997       && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
19998 				       flagsp, secp, valp))
19999     return FALSE;
20000 
20001   return TRUE;
20002 }
20003 
20004 /* We use this to override swap_symbol_in and swap_symbol_out.  */
20005 const struct elf_size_info elf32_arm_size_info =
20006 {
20007   sizeof (Elf32_External_Ehdr),
20008   sizeof (Elf32_External_Phdr),
20009   sizeof (Elf32_External_Shdr),
20010   sizeof (Elf32_External_Rel),
20011   sizeof (Elf32_External_Rela),
20012   sizeof (Elf32_External_Sym),
20013   sizeof (Elf32_External_Dyn),
20014   sizeof (Elf_External_Note),
20015   4,
20016   1,
20017   32, 2,
20018   ELFCLASS32, EV_CURRENT,
20019   bfd_elf32_write_out_phdrs,
20020   bfd_elf32_write_shdrs_and_ehdr,
20021   bfd_elf32_checksum_contents,
20022   bfd_elf32_write_relocs,
20023   elf32_arm_swap_symbol_in,
20024   elf32_arm_swap_symbol_out,
20025   bfd_elf32_slurp_reloc_table,
20026   bfd_elf32_slurp_symbol_table,
20027   bfd_elf32_swap_dyn_in,
20028   bfd_elf32_swap_dyn_out,
20029   bfd_elf32_swap_reloc_in,
20030   bfd_elf32_swap_reloc_out,
20031   bfd_elf32_swap_reloca_in,
20032   bfd_elf32_swap_reloca_out
20033 };
20034 
20035 static bfd_vma
20036 read_code32 (const bfd *abfd, const bfd_byte *addr)
20037 {
20038   /* V7 BE8 code is always little endian.  */
20039   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20040     return bfd_getl32 (addr);
20041 
20042   return bfd_get_32 (abfd, addr);
20043 }
20044 
20045 static bfd_vma
20046 read_code16 (const bfd *abfd, const bfd_byte *addr)
20047 {
20048   /* V7 BE8 code is always little endian.  */
20049   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
20050     return bfd_getl16 (addr);
20051 
20052   return bfd_get_16 (abfd, addr);
20053 }
20054 
20055 /* Return the size of the plt0 entry starting at ADDR,
20056    or (bfd_vma) -1 if the size cannot be determined.  */
20057 
20058 static bfd_vma
20059 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
20060 {
20061   bfd_vma first_word;
20062   bfd_vma plt0_size;
20063 
20064   first_word = read_code32 (abfd, addr);
20065 
20066   if (first_word == elf32_arm_plt0_entry[0])
20067     plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
20068   else if (first_word == elf32_thumb2_plt0_entry[0])
20069     plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
20070   else
20071     /* We don't yet handle this PLT format.  */
20072     return (bfd_vma) -1;
20073 
20074   return plt0_size;
20075 }
20076 
20077 /* Return the size of the plt entry starting at offset OFFSET
20078    of the plt section located at address START,
20079    or (bfd_vma) -1 if the size cannot be determined.  */
20080 
20081 static bfd_vma
20082 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
20083 {
20084   bfd_vma first_insn;
20085   bfd_vma plt_size = 0;
20086   const bfd_byte *addr = start + offset;
20087 
20088   /* PLT entry size is fixed on Thumb-only platforms.  */
20089   if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
20090       return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
20091 
20092   /* Respect Thumb stub if necessary.  */
20093   if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
20094     {
20095       plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
20096     }
20097 
20098   /* Strip immediate from first add.  */
20099   first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
20100 
20101 #ifdef FOUR_WORD_PLT
20102   if (first_insn == elf32_arm_plt_entry[0])
20103     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
20104 #else
20105   if (first_insn == elf32_arm_plt_entry_long[0])
20106     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
20107   else if (first_insn == elf32_arm_plt_entry_short[0])
20108     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
20109 #endif
20110   else
20111     /* We don't yet handle this PLT format.  */
20112     return (bfd_vma) -1;
20113 
20114   return plt_size;
20115 }
20116 
20117 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
20118 
20119 static long
20120 elf32_arm_get_synthetic_symtab (bfd *abfd,
20121 			       long symcount ATTRIBUTE_UNUSED,
20122 			       asymbol **syms ATTRIBUTE_UNUSED,
20123 			       long dynsymcount,
20124 			       asymbol **dynsyms,
20125 			       asymbol **ret)
20126 {
20127   asection *relplt;
20128   asymbol *s;
20129   arelent *p;
20130   long count, i, n;
20131   size_t size;
20132   Elf_Internal_Shdr *hdr;
20133   char *names;
20134   asection *plt;
20135   bfd_vma offset;
20136   bfd_byte *data;
20137 
20138   *ret = NULL;
20139 
20140   if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
20141     return 0;
20142 
20143   if (dynsymcount <= 0)
20144     return 0;
20145 
20146   relplt = bfd_get_section_by_name (abfd, ".rel.plt");
20147   if (relplt == NULL)
20148     return 0;
20149 
20150   hdr = &elf_section_data (relplt)->this_hdr;
20151   if (hdr->sh_link != elf_dynsymtab (abfd)
20152       || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
20153     return 0;
20154 
20155   plt = bfd_get_section_by_name (abfd, ".plt");
20156   if (plt == NULL)
20157     return 0;
20158 
20159   if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
20160     return -1;
20161 
20162   data = plt->contents;
20163   if (data == NULL)
20164     {
20165       if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
20166 	return -1;
20167       bfd_cache_section_contents((asection *) plt, data);
20168     }
20169 
20170   count = relplt->size / hdr->sh_entsize;
20171   size = count * sizeof (asymbol);
20172   p = relplt->relocation;
20173   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20174     {
20175       size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
20176       if (p->addend != 0)
20177 	size += sizeof ("+0x") - 1 + 8;
20178     }
20179 
20180   s = *ret = (asymbol *) bfd_malloc (size);
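  /* The allocation holds COUNT asymbols followed by their names; each
     synthesized name has the form "<sym>@plt", with "+0x<addend>" inserted
     before "@plt" when the relocation carries a non-zero addend.  */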
20181   if (s == NULL)
20182     return -1;
20183 
20184   offset = elf32_arm_plt0_size (abfd, data);
20185   if (offset == (bfd_vma) -1)
20186     return -1;
20187 
20188   names = (char *) (s + count);
20189   p = relplt->relocation;
20190   n = 0;
20191   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
20192     {
20193       size_t len;
20194 
20195       bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
20196       if (plt_size == (bfd_vma) -1)
20197 	break;
20198 
20199       *s = **p->sym_ptr_ptr;
20200       /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
20201 	 we are defining a symbol, ensure one of them is set.  */
20202       if ((s->flags & BSF_LOCAL) == 0)
20203 	s->flags |= BSF_GLOBAL;
20204       s->flags |= BSF_SYNTHETIC;
20205       s->section = plt;
20206       s->value = offset;
20207       s->name = names;
20208       s->udata.p = NULL;
20209       len = strlen ((*p->sym_ptr_ptr)->name);
20210       memcpy (names, (*p->sym_ptr_ptr)->name, len);
20211       names += len;
20212       if (p->addend != 0)
20213 	{
20214 	  char buf[30], *a;
20215 
20216 	  memcpy (names, "+0x", sizeof ("+0x") - 1);
20217 	  names += sizeof ("+0x") - 1;
20218 	  bfd_sprintf_vma (abfd, buf, p->addend);
20219 	  for (a = buf; *a == '0'; ++a)
20220 	    ;
20221 	  len = strlen (a);
20222 	  memcpy (names, a, len);
20223 	  names += len;
20224 	}
20225       memcpy (names, "@plt", sizeof ("@plt"));
20226       names += sizeof ("@plt");
20227       ++s, ++n;
20228       offset += plt_size;
20229     }
20230 
20231   return n;
20232 }
20233 
20234 static bfd_boolean
20235 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
20236 {
20237   if (hdr->sh_flags & SHF_ARM_PURECODE)
20238     *flags |= SEC_ELF_PURECODE;
20239   return TRUE;
20240 }
20241 
20242 static flagword
20243 elf32_arm_lookup_section_flags (char *flag_name)
20244 {
20245   if (!strcmp (flag_name, "SHF_ARM_PURECODE"))
20246     return SHF_ARM_PURECODE;
20247 
20248   return SEC_NO_FLAGS;
20249 }
20250 
20251 static unsigned int
20252 elf32_arm_count_additional_relocs (asection *sec)
20253 {
20254   struct _arm_elf_section_data *arm_data;
20255   arm_data = get_arm_elf_section_data (sec);
20256 
20257   return arm_data == NULL ? 0 : arm_data->additional_reloc_count;
20258 }
20259 
20260 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
20261    has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised,
20262    FALSE otherwise.  ISECTION is the best guess matching section from the
20263    input bfd IBFD, but it might be NULL.  */
20264 
20265 static bfd_boolean
20266 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
20267 				       bfd *obfd ATTRIBUTE_UNUSED,
20268 				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
20269 				       Elf_Internal_Shdr *osection)
20270 {
20271   switch (osection->sh_type)
20272     {
20273     case SHT_ARM_EXIDX:
20274       {
20275 	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
20276 	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
20277 	unsigned i = 0;
20278 
20279 	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
20280 	osection->sh_info = 0;
20281 
20282 	/* The sh_link field must be set to the text section associated with
20283 	   this index section.  Unfortunately the ARM EHABI does not specify
20284 	   exactly how to determine this association.  Our caller does try
20285 	   to match up OSECTION with its corresponding input section, however,
20286 	   so that is a good first guess.  */
20287 	if (isection != NULL
20288 	    && osection->bfd_section != NULL
20289 	    && isection->bfd_section != NULL
20290 	    && isection->bfd_section->output_section != NULL
20291 	    && isection->bfd_section->output_section == osection->bfd_section
20292 	    && iheaders != NULL
20293 	    && isection->sh_link > 0
20294 	    && isection->sh_link < elf_numsections (ibfd)
20295 	    && iheaders[isection->sh_link]->bfd_section != NULL
20296 	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
20297 	    )
20298 	  {
20299 	    for (i = elf_numsections (obfd); i-- > 0;)
20300 	      if (oheaders[i]->bfd_section
20301 		  == iheaders[isection->sh_link]->bfd_section->output_section)
20302 		break;
20303 	  }
20304 
20305 	if (i == 0)
20306 	  {
20307 	    /* Failing that we have to find a matching section ourselves.  If
20308 	       we had the output section name available we could compare that
20309 	       with input section names.  Unfortunately we don't.  So instead
20310 	       we use a simple heuristic and look for the nearest executable
20311 	       section before this one.  */
20312 	    for (i = elf_numsections (obfd); i-- > 0;)
20313 	      if (oheaders[i] == osection)
20314 		break;
20315 	    if (i == 0)
20316 	      break;
20317 
20318 	    while (i-- > 0)
20319 	      if (oheaders[i]->sh_type == SHT_PROGBITS
20320 		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
20321 		  == (SHF_ALLOC | SHF_EXECINSTR))
20322 		break;
20323 	  }
20324 
20325 	if (i)
20326 	  {
20327 	    osection->sh_link = i;
20328 	    /* If the text section was part of a group
20329 	       then the index section should be too.  */
20330 	    if (oheaders[i]->sh_flags & SHF_GROUP)
20331 	      osection->sh_flags |= SHF_GROUP;
20332 	    return TRUE;
20333 	  }
20334       }
20335       break;
20336 
20337     case SHT_ARM_PREEMPTMAP:
20338       osection->sh_flags = SHF_ALLOC;
20339       break;
20340 
20341     case SHT_ARM_ATTRIBUTES:
20342     case SHT_ARM_DEBUGOVERLAY:
20343     case SHT_ARM_OVERLAYSECTION:
20344     default:
20345       break;
20346     }
20347 
20348   return FALSE;
20349 }
20350 
20351 /* Returns TRUE if NAME is an ARM mapping symbol.
20352    Traditionally the symbols $a, $d and $t have been used.
20353    The ARM ELF standard also defines $x (for A64 code).  It also allows a
20354    period-initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
20355    Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
20356    not support them here.  $t.x indicates the start of ThumbEE instructions.  */
20357 
20358 static bfd_boolean
20359 is_arm_mapping_symbol (const char * name)
20360 {
20361   return name != NULL /* Paranoia.  */
20362     && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
20363 			 the mapping symbols could have acquired a prefix.
20364 			 We do not support this here, since such symbols no
20365 			 longer conform to the ARM ELF ABI.  */
20366     && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
20367     && (name[2] == 0 || name[2] == '.');
20368   /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
20369      any characters that follow the period are legal characters for the body
20370      of a symbol's name.  For now we just assume that this is the case.  */
20371 }
20372 
20373 /* Make sure that mapping symbols in object files are not removed via the
20374    "strip --strip-unneeded" tool.  These symbols are needed in order to
20375    correctly generate interworking veneers, and for byte swapping code
20376    regions.  Once an object file has been linked, it is safe to remove the
20377    symbols as they will no longer be needed.  */
20378 
20379 static void
20380 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
20381 {
20382   if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
20383       && sym->section != bfd_abs_section_ptr
20384       && is_arm_mapping_symbol (sym->name))
20385     sym->flags |= BSF_KEEP;
20386 }
20387 
20388 #undef  elf_backend_copy_special_section_fields
20389 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
20390 
20391 #define ELF_ARCH			bfd_arch_arm
20392 #define ELF_TARGET_ID			ARM_ELF_DATA
20393 #define ELF_MACHINE_CODE		EM_ARM
20394 #ifdef __QNXTARGET__
20395 #define ELF_MAXPAGESIZE			0x1000
20396 #else
20397 #define ELF_MAXPAGESIZE			0x10000
20398 #endif
20399 #define ELF_MINPAGESIZE			0x1000
20400 #define ELF_COMMONPAGESIZE		0x1000
20401 
20402 #define bfd_elf32_mkobject			elf32_arm_mkobject
20403 
20404 #define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
20405 #define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
20406 #define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
20407 #define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
20408 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_link_hash_table_create
20409 #define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
20410 #define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
20411 #define bfd_elf32_find_inliner_info		elf32_arm_find_inliner_info
20412 #define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
20413 #define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
20414 #define bfd_elf32_bfd_final_link		elf32_arm_final_link
20415 #define bfd_elf32_get_synthetic_symtab	elf32_arm_get_synthetic_symtab
20416 
20417 #define elf_backend_get_symbol_type		elf32_arm_get_symbol_type
20418 #define elf_backend_maybe_function_sym		elf32_arm_maybe_function_sym
20419 #define elf_backend_gc_mark_hook		elf32_arm_gc_mark_hook
20420 #define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
20421 #define elf_backend_check_relocs		elf32_arm_check_relocs
20422 #define elf_backend_update_relocs		elf32_arm_update_relocs
20423 #define elf_backend_relocate_section		elf32_arm_relocate_section
20424 #define elf_backend_write_section		elf32_arm_write_section
20425 #define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
20426 #define elf_backend_create_dynamic_sections	elf32_arm_create_dynamic_sections
20427 #define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
20428 #define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
20429 #define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
20430 #define elf_backend_always_size_sections	elf32_arm_always_size_sections
20431 #define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
20432 #define elf_backend_init_file_header		elf32_arm_init_file_header
20433 #define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
20434 #define elf_backend_object_p			elf32_arm_object_p
20435 #define elf_backend_fake_sections		elf32_arm_fake_sections
20436 #define elf_backend_section_from_shdr		elf32_arm_section_from_shdr
20437 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
20438 #define elf_backend_copy_indirect_symbol	elf32_arm_copy_indirect_symbol
20439 #define elf_backend_size_info			elf32_arm_size_info
20440 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
20441 #define elf_backend_additional_program_headers	elf32_arm_additional_program_headers
20442 #define elf_backend_output_arch_local_syms	elf32_arm_output_arch_local_syms
20443 #define elf_backend_filter_implib_symbols	elf32_arm_filter_implib_symbols
20444 #define elf_backend_begin_write_processing	elf32_arm_begin_write_processing
20445 #define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
20446 #define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
20447 #define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing
20448 
20449 #define elf_backend_can_refcount       1
20450 #define elf_backend_can_gc_sections    1
20451 #define elf_backend_plt_readonly       1
20452 #define elf_backend_want_got_plt       1
20453 #define elf_backend_want_plt_sym       0
20454 #define elf_backend_want_dynrelro      1
20455 #define elf_backend_may_use_rel_p      1
20456 #define elf_backend_may_use_rela_p     0
20457 #define elf_backend_default_use_rela_p 0
20458 #define elf_backend_dtrel_excludes_plt 1
20459 
20460 #define elf_backend_got_header_size	12
20461 #define elf_backend_extern_protected_data 1
20462 
20463 #undef	elf_backend_obj_attrs_vendor
20464 #define elf_backend_obj_attrs_vendor		"aeabi"
20465 #undef	elf_backend_obj_attrs_section
20466 #define elf_backend_obj_attrs_section		".ARM.attributes"
20467 #undef	elf_backend_obj_attrs_arg_type
20468 #define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
20469 #undef	elf_backend_obj_attrs_section_type
20470 #define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
20471 #define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
20472 #define elf_backend_obj_attrs_handle_unknown	elf32_arm_obj_attrs_handle_unknown
20473 
20474 #undef	elf_backend_section_flags
20475 #define elf_backend_section_flags		elf32_arm_section_flags
20476 #undef	elf_backend_lookup_section_flags_hook
20477 #define elf_backend_lookup_section_flags_hook	elf32_arm_lookup_section_flags
20478 
20479 #define elf_backend_linux_prpsinfo32_ugid16	TRUE
20480 
20481 #include "elf32-target.h"
20482 
20483 /* Native Client targets.  */
20484 
20485 #undef	TARGET_LITTLE_SYM
20486 #define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
20487 #undef	TARGET_LITTLE_NAME
20488 #define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
20489 #undef	TARGET_BIG_SYM
20490 #define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
20491 #undef	TARGET_BIG_NAME
20492 #define TARGET_BIG_NAME			"elf32-bigarm-nacl"
20493 
20494 /* Like elf32_arm_link_hash_table_create -- but overrides
20495    appropriately for NaCl.  */
20496 
20497 static struct bfd_link_hash_table *
20498 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
20499 {
20500   struct bfd_link_hash_table *ret;
20501 
20502   ret = elf32_arm_link_hash_table_create (abfd);
20503   if (ret)
20504     {
20505       struct elf32_arm_link_hash_table *htab
20506 	= (struct elf32_arm_link_hash_table *) ret;
20507 
20508       htab->nacl_p = 1;
20509 
20510       htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
20511       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
20512     }
20513   return ret;
20514 }
20515 
20516 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
20517    really need to use elf32_arm_modify_segment_map.  But we do it
20518    anyway just to reduce gratuitous differences with the stock ARM backend.  */
20519 
20520 static bfd_boolean
20521 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
20522 {
20523   return (elf32_arm_modify_segment_map (abfd, info)
20524 	  && nacl_modify_segment_map (abfd, info));
20525 }
20526 
20527 static bfd_boolean
20528 elf32_arm_nacl_final_write_processing (bfd *abfd)
20529 {
20530   arm_final_write_processing (abfd);
20531   return nacl_final_write_processing (abfd);
20532 }
20533 
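/* Return the address of the Ith PLT entry: the PLT header followed by
   I fixed-size NaCl PLT entries.  */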
20534 static bfd_vma
20535 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
20536 			    const arelent *rel ATTRIBUTE_UNUSED)
20537 {
20538   return plt->vma
20539     + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
20540 	   i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
20541 }
20542 
20543 #undef	elf32_bed
20544 #define elf32_bed				elf32_arm_nacl_bed
20545 #undef  bfd_elf32_bfd_link_hash_table_create
20546 #define bfd_elf32_bfd_link_hash_table_create	\
20547   elf32_arm_nacl_link_hash_table_create
20548 #undef	elf_backend_plt_alignment
20549 #define elf_backend_plt_alignment		4
20550 #undef	elf_backend_modify_segment_map
20551 #define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
20552 #undef	elf_backend_modify_headers
20553 #define	elf_backend_modify_headers		nacl_modify_headers
20554 #undef  elf_backend_final_write_processing
20555 #define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
20556 #undef bfd_elf32_get_synthetic_symtab
20557 #undef  elf_backend_plt_sym_val
20558 #define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
20559 #undef  elf_backend_copy_special_section_fields
20560 
20561 #undef	ELF_MINPAGESIZE
20562 #undef	ELF_COMMONPAGESIZE
20563 
20564 
20565 #include "elf32-target.h"
20566 
20567 /* Reset to defaults.  */
20568 #undef	elf_backend_plt_alignment
20569 #undef	elf_backend_modify_segment_map
20570 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
20571 #undef	elf_backend_modify_headers
20572 #undef  elf_backend_final_write_processing
20573 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
20574 #undef	ELF_MINPAGESIZE
20575 #define ELF_MINPAGESIZE			0x1000
20576 #undef	ELF_COMMONPAGESIZE
20577 #define ELF_COMMONPAGESIZE		0x1000
20578 
20579 
20580 /* FDPIC Targets.  */
20581 
20582 #undef  TARGET_LITTLE_SYM
20583 #define TARGET_LITTLE_SYM		arm_elf32_fdpic_le_vec
20584 #undef  TARGET_LITTLE_NAME
20585 #define TARGET_LITTLE_NAME		"elf32-littlearm-fdpic"
20586 #undef  TARGET_BIG_SYM
20587 #define TARGET_BIG_SYM			arm_elf32_fdpic_be_vec
20588 #undef  TARGET_BIG_NAME
20589 #define TARGET_BIG_NAME			"elf32-bigarm-fdpic"
20590 #undef elf_match_priority
20591 #define elf_match_priority		128
20592 #undef ELF_OSABI
20593 #define ELF_OSABI		ELFOSABI_ARM_FDPIC
20594 
20595 /* Like elf32_arm_link_hash_table_create -- but overrides
20596    appropriately for FDPIC.  */
20597 
20598 static struct bfd_link_hash_table *
20599 elf32_arm_fdpic_link_hash_table_create (bfd *abfd)
20600 {
20601   struct bfd_link_hash_table *ret;
20602 
20603   ret = elf32_arm_link_hash_table_create (abfd);
20604   if (ret)
20605     {
20606       struct elf32_arm_link_hash_table *htab = (struct elf32_arm_link_hash_table *) ret;
20607 
20608       htab->fdpic_p = 1;
20609     }
20610   return ret;
20611 }
20612 
20613 /* We need dynamic symbols for every section, since segments can
20614    relocate independently.  */
20615 static bfd_boolean
20616 elf32_arm_fdpic_omit_section_dynsym (bfd *output_bfd ATTRIBUTE_UNUSED,
20617 				    struct bfd_link_info *info
20618 				    ATTRIBUTE_UNUSED,
20619 				    asection *p ATTRIBUTE_UNUSED)
20620 {
20621   switch (elf_section_data (p)->this_hdr.sh_type)
20622     {
20623     case SHT_PROGBITS:
20624     case SHT_NOBITS:
20625       /* If sh_type is yet undecided, assume it could be
20626 	 SHT_PROGBITS/SHT_NOBITS.  */
20627     case SHT_NULL:
20628       return FALSE;
20629 
20630       /* There shouldn't be section relative relocations
20631 	 against any other section.  */
20632     default:
20633       return TRUE;
20634     }
20635 }
20636 
20637 #undef  elf32_bed
20638 #define elf32_bed				elf32_arm_fdpic_bed
20639 
20640 #undef  bfd_elf32_bfd_link_hash_table_create
20641 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_fdpic_link_hash_table_create
20642 
20643 #undef elf_backend_omit_section_dynsym
20644 #define elf_backend_omit_section_dynsym		elf32_arm_fdpic_omit_section_dynsym
20645 
20646 #include "elf32-target.h"
20647 
20648 #undef elf_match_priority
20649 #undef ELF_OSABI
20650 #undef elf_backend_omit_section_dynsym
20651 
20652 /* VxWorks Targets.  */
20653 
20654 #undef	TARGET_LITTLE_SYM
20655 #define TARGET_LITTLE_SYM		arm_elf32_vxworks_le_vec
20656 #undef	TARGET_LITTLE_NAME
20657 #define TARGET_LITTLE_NAME		"elf32-littlearm-vxworks"
20658 #undef	TARGET_BIG_SYM
20659 #define TARGET_BIG_SYM			arm_elf32_vxworks_be_vec
20660 #undef	TARGET_BIG_NAME
20661 #define TARGET_BIG_NAME			"elf32-bigarm-vxworks"
20662 
20663 /* Like elf32_arm_link_hash_table_create -- but overrides
20664    appropriately for VxWorks.  */
20665 
20666 static struct bfd_link_hash_table *
20667 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
20668 {
20669   struct bfd_link_hash_table *ret;
20670 
20671   ret = elf32_arm_link_hash_table_create (abfd);
20672   if (ret)
20673     {
20674       struct elf32_arm_link_hash_table *htab
20675 	= (struct elf32_arm_link_hash_table *) ret;
20676       htab->use_rel = 0;
20677       htab->vxworks_p = 1;
20678     }
20679   return ret;
20680 }
20681 
20682 static bfd_boolean
20683 elf32_arm_vxworks_final_write_processing (bfd *abfd)
20684 {
20685   arm_final_write_processing (abfd);
20686   return elf_vxworks_final_write_processing (abfd);
20687 }
20688 
20689 #undef  elf32_bed
20690 #define elf32_bed elf32_arm_vxworks_bed
20691 
20692 #undef  bfd_elf32_bfd_link_hash_table_create
20693 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
20694 #undef  elf_backend_final_write_processing
20695 #define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
20696 #undef  elf_backend_emit_relocs
20697 #define elf_backend_emit_relocs			elf_vxworks_emit_relocs
20698 
20699 #undef  elf_backend_may_use_rel_p
20700 #define elf_backend_may_use_rel_p	0
20701 #undef  elf_backend_may_use_rela_p
20702 #define elf_backend_may_use_rela_p	1
20703 #undef  elf_backend_default_use_rela_p
20704 #define elf_backend_default_use_rela_p	1
20705 #undef  elf_backend_want_plt_sym
20706 #define elf_backend_want_plt_sym	1
20707 #undef  ELF_MAXPAGESIZE
20708 #define ELF_MAXPAGESIZE			0x1000
20709 
20710 #include "elf32-target.h"
20711 
20712 
20713 /* Merge backend specific data from an object file to the output
20714    object file when linking.  */
20715 
20716 static bfd_boolean
20717 elf32_arm_merge_private_bfd_data (bfd *ibfd, struct bfd_link_info *info)
20718 {
20719   bfd *obfd = info->output_bfd;
20720   flagword out_flags;
20721   flagword in_flags;
20722   bfd_boolean flags_compatible = TRUE;
20723   asection *sec;
20724 
20725   /* Check if we have the same endianness.  */
20726   if (! _bfd_generic_verify_endian_match (ibfd, info))
20727     return FALSE;
20728 
20729   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
20730     return TRUE;
20731 
20732   if (!elf32_arm_merge_eabi_attributes (ibfd, info))
20733     return FALSE;
20734 
20735   /* The input BFD must have had its flags initialised.  */
20736   /* The following seems bogus to me -- The flags are initialized in
20737      the assembler but I don't think an elf_flags_init field is
20738      written into the object.  */
20739   /* BFD_ASSERT (elf_flags_init (ibfd)); */
20740 
20741   in_flags  = elf_elfheader (ibfd)->e_flags;
20742   out_flags = elf_elfheader (obfd)->e_flags;
20743 
20744   /* In theory there is no reason why we couldn't handle this.  However
20745      in practice it isn't even close to working and there is no real
20746      reason to want it.  */
20747   if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
20748       && !(ibfd->flags & DYNAMIC)
20749       && (in_flags & EF_ARM_BE8))
20750     {
20751       _bfd_error_handler (_("error: %pB is already in final BE8 format"),
20752 			  ibfd);
20753       return FALSE;
20754     }
20755 
20756   if (!elf_flags_init (obfd))
20757     {
20758       /* If the input is the default architecture and had the default
20759 	 flags then do not bother setting the flags for the output
20760 	 architecture; instead allow future merges to do this.  If no
20761 	 future merges ever set these flags then they will retain their
20762 	 uninitialised values which, surprise surprise, correspond
20763 	 to the default values.  */
20764       if (bfd_get_arch_info (ibfd)->the_default
20765 	  && elf_elfheader (ibfd)->e_flags == 0)
20766 	return TRUE;
20767 
20768       elf_flags_init (obfd) = TRUE;
20769       elf_elfheader (obfd)->e_flags = in_flags;
20770 
20771       if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
20772 	  && bfd_get_arch_info (obfd)->the_default)
20773 	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
20774 
20775       return TRUE;
20776     }
20777 
20778   /* Determine what should happen if the input ARM architecture
20779      does not match the output ARM architecture.  */
20780   if (! bfd_arm_merge_machines (ibfd, obfd))
20781     return FALSE;
20782 
20783   /* Identical flags must be compatible.  */
20784   if (in_flags == out_flags)
20785     return TRUE;
20786 
20787   /* Check to see if the input BFD actually contains any sections.  If
20788      not, its flags may not have been initialised either, but it
20789      cannot actually cause any incompatibility.  Do not short-circuit
20790      dynamic objects; their section list may be emptied by
20791     elf_link_add_object_symbols.
20792 
20793     Also check to see if there are no code sections in the input.
20794     In this case there is no need to check for code-specific flags.
20795     XXX - do we need to worry about floating-point format compatibility
20796     in data sections?  */
20797   if (!(ibfd->flags & DYNAMIC))
20798     {
20799       bfd_boolean null_input_bfd = TRUE;
20800       bfd_boolean only_data_sections = TRUE;
20801 
20802       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
20803 	{
20804 	  /* Ignore synthetic glue sections.  */
20805 	  if (strcmp (sec->name, ".glue_7")
20806 	      && strcmp (sec->name, ".glue_7t"))
20807 	    {
20808 	      if ((bfd_section_flags (sec)
20809 		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20810 		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
20811 		only_data_sections = FALSE;
20812 
20813 	      null_input_bfd = FALSE;
20814 	      break;
20815 	    }
20816 	}
20817 
20818       if (null_input_bfd || only_data_sections)
20819 	return TRUE;
20820     }
20821 
20822   /* Complain about various flag mismatches.  */
20823   if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
20824 				      EF_ARM_EABI_VERSION (out_flags)))
20825     {
20826       _bfd_error_handler
20827 	(_("error: source object %pB has EABI version %d, but target %pB has EABI version %d"),
20828 	 ibfd, (in_flags & EF_ARM_EABIMASK) >> 24,
20829 	 obfd, (out_flags & EF_ARM_EABIMASK) >> 24);
20830       return FALSE;
20831     }
20832 
20833   /* Not sure what needs to be checked for EABI versions >= 1.  */
20834   /* VxWorks libraries do not use these flags.  */
20835   if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
20836       && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
20837       && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
20838     {
20839       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
20840 	{
20841 	  _bfd_error_handler
20842 	    (_("error: %pB is compiled for APCS-%d, whereas target %pB uses APCS-%d"),
20843 	     ibfd, in_flags & EF_ARM_APCS_26 ? 26 : 32,
20844 	     obfd, out_flags & EF_ARM_APCS_26 ? 26 : 32);
20845 	  flags_compatible = FALSE;
20846 	}
20847 
20848       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
20849 	{
20850 	  if (in_flags & EF_ARM_APCS_FLOAT)
20851 	    _bfd_error_handler
20852 	      (_("error: %pB passes floats in float registers, whereas %pB passes them in integer registers"),
20853 	       ibfd, obfd);
20854 	  else
20855 	    _bfd_error_handler
20856 	      (_("error: %pB passes floats in integer registers, whereas %pB passes them in float registers"),
20857 	       ibfd, obfd);
20858 
20859 	  flags_compatible = FALSE;
20860 	}
20861 
20862       if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
20863 	{
20864 	  if (in_flags & EF_ARM_VFP_FLOAT)
20865 	    _bfd_error_handler
20866 	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20867 	       ibfd, "VFP", obfd);
20868 	  else
20869 	    _bfd_error_handler
20870 	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20871 	       ibfd, "FPA", obfd);
20872 
20873 	  flags_compatible = FALSE;
20874 	}
20875 
20876       if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
20877 	{
20878 	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
20879 	    _bfd_error_handler
20880 	      (_("error: %pB uses %s instructions, whereas %pB does not"),
20881 	       ibfd, "Maverick", obfd);
20882 	  else
20883 	    _bfd_error_handler
20884 	      (_("error: %pB does not use %s instructions, whereas %pB does"),
20885 	       ibfd, "Maverick", obfd);
20886 
20887 	  flags_compatible = FALSE;
20888 	}
20889 
20890 #ifdef EF_ARM_SOFT_FLOAT
20891       if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
20892 	{
20893 	  /* We can allow interworking between code that uses the VFP
20894 	     format layout and either soft-float or integer registers for
20895 	     passing floating-point arguments and results.  We already
20896 	     know that the APCS_FLOAT flags match; similarly for the VFP
20897 	     flags.  */
20898 	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
20899 	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
20900 	    {
20901 	      if (in_flags & EF_ARM_SOFT_FLOAT)
20902 		_bfd_error_handler
20903 		  (_("error: %pB uses software FP, whereas %pB uses hardware FP"),
20904 		   ibfd, obfd);
20905 	      else
20906 		_bfd_error_handler
20907 		  (_("error: %pB uses hardware FP, whereas %pB uses software FP"),
20908 		   ibfd, obfd);
20909 
20910 	      flags_compatible = FALSE;
20911 	    }
20912 	}
20913 #endif
20914 
20915       /* Interworking mismatch is only a warning.  */
20916       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
20917 	{
20918 	  if (in_flags & EF_ARM_INTERWORK)
20919 	    {
20920 	      _bfd_error_handler
20921 		(_("warning: %pB supports interworking, whereas %pB does not"),
20922 		 ibfd, obfd);
20923 	    }
20924 	  else
20925 	    {
20926 	      _bfd_error_handler
20927 		(_("warning: %pB does not support interworking, whereas %pB does"),
20928 		 ibfd, obfd);
20929 	    }
20930 	}
20931     }
20932 
20933   return flags_compatible;
20934 }
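/* For reference, EF_ARM_EABI_VERSION extracts the top byte of e_flags
   (EF_ARM_EABIMASK is 0xff000000), so a pre-EABI object reports version
   EF_ARM_EABI_UNKNOWN (zero) and is vetted by the legacy checks above.
   Each of those checks follows the same pattern, sketched here with
   SOME_FLAG standing in for any of the EF_ARM_* bits tested:

     if ((in_flags & SOME_FLAG) != (out_flags & SOME_FLAG))
       flags_compatible = FALSE;	(after reporting the mismatch)

   The one exception is EF_ARM_INTERWORK, whose mismatch is reported only
   as a warning and leaves flags_compatible untouched.  */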
20935 
20936 
20937 /* Symbian OS Targets.  */
20938 
20939 #undef	TARGET_LITTLE_SYM
20940 #define TARGET_LITTLE_SYM		arm_elf32_symbian_le_vec
20941 #undef	TARGET_LITTLE_NAME
20942 #define TARGET_LITTLE_NAME		"elf32-littlearm-symbian"
20943 #undef	TARGET_BIG_SYM
20944 #define TARGET_BIG_SYM			arm_elf32_symbian_be_vec
20945 #undef	TARGET_BIG_NAME
20946 #define TARGET_BIG_NAME			"elf32-bigarm-symbian"
20947 
20948 /* Like elf32_arm_link_hash_table_create -- but overrides
20949    appropriately for Symbian OS.  */
20950 
20951 static struct bfd_link_hash_table *
20952 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
20953 {
20954   struct bfd_link_hash_table *ret;
20955 
20956   ret = elf32_arm_link_hash_table_create (abfd);
20957   if (ret)
20958     {
20959       struct elf32_arm_link_hash_table *htab
20960 	= (struct elf32_arm_link_hash_table *)ret;
20961       /* There is no PLT header for Symbian OS.  */
20962       htab->plt_header_size = 0;
20963       /* The PLT entries are each one instruction and one word.  */
20964       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
20965       htab->symbian_p = 1;
20966       /* Symbian uses armv5t or above, so use_blx is always true.  */
20967       htab->use_blx = 1;
20968       htab->root.is_relocatable_executable = 1;
20969     }
20970   return ret;
20971 }
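/* A note on the sizes chosen above: elf32_arm_symbian_plt_entry holds the
   32-bit words of one stub template (one instruction plus one literal
   word, per the comment in the function), so

     4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry)

   converts that word count into bytes, giving 8 bytes per PLT entry; and
   with plt_header_size set to 0 the first stub starts at the very
   beginning of the .plt section.  */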
20972 
20973 static const struct bfd_elf_special_section
20974 elf32_arm_symbian_special_sections[] =
20975 {
20976   /* In a BPABI executable, the dynamic linking sections do not go in
20977      the loadable read-only segment.  The post-linker may wish to
20978      refer to these sections, but they are not part of the final
20979      program image.  */
20980   { STRING_COMMA_LEN (".dynamic"),	 0, SHT_DYNAMIC,  0 },
20981   { STRING_COMMA_LEN (".dynstr"),	 0, SHT_STRTAB,	  0 },
20982   { STRING_COMMA_LEN (".dynsym"),	 0, SHT_DYNSYM,	  0 },
20983   { STRING_COMMA_LEN (".got"),		 0, SHT_PROGBITS, 0 },
20984   { STRING_COMMA_LEN (".hash"),		 0, SHT_HASH,	  0 },
20985   /* These sections do not need to be writable as the SymbianOS
20986      postlinker will arrange things so that no dynamic relocation is
20987      required.  */
20988   { STRING_COMMA_LEN (".init_array"),	 0, SHT_INIT_ARRAY,    SHF_ALLOC },
20989   { STRING_COMMA_LEN (".fini_array"),	 0, SHT_FINI_ARRAY,    SHF_ALLOC },
20990   { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
20991   { NULL,			      0, 0, 0,		       0 }
20992 };
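/* In each row above STRING_COMMA_LEN expands to the section name and its
   length; the trailing fields give the expected section type and, last,
   the SHF_* attributes to apply.  A zero attribute field keeps the
   dynamic-linking sections out of any loadable segment, while the array
   sections receive SHF_ALLOC only, and deliberately not SHF_WRITE, as the
   comment above explains.  */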
20993 
20994 static void
20995 elf32_arm_symbian_begin_write_processing (bfd *abfd,
20996 					  struct bfd_link_info *link_info)
20997 {
20998   /* BPABI objects are never loaded directly by an OS kernel; they are
20999      processed by a postlinker first, into an OS-specific format.  If
21000      the D_PAGED bit is set on the file, BFD will align segments on
21001      page boundaries, so that an OS can directly map the file.  With
21002      BPABI objects, that just results in wasted space.  In addition,
21003      because we clear the D_PAGED bit, map_sections_to_segments will
21004      recognize that the program headers should not be mapped into any
21005      loadable segment.  */
21006   abfd->flags &= ~D_PAGED;
21007   elf32_arm_begin_write_processing (abfd, link_info);
21008 }
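/* In concrete terms, once D_PAGED is cleared BFD no longer rounds segment
   file offsets up to ELF_MAXPAGESIZE boundaries, so the intermediate BPABI
   image stays compact; any alignment the final OS-specific format requires
   is left for the postlinker to impose.  */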
21009 
21010 static bfd_boolean
21011 elf32_arm_symbian_modify_segment_map (bfd *abfd,
21012 				      struct bfd_link_info *info)
21013 {
21014   struct elf_segment_map *m;
21015   asection *dynsec;
21016 
21017   /* BPABI shared libraries and executables should have a PT_DYNAMIC
21018      segment.  However, because the .dynamic section is not marked
21019      with SEC_LOAD, the generic ELF code will not create such a
21020      segment.  */
21021   dynsec = bfd_get_section_by_name (abfd, ".dynamic");
21022   if (dynsec)
21023     {
21024       for (m = elf_seg_map (abfd); m != NULL; m = m->next)
21025 	if (m->p_type == PT_DYNAMIC)
21026 	  break;
21027 
21028       if (m == NULL)
21029 	{
21030 	  m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
21031 	  m->next = elf_seg_map (abfd);
21032 	  elf_seg_map (abfd) = m;
21033 	}
21034     }
21035 
21036   /* Also call the generic arm routine.  */
21037   return elf32_arm_modify_segment_map (abfd, info);
21038 }
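/* A quick way to see the effect of this hook: running "readelf -l" on a
   linked BPABI image should show a PT_DYNAMIC program header covering
   .dynamic even though that section, lacking SEC_LOAD, is not part of any
   PT_LOAD segment; without the hook the generic ELF code would emit no
   PT_DYNAMIC entry at all.  */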
21039 
21040 /* Return the address of the Ith PLT stub in section PLT, for relocation
21041    REL, or (bfd_vma) -1 if the stub should not be included.  */
21042 
21043 static bfd_vma
21044 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
21045 			       const arelent *rel ATTRIBUTE_UNUSED)
21046 {
21047   return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
21048 }
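/* Worked example for the formula above: with no PLT header and an entry
   size of 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) bytes (8 for the
   two-word template), stub 0 lives at plt->vma, stub 1 at plt->vma + 8,
   stub 2 at plt->vma + 16, and so on.  */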
21049 
21050 #undef  elf32_bed
21051 #define elf32_bed elf32_arm_symbian_bed
21052 
21053 /* The dynamic sections are not allocated on SymbianOS; the postlinker
21054    will process them and then discard them.  */
21055 #undef  ELF_DYNAMIC_SEC_FLAGS
21056 #define ELF_DYNAMIC_SEC_FLAGS \
21057   (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
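/* Unlike the generic ELF definition, the flag set above omits SEC_ALLOC
   and SEC_LOAD, so the dynamic sections take up no room in the runtime
   image; they exist purely for the postlinker to consume.  */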
21058 
21059 #undef elf_backend_emit_relocs
21060 
21061 #undef  bfd_elf32_bfd_link_hash_table_create
21062 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
21063 #undef  elf_backend_special_sections
21064 #define elf_backend_special_sections		elf32_arm_symbian_special_sections
21065 #undef  elf_backend_begin_write_processing
21066 #define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
21067 #undef  elf_backend_final_write_processing
21068 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
21069 
21070 #undef  elf_backend_modify_segment_map
21071 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
21072 
21073 /* There is no .got section for BPABI objects, and hence no header.  */
21074 #undef  elf_backend_got_header_size
21075 #define elf_backend_got_header_size 0
21076 
21077 /* Similarly, there is no .got.plt section.  */
21078 #undef  elf_backend_want_got_plt
21079 #define elf_backend_want_got_plt 0
21080 
21081 #undef  elf_backend_plt_sym_val
21082 #define elf_backend_plt_sym_val		elf32_arm_symbian_plt_sym_val
21083 
21084 #undef  elf_backend_may_use_rel_p
21085 #define elf_backend_may_use_rel_p	1
21086 #undef  elf_backend_may_use_rela_p
21087 #define elf_backend_may_use_rela_p	0
21088 #undef  elf_backend_default_use_rela_p
21089 #define elf_backend_default_use_rela_p	0
21090 #undef  elf_backend_want_plt_sym
21091 #define elf_backend_want_plt_sym	0
21092 #undef  elf_backend_dtrel_excludes_plt
21093 #define elf_backend_dtrel_excludes_plt	0
21094 #undef  ELF_MAXPAGESIZE
21095 #define ELF_MAXPAGESIZE			0x8000
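/* 0x8000 is 32 KiB, the maximum page size assumed for Symbian targets.
   Re-including "elf32-target.h" below instantiates the Symbian target
   vectors using all of the macro overrides made since its previous
   inclusion.  */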
21096 
21097 #include "elf32-target.h"
21098