xref: /netbsd-src/external/gpl3/binutils.old/dist/bfd/elf32-arm.c (revision f3cfa6f6ce31685c6c4a758bc430e69eb99f50a4)
1 /* 32-bit ELF support for ARM
2    Copyright (C) 1998-2016 Free Software Foundation, Inc.
3 
4    This file is part of BFD, the Binary File Descriptor library.
5 
6    This program is free software; you can redistribute it and/or modify
7    it under the terms of the GNU General Public License as published by
8    the Free Software Foundation; either version 3 of the License, or
9    (at your option) any later version.
10 
11    This program is distributed in the hope that it will be useful,
12    but WITHOUT ANY WARRANTY; without even the implied warranty of
13    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14    GNU General Public License for more details.
15 
16    You should have received a copy of the GNU General Public License
17    along with this program; if not, write to the Free Software
18    Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
19    MA 02110-1301, USA.  */
20 
21 #include "sysdep.h"
22 #include <limits.h>
23 
24 #include "bfd.h"
25 #include "bfd_stdint.h"
26 #include "libiberty.h"
27 #include "libbfd.h"
28 #include "elf-bfd.h"
29 #include "elf-nacl.h"
30 #include "elf-vxworks.h"
31 #include "elf/arm.h"
32 
33 /* Return the relocation section associated with NAME.  HTAB is the
34    bfd's elf32_arm_link_hash_entry.  */
35 #define RELOC_SECTION(HTAB, NAME) \
36   ((HTAB)->use_rel ? ".rel" NAME : ".rela" NAME)
37 
38 /* Return size of a relocation entry.  HTAB is the bfd's
39    elf32_arm_link_hash_entry.  */
40 #define RELOC_SIZE(HTAB) \
41   ((HTAB)->use_rel \
42    ? sizeof (Elf32_External_Rel) \
43    : sizeof (Elf32_External_Rela))
44 
45 /* Return function to swap relocations in.  HTAB is the bfd's
46    elf32_arm_link_hash_entry.  */
47 #define SWAP_RELOC_IN(HTAB) \
48   ((HTAB)->use_rel \
49    ? bfd_elf32_swap_reloc_in \
50    : bfd_elf32_swap_reloca_in)
51 
52 /* Return function to swap relocations out.  HTAB is the bfd's
53    elf32_arm_link_hash_entry.  */
54 #define SWAP_RELOC_OUT(HTAB) \
55   ((HTAB)->use_rel \
56    ? bfd_elf32_swap_reloc_out \
57    : bfd_elf32_swap_reloca_out)
58 
59 #define elf_info_to_howto               0
60 #define elf_info_to_howto_rel           elf32_arm_info_to_howto
61 
62 #define ARM_ELF_ABI_VERSION		0
63 #define ARM_ELF_OS_ABI_VERSION		ELFOSABI_ARM
64 
65 /* The Adjusted Place, as defined by AAELF.  */
66 #define Pa(X) ((X) & 0xfffffffc)
67 
68 static bfd_boolean elf32_arm_write_section (bfd *output_bfd,
69 					    struct bfd_link_info *link_info,
70 					    asection *sec,
71 					    bfd_byte *contents);
72 
73 /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g.
74    R_ARM_PC24 as an index into this, and find the R_ARM_PC24 HOWTO
75    in that slot.  */
76 
77 static reloc_howto_type elf32_arm_howto_table_1[] =
78 {
79   /* No relocation.  */
80   HOWTO (R_ARM_NONE,		/* type */
81 	 0,			/* rightshift */
82 	 3,			/* size (0 = byte, 1 = short, 2 = long) */
83 	 0,			/* bitsize */
84 	 FALSE,			/* pc_relative */
85 	 0,			/* bitpos */
86 	 complain_overflow_dont,/* complain_on_overflow */
87 	 bfd_elf_generic_reloc,	/* special_function */
88 	 "R_ARM_NONE",		/* name */
89 	 FALSE,			/* partial_inplace */
90 	 0,			/* src_mask */
91 	 0,			/* dst_mask */
92 	 FALSE),		/* pcrel_offset */
93 
94   HOWTO (R_ARM_PC24,		/* type */
95 	 2,			/* rightshift */
96 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
97 	 24,			/* bitsize */
98 	 TRUE,			/* pc_relative */
99 	 0,			/* bitpos */
100 	 complain_overflow_signed,/* complain_on_overflow */
101 	 bfd_elf_generic_reloc,	/* special_function */
102 	 "R_ARM_PC24",		/* name */
103 	 FALSE,			/* partial_inplace */
104 	 0x00ffffff,		/* src_mask */
105 	 0x00ffffff,		/* dst_mask */
106 	 TRUE),			/* pcrel_offset */
107 
108   /* 32 bit absolute */
109   HOWTO (R_ARM_ABS32,		/* type */
110 	 0,			/* rightshift */
111 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
112 	 32,			/* bitsize */
113 	 FALSE,			/* pc_relative */
114 	 0,			/* bitpos */
115 	 complain_overflow_bitfield,/* complain_on_overflow */
116 	 bfd_elf_generic_reloc,	/* special_function */
117 	 "R_ARM_ABS32",		/* name */
118 	 FALSE,			/* partial_inplace */
119 	 0xffffffff,		/* src_mask */
120 	 0xffffffff,		/* dst_mask */
121 	 FALSE),		/* pcrel_offset */
122 
123   /* standard 32bit pc-relative reloc */
124   HOWTO (R_ARM_REL32,		/* type */
125 	 0,			/* rightshift */
126 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
127 	 32,			/* bitsize */
128 	 TRUE,			/* pc_relative */
129 	 0,			/* bitpos */
130 	 complain_overflow_bitfield,/* complain_on_overflow */
131 	 bfd_elf_generic_reloc,	/* special_function */
132 	 "R_ARM_REL32",		/* name */
133 	 FALSE,			/* partial_inplace */
134 	 0xffffffff,		/* src_mask */
135 	 0xffffffff,		/* dst_mask */
136 	 TRUE),			/* pcrel_offset */
137 
138   /* 8 bit absolute - R_ARM_LDR_PC_G0 in AAELF */
139   HOWTO (R_ARM_LDR_PC_G0,	/* type */
140 	 0,			/* rightshift */
141 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
142 	 32,			/* bitsize */
143 	 TRUE,			/* pc_relative */
144 	 0,			/* bitpos */
145 	 complain_overflow_dont,/* complain_on_overflow */
146 	 bfd_elf_generic_reloc,	/* special_function */
147 	 "R_ARM_LDR_PC_G0",     /* name */
148 	 FALSE,			/* partial_inplace */
149 	 0xffffffff,		/* src_mask */
150 	 0xffffffff,		/* dst_mask */
151 	 TRUE),			/* pcrel_offset */
152 
153    /* 16 bit absolute */
154   HOWTO (R_ARM_ABS16,		/* type */
155 	 0,			/* rightshift */
156 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
157 	 16,			/* bitsize */
158 	 FALSE,			/* pc_relative */
159 	 0,			/* bitpos */
160 	 complain_overflow_bitfield,/* complain_on_overflow */
161 	 bfd_elf_generic_reloc,	/* special_function */
162 	 "R_ARM_ABS16",		/* name */
163 	 FALSE,			/* partial_inplace */
164 	 0x0000ffff,		/* src_mask */
165 	 0x0000ffff,		/* dst_mask */
166 	 FALSE),		/* pcrel_offset */
167 
168   /* 12 bit absolute */
169   HOWTO (R_ARM_ABS12,		/* type */
170 	 0,			/* rightshift */
171 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
172 	 12,			/* bitsize */
173 	 FALSE,			/* pc_relative */
174 	 0,			/* bitpos */
175 	 complain_overflow_bitfield,/* complain_on_overflow */
176 	 bfd_elf_generic_reloc,	/* special_function */
177 	 "R_ARM_ABS12",		/* name */
178 	 FALSE,			/* partial_inplace */
179 	 0x00000fff,		/* src_mask */
180 	 0x00000fff,		/* dst_mask */
181 	 FALSE),		/* pcrel_offset */
182 
183   HOWTO (R_ARM_THM_ABS5,	/* type */
184 	 6,			/* rightshift */
185 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
186 	 5,			/* bitsize */
187 	 FALSE,			/* pc_relative */
188 	 0,			/* bitpos */
189 	 complain_overflow_bitfield,/* complain_on_overflow */
190 	 bfd_elf_generic_reloc,	/* special_function */
191 	 "R_ARM_THM_ABS5",	/* name */
192 	 FALSE,			/* partial_inplace */
193 	 0x000007e0,		/* src_mask */
194 	 0x000007e0,		/* dst_mask */
195 	 FALSE),		/* pcrel_offset */
196 
197   /* 8 bit absolute */
198   HOWTO (R_ARM_ABS8,		/* type */
199 	 0,			/* rightshift */
200 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
201 	 8,			/* bitsize */
202 	 FALSE,			/* pc_relative */
203 	 0,			/* bitpos */
204 	 complain_overflow_bitfield,/* complain_on_overflow */
205 	 bfd_elf_generic_reloc,	/* special_function */
206 	 "R_ARM_ABS8",		/* name */
207 	 FALSE,			/* partial_inplace */
208 	 0x000000ff,		/* src_mask */
209 	 0x000000ff,		/* dst_mask */
210 	 FALSE),		/* pcrel_offset */
211 
212   HOWTO (R_ARM_SBREL32,		/* type */
213 	 0,			/* rightshift */
214 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
215 	 32,			/* bitsize */
216 	 FALSE,			/* pc_relative */
217 	 0,			/* bitpos */
218 	 complain_overflow_dont,/* complain_on_overflow */
219 	 bfd_elf_generic_reloc,	/* special_function */
220 	 "R_ARM_SBREL32",	/* name */
221 	 FALSE,			/* partial_inplace */
222 	 0xffffffff,		/* src_mask */
223 	 0xffffffff,		/* dst_mask */
224 	 FALSE),		/* pcrel_offset */
225 
226   HOWTO (R_ARM_THM_CALL,	/* type */
227 	 1,			/* rightshift */
228 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
229 	 24,			/* bitsize */
230 	 TRUE,			/* pc_relative */
231 	 0,			/* bitpos */
232 	 complain_overflow_signed,/* complain_on_overflow */
233 	 bfd_elf_generic_reloc,	/* special_function */
234 	 "R_ARM_THM_CALL",	/* name */
235 	 FALSE,			/* partial_inplace */
236 	 0x07ff2fff,		/* src_mask */
237 	 0x07ff2fff,		/* dst_mask */
238 	 TRUE),			/* pcrel_offset */
239 
240   HOWTO (R_ARM_THM_PC8,	        /* type */
241 	 1,			/* rightshift */
242 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
243 	 8,			/* bitsize */
244 	 TRUE,			/* pc_relative */
245 	 0,			/* bitpos */
246 	 complain_overflow_signed,/* complain_on_overflow */
247 	 bfd_elf_generic_reloc,	/* special_function */
248 	 "R_ARM_THM_PC8",	/* name */
249 	 FALSE,			/* partial_inplace */
250 	 0x000000ff,		/* src_mask */
251 	 0x000000ff,		/* dst_mask */
252 	 TRUE),			/* pcrel_offset */
253 
254   HOWTO (R_ARM_BREL_ADJ,	/* type */
255 	 1,			/* rightshift */
256 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
257 	 32,			/* bitsize */
258 	 FALSE,			/* pc_relative */
259 	 0,			/* bitpos */
260 	 complain_overflow_signed,/* complain_on_overflow */
261 	 bfd_elf_generic_reloc,	/* special_function */
262 	 "R_ARM_BREL_ADJ",	/* name */
263 	 FALSE,			/* partial_inplace */
264 	 0xffffffff,		/* src_mask */
265 	 0xffffffff,		/* dst_mask */
266 	 FALSE),		/* pcrel_offset */
267 
268   HOWTO (R_ARM_TLS_DESC,	/* type */
269 	 0,			/* rightshift */
270 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
271 	 32,			/* bitsize */
272 	 FALSE,			/* pc_relative */
273 	 0,			/* bitpos */
274 	 complain_overflow_bitfield,/* complain_on_overflow */
275 	 bfd_elf_generic_reloc,	/* special_function */
276 	 "R_ARM_TLS_DESC",	/* name */
277 	 FALSE,			/* partial_inplace */
278 	 0xffffffff,		/* src_mask */
279 	 0xffffffff,		/* dst_mask */
280 	 FALSE),		/* pcrel_offset */
281 
282   HOWTO (R_ARM_THM_SWI8,	/* type */
283 	 0,			/* rightshift */
284 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
285 	 0,			/* bitsize */
286 	 FALSE,			/* pc_relative */
287 	 0,			/* bitpos */
288 	 complain_overflow_signed,/* complain_on_overflow */
289 	 bfd_elf_generic_reloc,	/* special_function */
290 	 "R_ARM_SWI8",		/* name */
291 	 FALSE,			/* partial_inplace */
292 	 0x00000000,		/* src_mask */
293 	 0x00000000,		/* dst_mask */
294 	 FALSE),		/* pcrel_offset */
295 
296   /* BLX instruction for the ARM.  */
297   HOWTO (R_ARM_XPC25,		/* type */
298 	 2,			/* rightshift */
299 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
300 	 24,			/* bitsize */
301 	 TRUE,			/* pc_relative */
302 	 0,			/* bitpos */
303 	 complain_overflow_signed,/* complain_on_overflow */
304 	 bfd_elf_generic_reloc,	/* special_function */
305 	 "R_ARM_XPC25",		/* name */
306 	 FALSE,			/* partial_inplace */
307 	 0x00ffffff,		/* src_mask */
308 	 0x00ffffff,		/* dst_mask */
309 	 TRUE),			/* pcrel_offset */
310 
311   /* BLX instruction for the Thumb.  */
312   HOWTO (R_ARM_THM_XPC22,	/* type */
313 	 2,			/* rightshift */
314 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
315 	 24,			/* bitsize */
316 	 TRUE,			/* pc_relative */
317 	 0,			/* bitpos */
318 	 complain_overflow_signed,/* complain_on_overflow */
319 	 bfd_elf_generic_reloc,	/* special_function */
320 	 "R_ARM_THM_XPC22",	/* name */
321 	 FALSE,			/* partial_inplace */
322 	 0x07ff2fff,		/* src_mask */
323 	 0x07ff2fff,		/* dst_mask */
324 	 TRUE),			/* pcrel_offset */
325 
326   /* Dynamic TLS relocations.  */
327 
328   HOWTO (R_ARM_TLS_DTPMOD32,	/* type */
329 	 0,                     /* rightshift */
330 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
331 	 32,                    /* bitsize */
332 	 FALSE,                 /* pc_relative */
333 	 0,                     /* bitpos */
334 	 complain_overflow_bitfield,/* complain_on_overflow */
335 	 bfd_elf_generic_reloc, /* special_function */
336 	 "R_ARM_TLS_DTPMOD32",	/* name */
337 	 TRUE,			/* partial_inplace */
338 	 0xffffffff,		/* src_mask */
339 	 0xffffffff,		/* dst_mask */
340 	 FALSE),                /* pcrel_offset */
341 
342   HOWTO (R_ARM_TLS_DTPOFF32,	/* type */
343 	 0,                     /* rightshift */
344 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
345 	 32,                    /* bitsize */
346 	 FALSE,                 /* pc_relative */
347 	 0,                     /* bitpos */
348 	 complain_overflow_bitfield,/* complain_on_overflow */
349 	 bfd_elf_generic_reloc, /* special_function */
350 	 "R_ARM_TLS_DTPOFF32",	/* name */
351 	 TRUE,			/* partial_inplace */
352 	 0xffffffff,		/* src_mask */
353 	 0xffffffff,		/* dst_mask */
354 	 FALSE),                /* pcrel_offset */
355 
356   HOWTO (R_ARM_TLS_TPOFF32,	/* type */
357 	 0,                     /* rightshift */
358 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
359 	 32,                    /* bitsize */
360 	 FALSE,                 /* pc_relative */
361 	 0,                     /* bitpos */
362 	 complain_overflow_bitfield,/* complain_on_overflow */
363 	 bfd_elf_generic_reloc, /* special_function */
364 	 "R_ARM_TLS_TPOFF32",	/* name */
365 	 TRUE,			/* partial_inplace */
366 	 0xffffffff,		/* src_mask */
367 	 0xffffffff,		/* dst_mask */
368 	 FALSE),                /* pcrel_offset */
369 
370   /* Relocs used in ARM Linux */
371 
372   HOWTO (R_ARM_COPY,		/* type */
373 	 0,                     /* rightshift */
374 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
375 	 32,                    /* bitsize */
376 	 FALSE,                 /* pc_relative */
377 	 0,                     /* bitpos */
378 	 complain_overflow_bitfield,/* complain_on_overflow */
379 	 bfd_elf_generic_reloc, /* special_function */
380 	 "R_ARM_COPY",		/* name */
381 	 TRUE,			/* partial_inplace */
382 	 0xffffffff,		/* src_mask */
383 	 0xffffffff,		/* dst_mask */
384 	 FALSE),                /* pcrel_offset */
385 
386   HOWTO (R_ARM_GLOB_DAT,	/* type */
387 	 0,                     /* rightshift */
388 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
389 	 32,                    /* bitsize */
390 	 FALSE,                 /* pc_relative */
391 	 0,                     /* bitpos */
392 	 complain_overflow_bitfield,/* complain_on_overflow */
393 	 bfd_elf_generic_reloc, /* special_function */
394 	 "R_ARM_GLOB_DAT",	/* name */
395 	 TRUE,			/* partial_inplace */
396 	 0xffffffff,		/* src_mask */
397 	 0xffffffff,		/* dst_mask */
398 	 FALSE),                /* pcrel_offset */
399 
400   HOWTO (R_ARM_JUMP_SLOT,	/* type */
401 	 0,                     /* rightshift */
402 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
403 	 32,                    /* bitsize */
404 	 FALSE,                 /* pc_relative */
405 	 0,                     /* bitpos */
406 	 complain_overflow_bitfield,/* complain_on_overflow */
407 	 bfd_elf_generic_reloc, /* special_function */
408 	 "R_ARM_JUMP_SLOT",	/* name */
409 	 TRUE,			/* partial_inplace */
410 	 0xffffffff,		/* src_mask */
411 	 0xffffffff,		/* dst_mask */
412 	 FALSE),                /* pcrel_offset */
413 
414   HOWTO (R_ARM_RELATIVE,	/* type */
415 	 0,                     /* rightshift */
416 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
417 	 32,                    /* bitsize */
418 	 FALSE,                 /* pc_relative */
419 	 0,                     /* bitpos */
420 	 complain_overflow_bitfield,/* complain_on_overflow */
421 	 bfd_elf_generic_reloc, /* special_function */
422 	 "R_ARM_RELATIVE",	/* name */
423 	 TRUE,			/* partial_inplace */
424 	 0xffffffff,		/* src_mask */
425 	 0xffffffff,		/* dst_mask */
426 	 FALSE),                /* pcrel_offset */
427 
428   HOWTO (R_ARM_GOTOFF32,	/* type */
429 	 0,                     /* rightshift */
430 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
431 	 32,                    /* bitsize */
432 	 FALSE,                 /* pc_relative */
433 	 0,                     /* bitpos */
434 	 complain_overflow_bitfield,/* complain_on_overflow */
435 	 bfd_elf_generic_reloc, /* special_function */
436 	 "R_ARM_GOTOFF32",	/* name */
437 	 TRUE,			/* partial_inplace */
438 	 0xffffffff,		/* src_mask */
439 	 0xffffffff,		/* dst_mask */
440 	 FALSE),                /* pcrel_offset */
441 
442   HOWTO (R_ARM_GOTPC,		/* type */
443 	 0,                     /* rightshift */
444 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
445 	 32,                    /* bitsize */
446 	 TRUE,			/* pc_relative */
447 	 0,                     /* bitpos */
448 	 complain_overflow_bitfield,/* complain_on_overflow */
449 	 bfd_elf_generic_reloc, /* special_function */
450 	 "R_ARM_GOTPC",		/* name */
451 	 TRUE,			/* partial_inplace */
452 	 0xffffffff,		/* src_mask */
453 	 0xffffffff,		/* dst_mask */
454 	 TRUE),			/* pcrel_offset */
455 
456   HOWTO (R_ARM_GOT32,		/* type */
457 	 0,                     /* rightshift */
458 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
459 	 32,                    /* bitsize */
460 	 FALSE,			/* pc_relative */
461 	 0,                     /* bitpos */
462 	 complain_overflow_bitfield,/* complain_on_overflow */
463 	 bfd_elf_generic_reloc, /* special_function */
464 	 "R_ARM_GOT32",		/* name */
465 	 TRUE,			/* partial_inplace */
466 	 0xffffffff,		/* src_mask */
467 	 0xffffffff,		/* dst_mask */
468 	 FALSE),		/* pcrel_offset */
469 
470   HOWTO (R_ARM_PLT32,		/* type */
471 	 2,                     /* rightshift */
472 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
473 	 24,                    /* bitsize */
474 	 TRUE,			/* pc_relative */
475 	 0,                     /* bitpos */
476 	 complain_overflow_bitfield,/* complain_on_overflow */
477 	 bfd_elf_generic_reloc, /* special_function */
478 	 "R_ARM_PLT32",		/* name */
479 	 FALSE,			/* partial_inplace */
480 	 0x00ffffff,		/* src_mask */
481 	 0x00ffffff,		/* dst_mask */
482 	 TRUE),			/* pcrel_offset */
483 
484   HOWTO (R_ARM_CALL,		/* type */
485 	 2,			/* rightshift */
486 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
487 	 24,			/* bitsize */
488 	 TRUE,			/* pc_relative */
489 	 0,			/* bitpos */
490 	 complain_overflow_signed,/* complain_on_overflow */
491 	 bfd_elf_generic_reloc,	/* special_function */
492 	 "R_ARM_CALL",		/* name */
493 	 FALSE,			/* partial_inplace */
494 	 0x00ffffff,		/* src_mask */
495 	 0x00ffffff,		/* dst_mask */
496 	 TRUE),			/* pcrel_offset */
497 
498   HOWTO (R_ARM_JUMP24,		/* type */
499 	 2,			/* rightshift */
500 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
501 	 24,			/* bitsize */
502 	 TRUE,			/* pc_relative */
503 	 0,			/* bitpos */
504 	 complain_overflow_signed,/* complain_on_overflow */
505 	 bfd_elf_generic_reloc,	/* special_function */
506 	 "R_ARM_JUMP24",	/* name */
507 	 FALSE,			/* partial_inplace */
508 	 0x00ffffff,		/* src_mask */
509 	 0x00ffffff,		/* dst_mask */
510 	 TRUE),			/* pcrel_offset */
511 
512   HOWTO (R_ARM_THM_JUMP24,	/* type */
513 	 1,			/* rightshift */
514 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
515 	 24,			/* bitsize */
516 	 TRUE,			/* pc_relative */
517 	 0,			/* bitpos */
518 	 complain_overflow_signed,/* complain_on_overflow */
519 	 bfd_elf_generic_reloc,	/* special_function */
520 	 "R_ARM_THM_JUMP24",	/* name */
521 	 FALSE,			/* partial_inplace */
522 	 0x07ff2fff,		/* src_mask */
523 	 0x07ff2fff,		/* dst_mask */
524 	 TRUE),			/* pcrel_offset */
525 
526   HOWTO (R_ARM_BASE_ABS,	/* type */
527 	 0,			/* rightshift */
528 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
529 	 32,			/* bitsize */
530 	 FALSE,			/* pc_relative */
531 	 0,			/* bitpos */
532 	 complain_overflow_dont,/* complain_on_overflow */
533 	 bfd_elf_generic_reloc,	/* special_function */
534 	 "R_ARM_BASE_ABS",	/* name */
535 	 FALSE,			/* partial_inplace */
536 	 0xffffffff,		/* src_mask */
537 	 0xffffffff,		/* dst_mask */
538 	 FALSE),		/* pcrel_offset */
539 
540   HOWTO (R_ARM_ALU_PCREL7_0,	/* type */
541 	 0,			/* rightshift */
542 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
543 	 12,			/* bitsize */
544 	 TRUE,			/* pc_relative */
545 	 0,			/* bitpos */
546 	 complain_overflow_dont,/* complain_on_overflow */
547 	 bfd_elf_generic_reloc,	/* special_function */
548 	 "R_ARM_ALU_PCREL_7_0",	/* name */
549 	 FALSE,			/* partial_inplace */
550 	 0x00000fff,		/* src_mask */
551 	 0x00000fff,		/* dst_mask */
552 	 TRUE),			/* pcrel_offset */
553 
554   HOWTO (R_ARM_ALU_PCREL15_8,	/* type */
555 	 0,			/* rightshift */
556 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
557 	 12,			/* bitsize */
558 	 TRUE,			/* pc_relative */
559 	 8,			/* bitpos */
560 	 complain_overflow_dont,/* complain_on_overflow */
561 	 bfd_elf_generic_reloc,	/* special_function */
562 	 "R_ARM_ALU_PCREL_15_8",/* name */
563 	 FALSE,			/* partial_inplace */
564 	 0x00000fff,		/* src_mask */
565 	 0x00000fff,		/* dst_mask */
566 	 TRUE),			/* pcrel_offset */
567 
568   HOWTO (R_ARM_ALU_PCREL23_15,	/* type */
569 	 0,			/* rightshift */
570 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
571 	 12,			/* bitsize */
572 	 TRUE,			/* pc_relative */
573 	 16,			/* bitpos */
574 	 complain_overflow_dont,/* complain_on_overflow */
575 	 bfd_elf_generic_reloc,	/* special_function */
576 	 "R_ARM_ALU_PCREL_23_15",/* name */
577 	 FALSE,			/* partial_inplace */
578 	 0x00000fff,		/* src_mask */
579 	 0x00000fff,		/* dst_mask */
580 	 TRUE),			/* pcrel_offset */
581 
582   HOWTO (R_ARM_LDR_SBREL_11_0,	/* type */
583 	 0,			/* rightshift */
584 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
585 	 12,			/* bitsize */
586 	 FALSE,			/* pc_relative */
587 	 0,			/* bitpos */
588 	 complain_overflow_dont,/* complain_on_overflow */
589 	 bfd_elf_generic_reloc,	/* special_function */
590 	 "R_ARM_LDR_SBREL_11_0",/* name */
591 	 FALSE,			/* partial_inplace */
592 	 0x00000fff,		/* src_mask */
593 	 0x00000fff,		/* dst_mask */
594 	 FALSE),		/* pcrel_offset */
595 
596   HOWTO (R_ARM_ALU_SBREL_19_12,	/* type */
597 	 0,			/* rightshift */
598 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
599 	 8,			/* bitsize */
600 	 FALSE,			/* pc_relative */
601 	 12,			/* bitpos */
602 	 complain_overflow_dont,/* complain_on_overflow */
603 	 bfd_elf_generic_reloc,	/* special_function */
604 	 "R_ARM_ALU_SBREL_19_12",/* name */
605 	 FALSE,			/* partial_inplace */
606 	 0x000ff000,		/* src_mask */
607 	 0x000ff000,		/* dst_mask */
608 	 FALSE),		/* pcrel_offset */
609 
610   HOWTO (R_ARM_ALU_SBREL_27_20,	/* type */
611 	 0,			/* rightshift */
612 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
613 	 8,			/* bitsize */
614 	 FALSE,			/* pc_relative */
615 	 20,			/* bitpos */
616 	 complain_overflow_dont,/* complain_on_overflow */
617 	 bfd_elf_generic_reloc,	/* special_function */
618 	 "R_ARM_ALU_SBREL_27_20",/* name */
619 	 FALSE,			/* partial_inplace */
620 	 0x0ff00000,		/* src_mask */
621 	 0x0ff00000,		/* dst_mask */
622 	 FALSE),		/* pcrel_offset */
623 
624   HOWTO (R_ARM_TARGET1,		/* type */
625 	 0,			/* rightshift */
626 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
627 	 32,			/* bitsize */
628 	 FALSE,			/* pc_relative */
629 	 0,			/* bitpos */
630 	 complain_overflow_dont,/* complain_on_overflow */
631 	 bfd_elf_generic_reloc,	/* special_function */
632 	 "R_ARM_TARGET1",	/* name */
633 	 FALSE,			/* partial_inplace */
634 	 0xffffffff,		/* src_mask */
635 	 0xffffffff,		/* dst_mask */
636 	 FALSE),		/* pcrel_offset */
637 
638   HOWTO (R_ARM_ROSEGREL32,	/* type */
639 	 0,			/* rightshift */
640 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
641 	 32,			/* bitsize */
642 	 FALSE,			/* pc_relative */
643 	 0,			/* bitpos */
644 	 complain_overflow_dont,/* complain_on_overflow */
645 	 bfd_elf_generic_reloc,	/* special_function */
646 	 "R_ARM_ROSEGREL32",	/* name */
647 	 FALSE,			/* partial_inplace */
648 	 0xffffffff,		/* src_mask */
649 	 0xffffffff,		/* dst_mask */
650 	 FALSE),		/* pcrel_offset */
651 
652   HOWTO (R_ARM_V4BX,		/* type */
653 	 0,			/* rightshift */
654 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
655 	 32,			/* bitsize */
656 	 FALSE,			/* pc_relative */
657 	 0,			/* bitpos */
658 	 complain_overflow_dont,/* complain_on_overflow */
659 	 bfd_elf_generic_reloc,	/* special_function */
660 	 "R_ARM_V4BX",		/* name */
661 	 FALSE,			/* partial_inplace */
662 	 0xffffffff,		/* src_mask */
663 	 0xffffffff,		/* dst_mask */
664 	 FALSE),		/* pcrel_offset */
665 
666   HOWTO (R_ARM_TARGET2,		/* type */
667 	 0,			/* rightshift */
668 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
669 	 32,			/* bitsize */
670 	 FALSE,			/* pc_relative */
671 	 0,			/* bitpos */
672 	 complain_overflow_signed,/* complain_on_overflow */
673 	 bfd_elf_generic_reloc,	/* special_function */
674 	 "R_ARM_TARGET2",	/* name */
675 	 FALSE,			/* partial_inplace */
676 	 0xffffffff,		/* src_mask */
677 	 0xffffffff,		/* dst_mask */
678 	 TRUE),			/* pcrel_offset */
679 
680   HOWTO (R_ARM_PREL31,		/* type */
681 	 0,			/* rightshift */
682 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
683 	 31,			/* bitsize */
684 	 TRUE,			/* pc_relative */
685 	 0,			/* bitpos */
686 	 complain_overflow_signed,/* complain_on_overflow */
687 	 bfd_elf_generic_reloc,	/* special_function */
688 	 "R_ARM_PREL31",	/* name */
689 	 FALSE,			/* partial_inplace */
690 	 0x7fffffff,		/* src_mask */
691 	 0x7fffffff,		/* dst_mask */
692 	 TRUE),			/* pcrel_offset */
693 
694   HOWTO (R_ARM_MOVW_ABS_NC,	/* type */
695 	 0,			/* rightshift */
696 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
697 	 16,			/* bitsize */
698 	 FALSE,			/* pc_relative */
699 	 0,			/* bitpos */
700 	 complain_overflow_dont,/* complain_on_overflow */
701 	 bfd_elf_generic_reloc,	/* special_function */
702 	 "R_ARM_MOVW_ABS_NC",	/* name */
703 	 FALSE,			/* partial_inplace */
704 	 0x000f0fff,		/* src_mask */
705 	 0x000f0fff,		/* dst_mask */
706 	 FALSE),		/* pcrel_offset */
707 
708   HOWTO (R_ARM_MOVT_ABS,	/* type */
709 	 0,			/* rightshift */
710 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
711 	 16,			/* bitsize */
712 	 FALSE,			/* pc_relative */
713 	 0,			/* bitpos */
714 	 complain_overflow_bitfield,/* complain_on_overflow */
715 	 bfd_elf_generic_reloc,	/* special_function */
716 	 "R_ARM_MOVT_ABS",	/* name */
717 	 FALSE,			/* partial_inplace */
718 	 0x000f0fff,		/* src_mask */
719 	 0x000f0fff,		/* dst_mask */
720 	 FALSE),		/* pcrel_offset */
721 
722   HOWTO (R_ARM_MOVW_PREL_NC,	/* type */
723 	 0,			/* rightshift */
724 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
725 	 16,			/* bitsize */
726 	 TRUE,			/* pc_relative */
727 	 0,			/* bitpos */
728 	 complain_overflow_dont,/* complain_on_overflow */
729 	 bfd_elf_generic_reloc,	/* special_function */
730 	 "R_ARM_MOVW_PREL_NC",	/* name */
731 	 FALSE,			/* partial_inplace */
732 	 0x000f0fff,		/* src_mask */
733 	 0x000f0fff,		/* dst_mask */
734 	 TRUE),			/* pcrel_offset */
735 
736   HOWTO (R_ARM_MOVT_PREL,	/* type */
737 	 0,			/* rightshift */
738 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
739 	 16,			/* bitsize */
740 	 TRUE,			/* pc_relative */
741 	 0,			/* bitpos */
742 	 complain_overflow_bitfield,/* complain_on_overflow */
743 	 bfd_elf_generic_reloc,	/* special_function */
744 	 "R_ARM_MOVT_PREL",	/* name */
745 	 FALSE,			/* partial_inplace */
746 	 0x000f0fff,		/* src_mask */
747 	 0x000f0fff,		/* dst_mask */
748 	 TRUE),			/* pcrel_offset */
749 
750   HOWTO (R_ARM_THM_MOVW_ABS_NC,	/* type */
751 	 0,			/* rightshift */
752 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
753 	 16,			/* bitsize */
754 	 FALSE,			/* pc_relative */
755 	 0,			/* bitpos */
756 	 complain_overflow_dont,/* complain_on_overflow */
757 	 bfd_elf_generic_reloc,	/* special_function */
758 	 "R_ARM_THM_MOVW_ABS_NC",/* name */
759 	 FALSE,			/* partial_inplace */
760 	 0x040f70ff,		/* src_mask */
761 	 0x040f70ff,		/* dst_mask */
762 	 FALSE),		/* pcrel_offset */
763 
764   HOWTO (R_ARM_THM_MOVT_ABS,	/* type */
765 	 0,			/* rightshift */
766 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
767 	 16,			/* bitsize */
768 	 FALSE,			/* pc_relative */
769 	 0,			/* bitpos */
770 	 complain_overflow_bitfield,/* complain_on_overflow */
771 	 bfd_elf_generic_reloc,	/* special_function */
772 	 "R_ARM_THM_MOVT_ABS",	/* name */
773 	 FALSE,			/* partial_inplace */
774 	 0x040f70ff,		/* src_mask */
775 	 0x040f70ff,		/* dst_mask */
776 	 FALSE),		/* pcrel_offset */
777 
778   HOWTO (R_ARM_THM_MOVW_PREL_NC,/* type */
779 	 0,			/* rightshift */
780 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
781 	 16,			/* bitsize */
782 	 TRUE,			/* pc_relative */
783 	 0,			/* bitpos */
784 	 complain_overflow_dont,/* complain_on_overflow */
785 	 bfd_elf_generic_reloc,	/* special_function */
786 	 "R_ARM_THM_MOVW_PREL_NC",/* name */
787 	 FALSE,			/* partial_inplace */
788 	 0x040f70ff,		/* src_mask */
789 	 0x040f70ff,		/* dst_mask */
790 	 TRUE),			/* pcrel_offset */
791 
792   HOWTO (R_ARM_THM_MOVT_PREL,	/* type */
793 	 0,			/* rightshift */
794 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
795 	 16,			/* bitsize */
796 	 TRUE,			/* pc_relative */
797 	 0,			/* bitpos */
798 	 complain_overflow_bitfield,/* complain_on_overflow */
799 	 bfd_elf_generic_reloc,	/* special_function */
800 	 "R_ARM_THM_MOVT_PREL",	/* name */
801 	 FALSE,			/* partial_inplace */
802 	 0x040f70ff,		/* src_mask */
803 	 0x040f70ff,		/* dst_mask */
804 	 TRUE),			/* pcrel_offset */
805 
806   HOWTO (R_ARM_THM_JUMP19,	/* type */
807 	 1,			/* rightshift */
808 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
809 	 19,			/* bitsize */
810 	 TRUE,			/* pc_relative */
811 	 0,			/* bitpos */
812 	 complain_overflow_signed,/* complain_on_overflow */
813 	 bfd_elf_generic_reloc, /* special_function */
814 	 "R_ARM_THM_JUMP19",	/* name */
815 	 FALSE,			/* partial_inplace */
816 	 0x043f2fff,		/* src_mask */
817 	 0x043f2fff,		/* dst_mask */
818 	 TRUE),			/* pcrel_offset */
819 
820   HOWTO (R_ARM_THM_JUMP6,	/* type */
821 	 1,			/* rightshift */
822 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
823 	 6,			/* bitsize */
824 	 TRUE,			/* pc_relative */
825 	 0,			/* bitpos */
826 	 complain_overflow_unsigned,/* complain_on_overflow */
827 	 bfd_elf_generic_reloc,	/* special_function */
828 	 "R_ARM_THM_JUMP6",	/* name */
829 	 FALSE,			/* partial_inplace */
830 	 0x02f8,		/* src_mask */
831 	 0x02f8,		/* dst_mask */
832 	 TRUE),			/* pcrel_offset */
833 
834   /* These are declared as 13-bit signed relocations because we can
835      address -4095 .. 4095(base) by altering ADDW to SUBW or vice
836      versa.  */
837   HOWTO (R_ARM_THM_ALU_PREL_11_0,/* type */
838 	 0,			/* rightshift */
839 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
840 	 13,			/* bitsize */
841 	 TRUE,			/* pc_relative */
842 	 0,			/* bitpos */
843 	 complain_overflow_dont,/* complain_on_overflow */
844 	 bfd_elf_generic_reloc,	/* special_function */
845 	 "R_ARM_THM_ALU_PREL_11_0",/* name */
846 	 FALSE,			/* partial_inplace */
847 	 0xffffffff,		/* src_mask */
848 	 0xffffffff,		/* dst_mask */
849 	 TRUE),			/* pcrel_offset */
850 
851   HOWTO (R_ARM_THM_PC12,	/* type */
852 	 0,			/* rightshift */
853 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
854 	 13,			/* bitsize */
855 	 TRUE,			/* pc_relative */
856 	 0,			/* bitpos */
857 	 complain_overflow_dont,/* complain_on_overflow */
858 	 bfd_elf_generic_reloc,	/* special_function */
859 	 "R_ARM_THM_PC12",	/* name */
860 	 FALSE,			/* partial_inplace */
861 	 0xffffffff,		/* src_mask */
862 	 0xffffffff,		/* dst_mask */
863 	 TRUE),			/* pcrel_offset */
864 
865   HOWTO (R_ARM_ABS32_NOI,	/* type */
866 	 0,			/* rightshift */
867 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
868 	 32,			/* bitsize */
869 	 FALSE,			/* pc_relative */
870 	 0,			/* bitpos */
871 	 complain_overflow_dont,/* complain_on_overflow */
872 	 bfd_elf_generic_reloc,	/* special_function */
873 	 "R_ARM_ABS32_NOI",	/* name */
874 	 FALSE,			/* partial_inplace */
875 	 0xffffffff,		/* src_mask */
876 	 0xffffffff,		/* dst_mask */
877 	 FALSE),		/* pcrel_offset */
878 
879   HOWTO (R_ARM_REL32_NOI,	/* type */
880 	 0,			/* rightshift */
881 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
882 	 32,			/* bitsize */
883 	 TRUE,			/* pc_relative */
884 	 0,			/* bitpos */
885 	 complain_overflow_dont,/* complain_on_overflow */
886 	 bfd_elf_generic_reloc,	/* special_function */
887 	 "R_ARM_REL32_NOI",	/* name */
888 	 FALSE,			/* partial_inplace */
889 	 0xffffffff,		/* src_mask */
890 	 0xffffffff,		/* dst_mask */
891 	 FALSE),		/* pcrel_offset */
892 
893   /* Group relocations.  */
894 
895   HOWTO (R_ARM_ALU_PC_G0_NC,	/* type */
896 	 0,			/* rightshift */
897 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
898 	 32,			/* bitsize */
899 	 TRUE,			/* pc_relative */
900 	 0,			/* bitpos */
901 	 complain_overflow_dont,/* complain_on_overflow */
902 	 bfd_elf_generic_reloc,	/* special_function */
903 	 "R_ARM_ALU_PC_G0_NC",	/* name */
904 	 FALSE,			/* partial_inplace */
905 	 0xffffffff,		/* src_mask */
906 	 0xffffffff,		/* dst_mask */
907 	 TRUE),			/* pcrel_offset */
908 
909   HOWTO (R_ARM_ALU_PC_G0,   	/* type */
910 	 0,			/* rightshift */
911 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
912 	 32,			/* bitsize */
913 	 TRUE,			/* pc_relative */
914 	 0,			/* bitpos */
915 	 complain_overflow_dont,/* complain_on_overflow */
916 	 bfd_elf_generic_reloc,	/* special_function */
917 	 "R_ARM_ALU_PC_G0",   	/* name */
918 	 FALSE,			/* partial_inplace */
919 	 0xffffffff,		/* src_mask */
920 	 0xffffffff,		/* dst_mask */
921 	 TRUE),			/* pcrel_offset */
922 
923   HOWTO (R_ARM_ALU_PC_G1_NC,	/* type */
924 	 0,			/* rightshift */
925 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
926 	 32,			/* bitsize */
927 	 TRUE,			/* pc_relative */
928 	 0,			/* bitpos */
929 	 complain_overflow_dont,/* complain_on_overflow */
930 	 bfd_elf_generic_reloc,	/* special_function */
931 	 "R_ARM_ALU_PC_G1_NC",	/* name */
932 	 FALSE,			/* partial_inplace */
933 	 0xffffffff,		/* src_mask */
934 	 0xffffffff,		/* dst_mask */
935 	 TRUE),			/* pcrel_offset */
936 
937   HOWTO (R_ARM_ALU_PC_G1,   	/* type */
938 	 0,			/* rightshift */
939 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
940 	 32,			/* bitsize */
941 	 TRUE,			/* pc_relative */
942 	 0,			/* bitpos */
943 	 complain_overflow_dont,/* complain_on_overflow */
944 	 bfd_elf_generic_reloc,	/* special_function */
945 	 "R_ARM_ALU_PC_G1",   	/* name */
946 	 FALSE,			/* partial_inplace */
947 	 0xffffffff,		/* src_mask */
948 	 0xffffffff,		/* dst_mask */
949 	 TRUE),			/* pcrel_offset */
950 
951   HOWTO (R_ARM_ALU_PC_G2,   	/* type */
952 	 0,			/* rightshift */
953 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
954 	 32,			/* bitsize */
955 	 TRUE,			/* pc_relative */
956 	 0,			/* bitpos */
957 	 complain_overflow_dont,/* complain_on_overflow */
958 	 bfd_elf_generic_reloc,	/* special_function */
959 	 "R_ARM_ALU_PC_G2",   	/* name */
960 	 FALSE,			/* partial_inplace */
961 	 0xffffffff,		/* src_mask */
962 	 0xffffffff,		/* dst_mask */
963 	 TRUE),			/* pcrel_offset */
964 
965   HOWTO (R_ARM_LDR_PC_G1,   	/* type */
966 	 0,			/* rightshift */
967 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
968 	 32,			/* bitsize */
969 	 TRUE,			/* pc_relative */
970 	 0,			/* bitpos */
971 	 complain_overflow_dont,/* complain_on_overflow */
972 	 bfd_elf_generic_reloc,	/* special_function */
973 	 "R_ARM_LDR_PC_G1",   	/* name */
974 	 FALSE,			/* partial_inplace */
975 	 0xffffffff,		/* src_mask */
976 	 0xffffffff,		/* dst_mask */
977 	 TRUE),			/* pcrel_offset */
978 
979   HOWTO (R_ARM_LDR_PC_G2,   	/* type */
980 	 0,			/* rightshift */
981 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
982 	 32,			/* bitsize */
983 	 TRUE,			/* pc_relative */
984 	 0,			/* bitpos */
985 	 complain_overflow_dont,/* complain_on_overflow */
986 	 bfd_elf_generic_reloc,	/* special_function */
987 	 "R_ARM_LDR_PC_G2",   	/* name */
988 	 FALSE,			/* partial_inplace */
989 	 0xffffffff,		/* src_mask */
990 	 0xffffffff,		/* dst_mask */
991 	 TRUE),			/* pcrel_offset */
992 
993   HOWTO (R_ARM_LDRS_PC_G0,   	/* type */
994 	 0,			/* rightshift */
995 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
996 	 32,			/* bitsize */
997 	 TRUE,			/* pc_relative */
998 	 0,			/* bitpos */
999 	 complain_overflow_dont,/* complain_on_overflow */
1000 	 bfd_elf_generic_reloc,	/* special_function */
1001 	 "R_ARM_LDRS_PC_G0",   	/* name */
1002 	 FALSE,			/* partial_inplace */
1003 	 0xffffffff,		/* src_mask */
1004 	 0xffffffff,		/* dst_mask */
1005 	 TRUE),			/* pcrel_offset */
1006 
1007   HOWTO (R_ARM_LDRS_PC_G1,   	/* type */
1008 	 0,			/* rightshift */
1009 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1010 	 32,			/* bitsize */
1011 	 TRUE,			/* pc_relative */
1012 	 0,			/* bitpos */
1013 	 complain_overflow_dont,/* complain_on_overflow */
1014 	 bfd_elf_generic_reloc,	/* special_function */
1015 	 "R_ARM_LDRS_PC_G1",   	/* name */
1016 	 FALSE,			/* partial_inplace */
1017 	 0xffffffff,		/* src_mask */
1018 	 0xffffffff,		/* dst_mask */
1019 	 TRUE),			/* pcrel_offset */
1020 
1021   HOWTO (R_ARM_LDRS_PC_G2,   	/* type */
1022 	 0,			/* rightshift */
1023 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1024 	 32,			/* bitsize */
1025 	 TRUE,			/* pc_relative */
1026 	 0,			/* bitpos */
1027 	 complain_overflow_dont,/* complain_on_overflow */
1028 	 bfd_elf_generic_reloc,	/* special_function */
1029 	 "R_ARM_LDRS_PC_G2",   	/* name */
1030 	 FALSE,			/* partial_inplace */
1031 	 0xffffffff,		/* src_mask */
1032 	 0xffffffff,		/* dst_mask */
1033 	 TRUE),			/* pcrel_offset */
1034 
1035   HOWTO (R_ARM_LDC_PC_G0,   	/* type */
1036 	 0,			/* rightshift */
1037 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1038 	 32,			/* bitsize */
1039 	 TRUE,			/* pc_relative */
1040 	 0,			/* bitpos */
1041 	 complain_overflow_dont,/* complain_on_overflow */
1042 	 bfd_elf_generic_reloc,	/* special_function */
1043 	 "R_ARM_LDC_PC_G0",   	/* name */
1044 	 FALSE,			/* partial_inplace */
1045 	 0xffffffff,		/* src_mask */
1046 	 0xffffffff,		/* dst_mask */
1047 	 TRUE),			/* pcrel_offset */
1048 
1049   HOWTO (R_ARM_LDC_PC_G1,   	/* type */
1050 	 0,			/* rightshift */
1051 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1052 	 32,			/* bitsize */
1053 	 TRUE,			/* pc_relative */
1054 	 0,			/* bitpos */
1055 	 complain_overflow_dont,/* complain_on_overflow */
1056 	 bfd_elf_generic_reloc,	/* special_function */
1057 	 "R_ARM_LDC_PC_G1",   	/* name */
1058 	 FALSE,			/* partial_inplace */
1059 	 0xffffffff,		/* src_mask */
1060 	 0xffffffff,		/* dst_mask */
1061 	 TRUE),			/* pcrel_offset */
1062 
1063   HOWTO (R_ARM_LDC_PC_G2,   	/* type */
1064 	 0,			/* rightshift */
1065 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1066 	 32,			/* bitsize */
1067 	 TRUE,			/* pc_relative */
1068 	 0,			/* bitpos */
1069 	 complain_overflow_dont,/* complain_on_overflow */
1070 	 bfd_elf_generic_reloc,	/* special_function */
1071 	 "R_ARM_LDC_PC_G2",   	/* name */
1072 	 FALSE,			/* partial_inplace */
1073 	 0xffffffff,		/* src_mask */
1074 	 0xffffffff,		/* dst_mask */
1075 	 TRUE),			/* pcrel_offset */
1076 
1077   HOWTO (R_ARM_ALU_SB_G0_NC,   	/* type */
1078 	 0,			/* rightshift */
1079 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1080 	 32,			/* bitsize */
1081 	 TRUE,			/* pc_relative */
1082 	 0,			/* bitpos */
1083 	 complain_overflow_dont,/* complain_on_overflow */
1084 	 bfd_elf_generic_reloc,	/* special_function */
1085 	 "R_ARM_ALU_SB_G0_NC", 	/* name */
1086 	 FALSE,			/* partial_inplace */
1087 	 0xffffffff,		/* src_mask */
1088 	 0xffffffff,		/* dst_mask */
1089 	 TRUE),			/* pcrel_offset */
1090 
1091   HOWTO (R_ARM_ALU_SB_G0,   	/* type */
1092 	 0,			/* rightshift */
1093 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1094 	 32,			/* bitsize */
1095 	 TRUE,			/* pc_relative */
1096 	 0,			/* bitpos */
1097 	 complain_overflow_dont,/* complain_on_overflow */
1098 	 bfd_elf_generic_reloc,	/* special_function */
1099 	 "R_ARM_ALU_SB_G0", 	/* name */
1100 	 FALSE,			/* partial_inplace */
1101 	 0xffffffff,		/* src_mask */
1102 	 0xffffffff,		/* dst_mask */
1103 	 TRUE),			/* pcrel_offset */
1104 
1105   HOWTO (R_ARM_ALU_SB_G1_NC,   	/* type */
1106 	 0,			/* rightshift */
1107 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1108 	 32,			/* bitsize */
1109 	 TRUE,			/* pc_relative */
1110 	 0,			/* bitpos */
1111 	 complain_overflow_dont,/* complain_on_overflow */
1112 	 bfd_elf_generic_reloc,	/* special_function */
1113 	 "R_ARM_ALU_SB_G1_NC", 	/* name */
1114 	 FALSE,			/* partial_inplace */
1115 	 0xffffffff,		/* src_mask */
1116 	 0xffffffff,		/* dst_mask */
1117 	 TRUE),			/* pcrel_offset */
1118 
1119   HOWTO (R_ARM_ALU_SB_G1,   	/* type */
1120 	 0,			/* rightshift */
1121 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1122 	 32,			/* bitsize */
1123 	 TRUE,			/* pc_relative */
1124 	 0,			/* bitpos */
1125 	 complain_overflow_dont,/* complain_on_overflow */
1126 	 bfd_elf_generic_reloc,	/* special_function */
1127 	 "R_ARM_ALU_SB_G1", 	/* name */
1128 	 FALSE,			/* partial_inplace */
1129 	 0xffffffff,		/* src_mask */
1130 	 0xffffffff,		/* dst_mask */
1131 	 TRUE),			/* pcrel_offset */
1132 
1133   HOWTO (R_ARM_ALU_SB_G2,   	/* type */
1134 	 0,			/* rightshift */
1135 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1136 	 32,			/* bitsize */
1137 	 TRUE,			/* pc_relative */
1138 	 0,			/* bitpos */
1139 	 complain_overflow_dont,/* complain_on_overflow */
1140 	 bfd_elf_generic_reloc,	/* special_function */
1141 	 "R_ARM_ALU_SB_G2", 	/* name */
1142 	 FALSE,			/* partial_inplace */
1143 	 0xffffffff,		/* src_mask */
1144 	 0xffffffff,		/* dst_mask */
1145 	 TRUE),			/* pcrel_offset */
1146 
1147   HOWTO (R_ARM_LDR_SB_G0,   	/* type */
1148 	 0,			/* rightshift */
1149 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1150 	 32,			/* bitsize */
1151 	 TRUE,			/* pc_relative */
1152 	 0,			/* bitpos */
1153 	 complain_overflow_dont,/* complain_on_overflow */
1154 	 bfd_elf_generic_reloc,	/* special_function */
1155 	 "R_ARM_LDR_SB_G0", 	/* name */
1156 	 FALSE,			/* partial_inplace */
1157 	 0xffffffff,		/* src_mask */
1158 	 0xffffffff,		/* dst_mask */
1159 	 TRUE),			/* pcrel_offset */
1160 
1161   HOWTO (R_ARM_LDR_SB_G1,   	/* type */
1162 	 0,			/* rightshift */
1163 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1164 	 32,			/* bitsize */
1165 	 TRUE,			/* pc_relative */
1166 	 0,			/* bitpos */
1167 	 complain_overflow_dont,/* complain_on_overflow */
1168 	 bfd_elf_generic_reloc,	/* special_function */
1169 	 "R_ARM_LDR_SB_G1", 	/* name */
1170 	 FALSE,			/* partial_inplace */
1171 	 0xffffffff,		/* src_mask */
1172 	 0xffffffff,		/* dst_mask */
1173 	 TRUE),			/* pcrel_offset */
1174 
1175   HOWTO (R_ARM_LDR_SB_G2,   	/* type */
1176 	 0,			/* rightshift */
1177 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1178 	 32,			/* bitsize */
1179 	 TRUE,			/* pc_relative */
1180 	 0,			/* bitpos */
1181 	 complain_overflow_dont,/* complain_on_overflow */
1182 	 bfd_elf_generic_reloc,	/* special_function */
1183 	 "R_ARM_LDR_SB_G2", 	/* name */
1184 	 FALSE,			/* partial_inplace */
1185 	 0xffffffff,		/* src_mask */
1186 	 0xffffffff,		/* dst_mask */
1187 	 TRUE),			/* pcrel_offset */
1188 
1189   HOWTO (R_ARM_LDRS_SB_G0,   	/* type */
1190 	 0,			/* rightshift */
1191 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1192 	 32,			/* bitsize */
1193 	 TRUE,			/* pc_relative */
1194 	 0,			/* bitpos */
1195 	 complain_overflow_dont,/* complain_on_overflow */
1196 	 bfd_elf_generic_reloc,	/* special_function */
1197 	 "R_ARM_LDRS_SB_G0", 	/* name */
1198 	 FALSE,			/* partial_inplace */
1199 	 0xffffffff,		/* src_mask */
1200 	 0xffffffff,		/* dst_mask */
1201 	 TRUE),			/* pcrel_offset */
1202 
1203   HOWTO (R_ARM_LDRS_SB_G1,   	/* type */
1204 	 0,			/* rightshift */
1205 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1206 	 32,			/* bitsize */
1207 	 TRUE,			/* pc_relative */
1208 	 0,			/* bitpos */
1209 	 complain_overflow_dont,/* complain_on_overflow */
1210 	 bfd_elf_generic_reloc,	/* special_function */
1211 	 "R_ARM_LDRS_SB_G1", 	/* name */
1212 	 FALSE,			/* partial_inplace */
1213 	 0xffffffff,		/* src_mask */
1214 	 0xffffffff,		/* dst_mask */
1215 	 TRUE),			/* pcrel_offset */
1216 
1217   HOWTO (R_ARM_LDRS_SB_G2,   	/* type */
1218 	 0,			/* rightshift */
1219 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1220 	 32,			/* bitsize */
1221 	 TRUE,			/* pc_relative */
1222 	 0,			/* bitpos */
1223 	 complain_overflow_dont,/* complain_on_overflow */
1224 	 bfd_elf_generic_reloc,	/* special_function */
1225 	 "R_ARM_LDRS_SB_G2", 	/* name */
1226 	 FALSE,			/* partial_inplace */
1227 	 0xffffffff,		/* src_mask */
1228 	 0xffffffff,		/* dst_mask */
1229 	 TRUE),			/* pcrel_offset */
1230 
1231   HOWTO (R_ARM_LDC_SB_G0,   	/* type */
1232 	 0,			/* rightshift */
1233 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1234 	 32,			/* bitsize */
1235 	 TRUE,			/* pc_relative */
1236 	 0,			/* bitpos */
1237 	 complain_overflow_dont,/* complain_on_overflow */
1238 	 bfd_elf_generic_reloc,	/* special_function */
1239 	 "R_ARM_LDC_SB_G0", 	/* name */
1240 	 FALSE,			/* partial_inplace */
1241 	 0xffffffff,		/* src_mask */
1242 	 0xffffffff,		/* dst_mask */
1243 	 TRUE),			/* pcrel_offset */
1244 
1245   HOWTO (R_ARM_LDC_SB_G1,   	/* type */
1246 	 0,			/* rightshift */
1247 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1248 	 32,			/* bitsize */
1249 	 TRUE,			/* pc_relative */
1250 	 0,			/* bitpos */
1251 	 complain_overflow_dont,/* complain_on_overflow */
1252 	 bfd_elf_generic_reloc,	/* special_function */
1253 	 "R_ARM_LDC_SB_G1", 	/* name */
1254 	 FALSE,			/* partial_inplace */
1255 	 0xffffffff,		/* src_mask */
1256 	 0xffffffff,		/* dst_mask */
1257 	 TRUE),			/* pcrel_offset */
1258 
1259   HOWTO (R_ARM_LDC_SB_G2,   	/* type */
1260 	 0,			/* rightshift */
1261 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1262 	 32,			/* bitsize */
1263 	 TRUE,			/* pc_relative */
1264 	 0,			/* bitpos */
1265 	 complain_overflow_dont,/* complain_on_overflow */
1266 	 bfd_elf_generic_reloc,	/* special_function */
1267 	 "R_ARM_LDC_SB_G2", 	/* name */
1268 	 FALSE,			/* partial_inplace */
1269 	 0xffffffff,		/* src_mask */
1270 	 0xffffffff,		/* dst_mask */
1271 	 TRUE),			/* pcrel_offset */
1272 
1273   /* End of group relocations.  */
1274 
1275   HOWTO (R_ARM_MOVW_BREL_NC,	/* type */
1276 	 0,			/* rightshift */
1277 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1278 	 16,			/* bitsize */
1279 	 FALSE,			/* pc_relative */
1280 	 0,			/* bitpos */
1281 	 complain_overflow_dont,/* complain_on_overflow */
1282 	 bfd_elf_generic_reloc,	/* special_function */
1283 	 "R_ARM_MOVW_BREL_NC",	/* name */
1284 	 FALSE,			/* partial_inplace */
1285 	 0x0000ffff,		/* src_mask */
1286 	 0x0000ffff,		/* dst_mask */
1287 	 FALSE),		/* pcrel_offset */
1288 
1289   HOWTO (R_ARM_MOVT_BREL,	/* type */
1290 	 0,			/* rightshift */
1291 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1292 	 16,			/* bitsize */
1293 	 FALSE,			/* pc_relative */
1294 	 0,			/* bitpos */
1295 	 complain_overflow_bitfield,/* complain_on_overflow */
1296 	 bfd_elf_generic_reloc,	/* special_function */
1297 	 "R_ARM_MOVT_BREL",	/* name */
1298 	 FALSE,			/* partial_inplace */
1299 	 0x0000ffff,		/* src_mask */
1300 	 0x0000ffff,		/* dst_mask */
1301 	 FALSE),		/* pcrel_offset */
1302 
1303   HOWTO (R_ARM_MOVW_BREL,	/* type */
1304 	 0,			/* rightshift */
1305 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1306 	 16,			/* bitsize */
1307 	 FALSE,			/* pc_relative */
1308 	 0,			/* bitpos */
1309 	 complain_overflow_dont,/* complain_on_overflow */
1310 	 bfd_elf_generic_reloc,	/* special_function */
1311 	 "R_ARM_MOVW_BREL",	/* name */
1312 	 FALSE,			/* partial_inplace */
1313 	 0x0000ffff,		/* src_mask */
1314 	 0x0000ffff,		/* dst_mask */
1315 	 FALSE),		/* pcrel_offset */
1316 
1317   HOWTO (R_ARM_THM_MOVW_BREL_NC,/* type */
1318 	 0,			/* rightshift */
1319 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1320 	 16,			/* bitsize */
1321 	 FALSE,			/* pc_relative */
1322 	 0,			/* bitpos */
1323 	 complain_overflow_dont,/* complain_on_overflow */
1324 	 bfd_elf_generic_reloc,	/* special_function */
1325 	 "R_ARM_THM_MOVW_BREL_NC",/* name */
1326 	 FALSE,			/* partial_inplace */
1327 	 0x040f70ff,		/* src_mask */
1328 	 0x040f70ff,		/* dst_mask */
1329 	 FALSE),		/* pcrel_offset */
1330 
1331   HOWTO (R_ARM_THM_MOVT_BREL,	/* type */
1332 	 0,			/* rightshift */
1333 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1334 	 16,			/* bitsize */
1335 	 FALSE,			/* pc_relative */
1336 	 0,			/* bitpos */
1337 	 complain_overflow_bitfield,/* complain_on_overflow */
1338 	 bfd_elf_generic_reloc,	/* special_function */
1339 	 "R_ARM_THM_MOVT_BREL",	/* name */
1340 	 FALSE,			/* partial_inplace */
1341 	 0x040f70ff,		/* src_mask */
1342 	 0x040f70ff,		/* dst_mask */
1343 	 FALSE),		/* pcrel_offset */
1344 
1345   HOWTO (R_ARM_THM_MOVW_BREL,	/* type */
1346 	 0,			/* rightshift */
1347 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1348 	 16,			/* bitsize */
1349 	 FALSE,			/* pc_relative */
1350 	 0,			/* bitpos */
1351 	 complain_overflow_dont,/* complain_on_overflow */
1352 	 bfd_elf_generic_reloc,	/* special_function */
1353 	 "R_ARM_THM_MOVW_BREL",	/* name */
1354 	 FALSE,			/* partial_inplace */
1355 	 0x040f70ff,		/* src_mask */
1356 	 0x040f70ff,		/* dst_mask */
1357 	 FALSE),		/* pcrel_offset */
1358 
1359   HOWTO (R_ARM_TLS_GOTDESC,	/* type */
1360 	 0,			/* rightshift */
1361 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1362 	 32,			/* bitsize */
1363 	 FALSE,			/* pc_relative */
1364 	 0,			/* bitpos */
1365 	 complain_overflow_bitfield,/* complain_on_overflow */
1366 	 NULL,			/* special_function */
1367 	 "R_ARM_TLS_GOTDESC",	/* name */
1368 	 TRUE,			/* partial_inplace */
1369 	 0xffffffff,		/* src_mask */
1370 	 0xffffffff,		/* dst_mask */
1371 	 FALSE),		/* pcrel_offset */
1372 
1373   HOWTO (R_ARM_TLS_CALL,	/* type */
1374 	 0,			/* rightshift */
1375 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1376 	 24,			/* bitsize */
1377 	 FALSE,			/* pc_relative */
1378 	 0,			/* bitpos */
1379 	 complain_overflow_dont,/* complain_on_overflow */
1380 	 bfd_elf_generic_reloc,	/* special_function */
1381 	 "R_ARM_TLS_CALL",	/* name */
1382 	 FALSE,			/* partial_inplace */
1383 	 0x00ffffff,		/* src_mask */
1384 	 0x00ffffff,		/* dst_mask */
1385 	 FALSE),		/* pcrel_offset */
1386 
1387   HOWTO (R_ARM_TLS_DESCSEQ,	/* type */
1388 	 0,			/* rightshift */
1389 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1390 	 0,			/* bitsize */
1391 	 FALSE,			/* pc_relative */
1392 	 0,			/* bitpos */
1393 	 complain_overflow_bitfield,/* complain_on_overflow */
1394 	 bfd_elf_generic_reloc,	/* special_function */
1395 	 "R_ARM_TLS_DESCSEQ",	/* name */
1396 	 FALSE,			/* partial_inplace */
1397 	 0x00000000,		/* src_mask */
1398 	 0x00000000,		/* dst_mask */
1399 	 FALSE),		/* pcrel_offset */
1400 
1401   HOWTO (R_ARM_THM_TLS_CALL,	/* type */
1402 	 0,			/* rightshift */
1403 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1404 	 24,			/* bitsize */
1405 	 FALSE,			/* pc_relative */
1406 	 0,			/* bitpos */
1407 	 complain_overflow_dont,/* complain_on_overflow */
1408 	 bfd_elf_generic_reloc,	/* special_function */
1409 	 "R_ARM_THM_TLS_CALL",	/* name */
1410 	 FALSE,			/* partial_inplace */
1411 	 0x07ff07ff,		/* src_mask */
1412 	 0x07ff07ff,		/* dst_mask */
1413 	 FALSE),		/* pcrel_offset */
1414 
1415   HOWTO (R_ARM_PLT32_ABS,	/* type */
1416 	 0,			/* rightshift */
1417 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1418 	 32,			/* bitsize */
1419 	 FALSE,			/* pc_relative */
1420 	 0,			/* bitpos */
1421 	 complain_overflow_dont,/* complain_on_overflow */
1422 	 bfd_elf_generic_reloc,	/* special_function */
1423 	 "R_ARM_PLT32_ABS",	/* name */
1424 	 FALSE,			/* partial_inplace */
1425 	 0xffffffff,		/* src_mask */
1426 	 0xffffffff,		/* dst_mask */
1427 	 FALSE),		/* pcrel_offset */
1428 
1429   HOWTO (R_ARM_GOT_ABS,		/* type */
1430 	 0,			/* rightshift */
1431 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1432 	 32,			/* bitsize */
1433 	 FALSE,			/* pc_relative */
1434 	 0,			/* bitpos */
1435 	 complain_overflow_dont,/* complain_on_overflow */
1436 	 bfd_elf_generic_reloc,	/* special_function */
1437 	 "R_ARM_GOT_ABS",	/* name */
1438 	 FALSE,			/* partial_inplace */
1439 	 0xffffffff,		/* src_mask */
1440 	 0xffffffff,		/* dst_mask */
1441 	 FALSE),			/* pcrel_offset */
1442 
1443   HOWTO (R_ARM_GOT_PREL,	/* type */
1444 	 0,			/* rightshift */
1445 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1446 	 32,			/* bitsize */
1447 	 TRUE,			/* pc_relative */
1448 	 0,			/* bitpos */
1449 	 complain_overflow_dont,	/* complain_on_overflow */
1450 	 bfd_elf_generic_reloc,	/* special_function */
1451 	 "R_ARM_GOT_PREL",	/* name */
1452 	 FALSE,			/* partial_inplace */
1453 	 0xffffffff,		/* src_mask */
1454 	 0xffffffff,		/* dst_mask */
1455 	 TRUE),			/* pcrel_offset */
1456 
1457   HOWTO (R_ARM_GOT_BREL12,	/* type */
1458 	 0,			/* rightshift */
1459 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1460 	 12,			/* bitsize */
1461 	 FALSE,			/* pc_relative */
1462 	 0,			/* bitpos */
1463 	 complain_overflow_bitfield,/* complain_on_overflow */
1464 	 bfd_elf_generic_reloc,	/* special_function */
1465 	 "R_ARM_GOT_BREL12",	/* name */
1466 	 FALSE,			/* partial_inplace */
1467 	 0x00000fff,		/* src_mask */
1468 	 0x00000fff,		/* dst_mask */
1469 	 FALSE),		/* pcrel_offset */
1470 
1471   HOWTO (R_ARM_GOTOFF12,	/* type */
1472 	 0,			/* rightshift */
1473 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1474 	 12,			/* bitsize */
1475 	 FALSE,			/* pc_relative */
1476 	 0,			/* bitpos */
1477 	 complain_overflow_bitfield,/* complain_on_overflow */
1478 	 bfd_elf_generic_reloc,	/* special_function */
1479 	 "R_ARM_GOTOFF12",	/* name */
1480 	 FALSE,			/* partial_inplace */
1481 	 0x00000fff,		/* src_mask */
1482 	 0x00000fff,		/* dst_mask */
1483 	 FALSE),		/* pcrel_offset */
1484 
1485   EMPTY_HOWTO (R_ARM_GOTRELAX),  /* reserved for future GOT-load optimizations */
1486 
1487   /* GNU extension to record C++ vtable member usage */
1488   HOWTO (R_ARM_GNU_VTENTRY,     /* type */
1489 	 0,                     /* rightshift */
1490 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1491 	 0,                     /* bitsize */
1492 	 FALSE,                 /* pc_relative */
1493 	 0,                     /* bitpos */
1494 	 complain_overflow_dont, /* complain_on_overflow */
1495 	 _bfd_elf_rel_vtable_reloc_fn,  /* special_function */
1496 	 "R_ARM_GNU_VTENTRY",   /* name */
1497 	 FALSE,                 /* partial_inplace */
1498 	 0,                     /* src_mask */
1499 	 0,                     /* dst_mask */
1500 	 FALSE),                /* pcrel_offset */
1501 
1502   /* GNU extension to record C++ vtable hierarchy */
1503   HOWTO (R_ARM_GNU_VTINHERIT, /* type */
1504 	 0,                     /* rightshift */
1505 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1506 	 0,                     /* bitsize */
1507 	 FALSE,                 /* pc_relative */
1508 	 0,                     /* bitpos */
1509 	 complain_overflow_dont, /* complain_on_overflow */
1510 	 NULL,                  /* special_function */
1511 	 "R_ARM_GNU_VTINHERIT", /* name */
1512 	 FALSE,                 /* partial_inplace */
1513 	 0,                     /* src_mask */
1514 	 0,                     /* dst_mask */
1515 	 FALSE),                /* pcrel_offset */
1516 
1517   HOWTO (R_ARM_THM_JUMP11,	/* type */
1518 	 1,			/* rightshift */
1519 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1520 	 11,			/* bitsize */
1521 	 TRUE,			/* pc_relative */
1522 	 0,			/* bitpos */
1523 	 complain_overflow_signed,	/* complain_on_overflow */
1524 	 bfd_elf_generic_reloc,	/* special_function */
1525 	 "R_ARM_THM_JUMP11",	/* name */
1526 	 FALSE,			/* partial_inplace */
1527 	 0x000007ff,		/* src_mask */
1528 	 0x000007ff,		/* dst_mask */
1529 	 TRUE),			/* pcrel_offset */
1530 
1531   HOWTO (R_ARM_THM_JUMP8,	/* type */
1532 	 1,			/* rightshift */
1533 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1534 	 8,			/* bitsize */
1535 	 TRUE,			/* pc_relative */
1536 	 0,			/* bitpos */
1537 	 complain_overflow_signed,	/* complain_on_overflow */
1538 	 bfd_elf_generic_reloc,	/* special_function */
1539 	 "R_ARM_THM_JUMP8",	/* name */
1540 	 FALSE,			/* partial_inplace */
1541 	 0x000000ff,		/* src_mask */
1542 	 0x000000ff,		/* dst_mask */
1543 	 TRUE),			/* pcrel_offset */
1544 
1545   /* TLS relocations */
1546   HOWTO (R_ARM_TLS_GD32,	/* type */
1547 	 0,                     /* rightshift */
1548 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1549 	 32,                    /* bitsize */
1550 	 FALSE,                 /* pc_relative */
1551 	 0,                     /* bitpos */
1552 	 complain_overflow_bitfield,/* complain_on_overflow */
1553 	 NULL,			/* special_function */
1554 	 "R_ARM_TLS_GD32",	/* name */
1555 	 TRUE,			/* partial_inplace */
1556 	 0xffffffff,		/* src_mask */
1557 	 0xffffffff,		/* dst_mask */
1558 	 FALSE),                /* pcrel_offset */
1559 
1560   HOWTO (R_ARM_TLS_LDM32,	/* type */
1561 	 0,                     /* rightshift */
1562 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1563 	 32,                    /* bitsize */
1564 	 FALSE,                 /* pc_relative */
1565 	 0,                     /* bitpos */
1566 	 complain_overflow_bitfield,/* complain_on_overflow */
1567 	 bfd_elf_generic_reloc, /* special_function */
1568 	 "R_ARM_TLS_LDM32",	/* name */
1569 	 TRUE,			/* partial_inplace */
1570 	 0xffffffff,		/* src_mask */
1571 	 0xffffffff,		/* dst_mask */
1572 	 FALSE),                /* pcrel_offset */
1573 
1574   HOWTO (R_ARM_TLS_LDO32,	/* type */
1575 	 0,                     /* rightshift */
1576 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1577 	 32,                    /* bitsize */
1578 	 FALSE,                 /* pc_relative */
1579 	 0,                     /* bitpos */
1580 	 complain_overflow_bitfield,/* complain_on_overflow */
1581 	 bfd_elf_generic_reloc, /* special_function */
1582 	 "R_ARM_TLS_LDO32",	/* name */
1583 	 TRUE,			/* partial_inplace */
1584 	 0xffffffff,		/* src_mask */
1585 	 0xffffffff,		/* dst_mask */
1586 	 FALSE),                /* pcrel_offset */
1587 
1588   HOWTO (R_ARM_TLS_IE32,	/* type */
1589 	 0,                     /* rightshift */
1590 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1591 	 32,                    /* bitsize */
1592 	 FALSE,                  /* pc_relative */
1593 	 0,                     /* bitpos */
1594 	 complain_overflow_bitfield,/* complain_on_overflow */
1595 	 NULL,			/* special_function */
1596 	 "R_ARM_TLS_IE32",	/* name */
1597 	 TRUE,			/* partial_inplace */
1598 	 0xffffffff,		/* src_mask */
1599 	 0xffffffff,		/* dst_mask */
1600 	 FALSE),                /* pcrel_offset */
1601 
1602   HOWTO (R_ARM_TLS_LE32,	/* type */
1603 	 0,                     /* rightshift */
1604 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1605 	 32,                    /* bitsize */
1606 	 FALSE,                 /* pc_relative */
1607 	 0,                     /* bitpos */
1608 	 complain_overflow_bitfield,/* complain_on_overflow */
1609 	 NULL, 			/* special_function */
1610 	 "R_ARM_TLS_LE32",	/* name */
1611 	 TRUE,			/* partial_inplace */
1612 	 0xffffffff,		/* src_mask */
1613 	 0xffffffff,		/* dst_mask */
1614 	 FALSE),                /* pcrel_offset */
1615 
1616   HOWTO (R_ARM_TLS_LDO12,	/* type */
1617 	 0,			/* rightshift */
1618 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1619 	 12,			/* bitsize */
1620 	 FALSE,			/* pc_relative */
1621 	 0,			/* bitpos */
1622 	 complain_overflow_bitfield,/* complain_on_overflow */
1623 	 bfd_elf_generic_reloc,	/* special_function */
1624 	 "R_ARM_TLS_LDO12",	/* name */
1625 	 FALSE,			/* partial_inplace */
1626 	 0x00000fff,		/* src_mask */
1627 	 0x00000fff,		/* dst_mask */
1628 	 FALSE),		/* pcrel_offset */
1629 
1630   HOWTO (R_ARM_TLS_LE12,	/* type */
1631 	 0,			/* rightshift */
1632 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1633 	 12,			/* bitsize */
1634 	 FALSE,			/* pc_relative */
1635 	 0,			/* bitpos */
1636 	 complain_overflow_bitfield,/* complain_on_overflow */
1637 	 bfd_elf_generic_reloc,	/* special_function */
1638 	 "R_ARM_TLS_LE12",	/* name */
1639 	 FALSE,			/* partial_inplace */
1640 	 0x00000fff,		/* src_mask */
1641 	 0x00000fff,		/* dst_mask */
1642 	 FALSE),		/* pcrel_offset */
1643 
1644   HOWTO (R_ARM_TLS_IE12GP,	/* type */
1645 	 0,			/* rightshift */
1646 	 2,			/* size (0 = byte, 1 = short, 2 = long) */
1647 	 12,			/* bitsize */
1648 	 FALSE,			/* pc_relative */
1649 	 0,			/* bitpos */
1650 	 complain_overflow_bitfield,/* complain_on_overflow */
1651 	 bfd_elf_generic_reloc,	/* special_function */
1652 	 "R_ARM_TLS_IE12GP",	/* name */
1653 	 FALSE,			/* partial_inplace */
1654 	 0x00000fff,		/* src_mask */
1655 	 0x00000fff,		/* dst_mask */
1656 	 FALSE),		/* pcrel_offset */
1657 
1658   /* 112-127 private relocations.  */
1659   EMPTY_HOWTO (112),
1660   EMPTY_HOWTO (113),
1661   EMPTY_HOWTO (114),
1662   EMPTY_HOWTO (115),
1663   EMPTY_HOWTO (116),
1664   EMPTY_HOWTO (117),
1665   EMPTY_HOWTO (118),
1666   EMPTY_HOWTO (119),
1667   EMPTY_HOWTO (120),
1668   EMPTY_HOWTO (121),
1669   EMPTY_HOWTO (122),
1670   EMPTY_HOWTO (123),
1671   EMPTY_HOWTO (124),
1672   EMPTY_HOWTO (125),
1673   EMPTY_HOWTO (126),
1674   EMPTY_HOWTO (127),
1675 
1676   /* R_ARM_ME_TOO, obsolete.  */
1677   EMPTY_HOWTO (128),
1678 
1679   HOWTO (R_ARM_THM_TLS_DESCSEQ,	/* type */
1680 	 0,			/* rightshift */
1681 	 1,			/* size (0 = byte, 1 = short, 2 = long) */
1682 	 0,			/* bitsize */
1683 	 FALSE,			/* pc_relative */
1684 	 0,			/* bitpos */
1685 	 complain_overflow_bitfield,/* complain_on_overflow */
1686 	 bfd_elf_generic_reloc,	/* special_function */
1687 	 "R_ARM_THM_TLS_DESCSEQ",/* name */
1688 	 FALSE,			/* partial_inplace */
1689 	 0x00000000,		/* src_mask */
1690 	 0x00000000,		/* dst_mask */
1691 	 FALSE),		/* pcrel_offset */
1692   EMPTY_HOWTO (130),
1693   EMPTY_HOWTO (131),
1694   HOWTO (R_ARM_THM_ALU_ABS_G0_NC,/* type.  */
1695 	 0,			/* rightshift.  */
1696 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1697 	 16,			/* bitsize.  */
1698 	 FALSE,			/* pc_relative.  */
1699 	 0,			/* bitpos.  */
1700 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1701 	 bfd_elf_generic_reloc,	/* special_function.  */
1702 	 "R_ARM_THM_ALU_ABS_G0_NC",/* name.  */
1703 	 FALSE,			/* partial_inplace.  */
1704 	 0x00000000,		/* src_mask.  */
1705 	 0x00000000,		/* dst_mask.  */
1706 	 FALSE),		/* pcrel_offset.  */
1707   HOWTO (R_ARM_THM_ALU_ABS_G1_NC,/* type.  */
1708 	 0,			/* rightshift.  */
1709 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1710 	 16,			/* bitsize.  */
1711 	 FALSE,			/* pc_relative.  */
1712 	 0,			/* bitpos.  */
1713 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1714 	 bfd_elf_generic_reloc,	/* special_function.  */
1715 	 "R_ARM_THM_ALU_ABS_G1_NC",/* name.  */
1716 	 FALSE,			/* partial_inplace.  */
1717 	 0x00000000,		/* src_mask.  */
1718 	 0x00000000,		/* dst_mask.  */
1719 	 FALSE),		/* pcrel_offset.  */
1720   HOWTO (R_ARM_THM_ALU_ABS_G2_NC,/* type.  */
1721 	 0,			/* rightshift.  */
1722 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1723 	 16,			/* bitsize.  */
1724 	 FALSE,			/* pc_relative.  */
1725 	 0,			/* bitpos.  */
1726 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1727 	 bfd_elf_generic_reloc,	/* special_function.  */
1728 	 "R_ARM_THM_ALU_ABS_G2_NC",/* name.  */
1729 	 FALSE,			/* partial_inplace.  */
1730 	 0x00000000,		/* src_mask.  */
1731 	 0x00000000,		/* dst_mask.  */
1732 	 FALSE),		/* pcrel_offset.  */
1733   HOWTO (R_ARM_THM_ALU_ABS_G3_NC,/* type.  */
1734 	 0,			/* rightshift.  */
1735 	 1,			/* size (0 = byte, 1 = short, 2 = long).  */
1736 	 16,			/* bitsize.  */
1737 	 FALSE,			/* pc_relative.  */
1738 	 0,			/* bitpos.  */
1739 	 complain_overflow_bitfield,/* complain_on_overflow.  */
1740 	 bfd_elf_generic_reloc,	/* special_function.  */
1741 	 "R_ARM_THM_ALU_ABS_G3_NC",/* name.  */
1742 	 FALSE,			/* partial_inplace.  */
1743 	 0x00000000,		/* src_mask.  */
1744 	 0x00000000,		/* dst_mask.  */
1745 	 FALSE),		/* pcrel_offset.  */
1746 };
1747 
1748 /* 160 onwards: */
1749 static reloc_howto_type elf32_arm_howto_table_2[1] =
1750 {
1751   HOWTO (R_ARM_IRELATIVE,	/* type */
1752 	 0,                     /* rightshift */
1753 	 2,                     /* size (0 = byte, 1 = short, 2 = long) */
1754 	 32,                    /* bitsize */
1755 	 FALSE,                 /* pc_relative */
1756 	 0,                     /* bitpos */
1757 	 complain_overflow_bitfield,/* complain_on_overflow */
1758 	 bfd_elf_generic_reloc, /* special_function */
1759 	 "R_ARM_IRELATIVE",	/* name */
1760 	 TRUE,			/* partial_inplace */
1761 	 0xffffffff,		/* src_mask */
1762 	 0xffffffff,		/* dst_mask */
1763 	 FALSE)			/* pcrel_offset */
1764 };
1765 
1766 /* 249-255 extended, currently unused, relocations:  */
1767 static reloc_howto_type elf32_arm_howto_table_3[4] =
1768 {
1769   HOWTO (R_ARM_RREL32,		/* type */
1770 	 0,			/* rightshift */
1771 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1772 	 0,			/* bitsize */
1773 	 FALSE,			/* pc_relative */
1774 	 0,			/* bitpos */
1775 	 complain_overflow_dont,/* complain_on_overflow */
1776 	 bfd_elf_generic_reloc,	/* special_function */
1777 	 "R_ARM_RREL32",	/* name */
1778 	 FALSE,			/* partial_inplace */
1779 	 0,			/* src_mask */
1780 	 0,			/* dst_mask */
1781 	 FALSE),		/* pcrel_offset */
1782 
1783   HOWTO (R_ARM_RABS32,		/* type */
1784 	 0,			/* rightshift */
1785 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1786 	 0,			/* bitsize */
1787 	 FALSE,			/* pc_relative */
1788 	 0,			/* bitpos */
1789 	 complain_overflow_dont,/* complain_on_overflow */
1790 	 bfd_elf_generic_reloc,	/* special_function */
1791 	 "R_ARM_RABS32",	/* name */
1792 	 FALSE,			/* partial_inplace */
1793 	 0,			/* src_mask */
1794 	 0,			/* dst_mask */
1795 	 FALSE),		/* pcrel_offset */
1796 
1797   HOWTO (R_ARM_RPC24,		/* type */
1798 	 0,			/* rightshift */
1799 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1800 	 0,			/* bitsize */
1801 	 FALSE,			/* pc_relative */
1802 	 0,			/* bitpos */
1803 	 complain_overflow_dont,/* complain_on_overflow */
1804 	 bfd_elf_generic_reloc,	/* special_function */
1805 	 "R_ARM_RPC24",		/* name */
1806 	 FALSE,			/* partial_inplace */
1807 	 0,			/* src_mask */
1808 	 0,			/* dst_mask */
1809 	 FALSE),		/* pcrel_offset */
1810 
1811   HOWTO (R_ARM_RBASE,		/* type */
1812 	 0,			/* rightshift */
1813 	 0,			/* size (0 = byte, 1 = short, 2 = long) */
1814 	 0,			/* bitsize */
1815 	 FALSE,			/* pc_relative */
1816 	 0,			/* bitpos */
1817 	 complain_overflow_dont,/* complain_on_overflow */
1818 	 bfd_elf_generic_reloc,	/* special_function */
1819 	 "R_ARM_RBASE",		/* name */
1820 	 FALSE,			/* partial_inplace */
1821 	 0,			/* src_mask */
1822 	 0,			/* dst_mask */
1823 	 FALSE)			/* pcrel_offset */
1824 };
1825 
1826 static reloc_howto_type *
1827 elf32_arm_howto_from_type (unsigned int r_type)
1828 {
1829   if (r_type < ARRAY_SIZE (elf32_arm_howto_table_1))
1830     return &elf32_arm_howto_table_1[r_type];
1831 
1832   if (r_type == R_ARM_IRELATIVE)
1833     return &elf32_arm_howto_table_2[r_type - R_ARM_IRELATIVE];
1834 
1835   if (r_type >= R_ARM_RREL32
1836       && r_type < R_ARM_RREL32 + ARRAY_SIZE (elf32_arm_howto_table_3))
1837     return &elf32_arm_howto_table_3[r_type - R_ARM_RREL32];
1838 
1839   return NULL;
1840 }
1841 
1842 static void
1843 elf32_arm_info_to_howto (bfd * abfd ATTRIBUTE_UNUSED, arelent * bfd_reloc,
1844 			 Elf_Internal_Rela * elf_reloc)
1845 {
1846   unsigned int r_type;
1847 
1848   r_type = ELF32_R_TYPE (elf_reloc->r_info);
1849   bfd_reloc->howto = elf32_arm_howto_from_type (r_type);
1850 }
1851 
1852 struct elf32_arm_reloc_map
1853   {
1854     bfd_reloc_code_real_type  bfd_reloc_val;
1855     unsigned char             elf_reloc_val;
1856   };
1857 
1858 /* All entries in this list must also be present in elf32_arm_howto_table.  */
1859 static const struct elf32_arm_reloc_map elf32_arm_reloc_map[] =
1860   {
1861     {BFD_RELOC_NONE,                 R_ARM_NONE},
1862     {BFD_RELOC_ARM_PCREL_BRANCH,     R_ARM_PC24},
1863     {BFD_RELOC_ARM_PCREL_CALL,	     R_ARM_CALL},
1864     {BFD_RELOC_ARM_PCREL_JUMP,	     R_ARM_JUMP24},
1865     {BFD_RELOC_ARM_PCREL_BLX,        R_ARM_XPC25},
1866     {BFD_RELOC_THUMB_PCREL_BLX,      R_ARM_THM_XPC22},
1867     {BFD_RELOC_32,                   R_ARM_ABS32},
1868     {BFD_RELOC_32_PCREL,             R_ARM_REL32},
1869     {BFD_RELOC_8,                    R_ARM_ABS8},
1870     {BFD_RELOC_16,                   R_ARM_ABS16},
1871     {BFD_RELOC_ARM_OFFSET_IMM,       R_ARM_ABS12},
1872     {BFD_RELOC_ARM_THUMB_OFFSET,     R_ARM_THM_ABS5},
1873     {BFD_RELOC_THUMB_PCREL_BRANCH25, R_ARM_THM_JUMP24},
1874     {BFD_RELOC_THUMB_PCREL_BRANCH23, R_ARM_THM_CALL},
1875     {BFD_RELOC_THUMB_PCREL_BRANCH12, R_ARM_THM_JUMP11},
1876     {BFD_RELOC_THUMB_PCREL_BRANCH20, R_ARM_THM_JUMP19},
1877     {BFD_RELOC_THUMB_PCREL_BRANCH9,  R_ARM_THM_JUMP8},
1878     {BFD_RELOC_THUMB_PCREL_BRANCH7,  R_ARM_THM_JUMP6},
1879     {BFD_RELOC_ARM_GLOB_DAT,         R_ARM_GLOB_DAT},
1880     {BFD_RELOC_ARM_JUMP_SLOT,        R_ARM_JUMP_SLOT},
1881     {BFD_RELOC_ARM_RELATIVE,         R_ARM_RELATIVE},
1882     {BFD_RELOC_ARM_GOTOFF,           R_ARM_GOTOFF32},
1883     {BFD_RELOC_ARM_GOTPC,            R_ARM_GOTPC},
1884     {BFD_RELOC_ARM_GOT_PREL,         R_ARM_GOT_PREL},
1885     {BFD_RELOC_ARM_GOT32,            R_ARM_GOT32},
1886     {BFD_RELOC_ARM_PLT32,            R_ARM_PLT32},
1887     {BFD_RELOC_ARM_TARGET1,	     R_ARM_TARGET1},
1888     {BFD_RELOC_ARM_ROSEGREL32,	     R_ARM_ROSEGREL32},
1889     {BFD_RELOC_ARM_SBREL32,	     R_ARM_SBREL32},
1890     {BFD_RELOC_ARM_PREL31,	     R_ARM_PREL31},
1891     {BFD_RELOC_ARM_TARGET2,	     R_ARM_TARGET2},
1892     {BFD_RELOC_ARM_PLT32,            R_ARM_PLT32},
1893     {BFD_RELOC_ARM_TLS_GOTDESC,      R_ARM_TLS_GOTDESC},
1894     {BFD_RELOC_ARM_TLS_CALL,         R_ARM_TLS_CALL},
1895     {BFD_RELOC_ARM_THM_TLS_CALL,     R_ARM_THM_TLS_CALL},
1896     {BFD_RELOC_ARM_TLS_DESCSEQ,      R_ARM_TLS_DESCSEQ},
1897     {BFD_RELOC_ARM_THM_TLS_DESCSEQ,  R_ARM_THM_TLS_DESCSEQ},
1898     {BFD_RELOC_ARM_TLS_DESC,         R_ARM_TLS_DESC},
1899     {BFD_RELOC_ARM_TLS_GD32,	     R_ARM_TLS_GD32},
1900     {BFD_RELOC_ARM_TLS_LDO32,	     R_ARM_TLS_LDO32},
1901     {BFD_RELOC_ARM_TLS_LDM32,	     R_ARM_TLS_LDM32},
1902     {BFD_RELOC_ARM_TLS_DTPMOD32,     R_ARM_TLS_DTPMOD32},
1903     {BFD_RELOC_ARM_TLS_DTPOFF32,     R_ARM_TLS_DTPOFF32},
1904     {BFD_RELOC_ARM_TLS_TPOFF32,      R_ARM_TLS_TPOFF32},
1905     {BFD_RELOC_ARM_TLS_IE32,         R_ARM_TLS_IE32},
1906     {BFD_RELOC_ARM_TLS_LE32,         R_ARM_TLS_LE32},
1907     {BFD_RELOC_ARM_IRELATIVE,        R_ARM_IRELATIVE},
1908     {BFD_RELOC_VTABLE_INHERIT,	     R_ARM_GNU_VTINHERIT},
1909     {BFD_RELOC_VTABLE_ENTRY,	     R_ARM_GNU_VTENTRY},
1910     {BFD_RELOC_ARM_MOVW,	     R_ARM_MOVW_ABS_NC},
1911     {BFD_RELOC_ARM_MOVT,	     R_ARM_MOVT_ABS},
1912     {BFD_RELOC_ARM_MOVW_PCREL,	     R_ARM_MOVW_PREL_NC},
1913     {BFD_RELOC_ARM_MOVT_PCREL,	     R_ARM_MOVT_PREL},
1914     {BFD_RELOC_ARM_THUMB_MOVW,	     R_ARM_THM_MOVW_ABS_NC},
1915     {BFD_RELOC_ARM_THUMB_MOVT,	     R_ARM_THM_MOVT_ABS},
1916     {BFD_RELOC_ARM_THUMB_MOVW_PCREL, R_ARM_THM_MOVW_PREL_NC},
1917     {BFD_RELOC_ARM_THUMB_MOVT_PCREL, R_ARM_THM_MOVT_PREL},
1918     {BFD_RELOC_ARM_ALU_PC_G0_NC, R_ARM_ALU_PC_G0_NC},
1919     {BFD_RELOC_ARM_ALU_PC_G0, R_ARM_ALU_PC_G0},
1920     {BFD_RELOC_ARM_ALU_PC_G1_NC, R_ARM_ALU_PC_G1_NC},
1921     {BFD_RELOC_ARM_ALU_PC_G1, R_ARM_ALU_PC_G1},
1922     {BFD_RELOC_ARM_ALU_PC_G2, R_ARM_ALU_PC_G2},
1923     {BFD_RELOC_ARM_LDR_PC_G0, R_ARM_LDR_PC_G0},
1924     {BFD_RELOC_ARM_LDR_PC_G1, R_ARM_LDR_PC_G1},
1925     {BFD_RELOC_ARM_LDR_PC_G2, R_ARM_LDR_PC_G2},
1926     {BFD_RELOC_ARM_LDRS_PC_G0, R_ARM_LDRS_PC_G0},
1927     {BFD_RELOC_ARM_LDRS_PC_G1, R_ARM_LDRS_PC_G1},
1928     {BFD_RELOC_ARM_LDRS_PC_G2, R_ARM_LDRS_PC_G2},
1929     {BFD_RELOC_ARM_LDC_PC_G0, R_ARM_LDC_PC_G0},
1930     {BFD_RELOC_ARM_LDC_PC_G1, R_ARM_LDC_PC_G1},
1931     {BFD_RELOC_ARM_LDC_PC_G2, R_ARM_LDC_PC_G2},
1932     {BFD_RELOC_ARM_ALU_SB_G0_NC, R_ARM_ALU_SB_G0_NC},
1933     {BFD_RELOC_ARM_ALU_SB_G0, R_ARM_ALU_SB_G0},
1934     {BFD_RELOC_ARM_ALU_SB_G1_NC, R_ARM_ALU_SB_G1_NC},
1935     {BFD_RELOC_ARM_ALU_SB_G1, R_ARM_ALU_SB_G1},
1936     {BFD_RELOC_ARM_ALU_SB_G2, R_ARM_ALU_SB_G2},
1937     {BFD_RELOC_ARM_LDR_SB_G0, R_ARM_LDR_SB_G0},
1938     {BFD_RELOC_ARM_LDR_SB_G1, R_ARM_LDR_SB_G1},
1939     {BFD_RELOC_ARM_LDR_SB_G2, R_ARM_LDR_SB_G2},
1940     {BFD_RELOC_ARM_LDRS_SB_G0, R_ARM_LDRS_SB_G0},
1941     {BFD_RELOC_ARM_LDRS_SB_G1, R_ARM_LDRS_SB_G1},
1942     {BFD_RELOC_ARM_LDRS_SB_G2, R_ARM_LDRS_SB_G2},
1943     {BFD_RELOC_ARM_LDC_SB_G0, R_ARM_LDC_SB_G0},
1944     {BFD_RELOC_ARM_LDC_SB_G1, R_ARM_LDC_SB_G1},
1945     {BFD_RELOC_ARM_LDC_SB_G2, R_ARM_LDC_SB_G2},
1946     {BFD_RELOC_ARM_V4BX,	     R_ARM_V4BX},
1947     {BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC, R_ARM_THM_ALU_ABS_G3_NC},
1948     {BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC, R_ARM_THM_ALU_ABS_G2_NC},
1949     {BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC, R_ARM_THM_ALU_ABS_G1_NC},
1950     {BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC, R_ARM_THM_ALU_ABS_G0_NC}
1951   };
1952 
1953 static reloc_howto_type *
1954 elf32_arm_reloc_type_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1955 			     bfd_reloc_code_real_type code)
1956 {
1957   unsigned int i;
1958 
1959   for (i = 0; i < ARRAY_SIZE (elf32_arm_reloc_map); i ++)
1960     if (elf32_arm_reloc_map[i].bfd_reloc_val == code)
1961       return elf32_arm_howto_from_type (elf32_arm_reloc_map[i].elf_reloc_val);
1962 
1963   return NULL;
1964 }
1965 
1966 static reloc_howto_type *
1967 elf32_arm_reloc_name_lookup (bfd *abfd ATTRIBUTE_UNUSED,
1968 			     const char *r_name)
1969 {
1970   unsigned int i;
1971 
1972   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_1); i++)
1973     if (elf32_arm_howto_table_1[i].name != NULL
1974 	&& strcasecmp (elf32_arm_howto_table_1[i].name, r_name) == 0)
1975       return &elf32_arm_howto_table_1[i];
1976 
1977   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_2); i++)
1978     if (elf32_arm_howto_table_2[i].name != NULL
1979 	&& strcasecmp (elf32_arm_howto_table_2[i].name, r_name) == 0)
1980       return &elf32_arm_howto_table_2[i];
1981 
1982   for (i = 0; i < ARRAY_SIZE (elf32_arm_howto_table_3); i++)
1983     if (elf32_arm_howto_table_3[i].name != NULL
1984 	&& strcasecmp (elf32_arm_howto_table_3[i].name, r_name) == 0)
1985       return &elf32_arm_howto_table_3[i];
1986 
1987   return NULL;
1988 }
1989 
1990 /* Support for core dump NOTE sections.  */
1991 
1992 static bfd_boolean
1993 elf32_arm_nabi_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
1994 {
1995   int offset;
1996   size_t size;
1997 
1998   switch (note->descsz)
1999     {
2000       default:
2001 	return FALSE;
2002 
2003       case 148:		/* Linux/ARM 32-bit.  */
2004 	/* pr_cursig */
2005 	elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
2006 
2007 	/* pr_pid */
2008 	elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
2009 
2010 	/* pr_reg */
2011 	offset = 72;
2012 	size = 72;
2013 
2014 	break;
2015     }
2016 
2017   /* Make a ".reg/999" section.  */
2018   return _bfd_elfcore_make_pseudosection (abfd, ".reg",
2019 					  size, note->descpos + offset);
2020 }
2021 
2022 static bfd_boolean
2023 elf32_arm_nabi_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
2024 {
2025   switch (note->descsz)
2026     {
2027       default:
2028 	return FALSE;
2029 
2030       case 124:		/* Linux/ARM elf_prpsinfo.  */
2031 	elf_tdata (abfd)->core->pid
2032 	 = bfd_get_32 (abfd, note->descdata + 12);
2033 	elf_tdata (abfd)->core->program
2034 	 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
2035 	elf_tdata (abfd)->core->command
2036 	 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
2037     }
2038 
2039   /* Note that for some reason, a spurious space is tacked
2040      onto the end of the args in some (at least one anyway)
2041      implementations, so strip it off if it exists.  */
2042   {
2043     char *command = elf_tdata (abfd)->core->command;
2044     int n = strlen (command);
2045 
2046     if (0 < n && command[n - 1] == ' ')
2047       command[n - 1] = '\0';
2048   }
2049 
2050   return TRUE;
2051 }
2052 
2053 static char *
2054 elf32_arm_nabi_write_core_note (bfd *abfd, char *buf, int *bufsiz,
2055 				int note_type, ...)
2056 {
2057   switch (note_type)
2058     {
2059     default:
2060       return NULL;
2061 
2062     case NT_PRPSINFO:
2063       {
2064 	char data[124];
2065 	va_list ap;
2066 
2067 	va_start (ap, note_type);
2068 	memset (data, 0, sizeof (data));
2069 	strncpy (data + 28, va_arg (ap, const char *), 16);
2070 	strncpy (data + 44, va_arg (ap, const char *), 80);
2071 	va_end (ap);
2072 
2073 	return elfcore_write_note (abfd, buf, bufsiz,
2074 				   "CORE", note_type, data, sizeof (data));
2075       }
2076 
2077     case NT_PRSTATUS:
2078       {
2079 	char data[148];
2080 	va_list ap;
2081 	long pid;
2082 	int cursig;
2083 	const void *greg;
2084 
2085 	va_start (ap, note_type);
2086 	memset (data, 0, sizeof (data));
2087 	pid = va_arg (ap, long);
2088 	bfd_put_32 (abfd, pid, data + 24);
2089 	cursig = va_arg (ap, int);
2090 	bfd_put_16 (abfd, cursig, data + 12);
2091 	greg = va_arg (ap, const void *);
2092 	memcpy (data + 72, greg, 72);
2093 	va_end (ap);
2094 
2095 	return elfcore_write_note (abfd, buf, bufsiz,
2096 				   "CORE", note_type, data, sizeof (data));
2097       }
2098     }
2099 }
2100 
2101 #define TARGET_LITTLE_SYM               arm_elf32_le_vec
2102 #define TARGET_LITTLE_NAME              "elf32-littlearm"
2103 #define TARGET_BIG_SYM                  arm_elf32_be_vec
2104 #define TARGET_BIG_NAME                 "elf32-bigarm"
2105 
2106 #define elf_backend_grok_prstatus	elf32_arm_nabi_grok_prstatus
2107 #define elf_backend_grok_psinfo		elf32_arm_nabi_grok_psinfo
2108 #define elf_backend_write_core_note	elf32_arm_nabi_write_core_note
2109 
2110 typedef unsigned long int insn32;
2111 typedef unsigned short int insn16;
2112 
2113 /* In lieu of proper flags, assume all EABIv4 or later objects are
2114    interworkable.  */
2115 #define INTERWORK_FLAG(abfd)  \
2116   (EF_ARM_EABI_VERSION (elf_elfheader (abfd)->e_flags) >= EF_ARM_EABI_VER4 \
2117   || (elf_elfheader (abfd)->e_flags & EF_ARM_INTERWORK) \
2118   || ((abfd)->flags & BFD_LINKER_CREATED))
2119 
2120 /* The linker script knows the section names for placement.
2121    The entry_names are used to do simple name mangling on the stubs.
2122    Given a function name, and its type, the stub can be found. The
2123    name can be changed. The only requirement is the %s be present.  */
2124 #define THUMB2ARM_GLUE_SECTION_NAME ".glue_7t"
2125 #define THUMB2ARM_GLUE_ENTRY_NAME   "__%s_from_thumb"
2126 
2127 #define ARM2THUMB_GLUE_SECTION_NAME ".glue_7"
2128 #define ARM2THUMB_GLUE_ENTRY_NAME   "__%s_from_arm"
2129 
2130 #define VFP11_ERRATUM_VENEER_SECTION_NAME ".vfp11_veneer"
2131 #define VFP11_ERRATUM_VENEER_ENTRY_NAME   "__vfp11_veneer_%x"
2132 
2133 #define STM32L4XX_ERRATUM_VENEER_SECTION_NAME ".text.stm32l4xx_veneer"
2134 #define STM32L4XX_ERRATUM_VENEER_ENTRY_NAME   "__stm32l4xx_veneer_%x"
2135 
2136 #define ARM_BX_GLUE_SECTION_NAME ".v4_bx"
2137 #define ARM_BX_GLUE_ENTRY_NAME   "__bx_r%d"
2138 
2139 #define STUB_ENTRY_NAME   "__%s_veneer"
2140 
2141 /* The name of the dynamic interpreter.  This is put in the .interp
2142    section.  */
2143 #define ELF_DYNAMIC_INTERPRETER     "/usr/lib/ld.so.1"
2144 
2145 static const unsigned long tls_trampoline [] =
2146 {
2147   0xe08e0000,		/* add r0, lr, r0 */
2148   0xe5901004,		/* ldr r1, [r0,#4] */
2149   0xe12fff11,		/* bx  r1 */
2150 };
2151 
2152 static const unsigned long dl_tlsdesc_lazy_trampoline [] =
2153 {
2154   0xe52d2004, /*	push    {r2}			*/
2155   0xe59f200c, /*      ldr     r2, [pc, #3f - . - 8]	*/
2156   0xe59f100c, /*      ldr     r1, [pc, #4f - . - 8]	*/
2157   0xe79f2002, /* 1:   ldr     r2, [pc, r2]		*/
2158   0xe081100f, /* 2:   add     r1, pc			*/
2159   0xe12fff12, /*      bx      r2			*/
2160   0x00000014, /* 3:   .word  _GLOBAL_OFFSET_TABLE_ - 1b - 8
2161 				+ dl_tlsdesc_lazy_resolver(GOT)   */
2162   0x00000018, /* 4:   .word  _GLOBAL_OFFSET_TABLE_ - 2b - 8 */
2163 };
2164 
2165 #ifdef FOUR_WORD_PLT
2166 
2167 /* The first entry in a procedure linkage table looks like
2168    this.  It is set up so that any shared library function that is
2169    called before the relocation has been set up calls the dynamic
2170    linker first.  */
2171 static const bfd_vma elf32_arm_plt0_entry [] =
2172 {
2173   0xe52de004,		/* str   lr, [sp, #-4]! */
2174   0xe59fe010,		/* ldr   lr, [pc, #16]  */
2175   0xe08fe00e,		/* add   lr, pc, lr     */
2176   0xe5bef008,		/* ldr   pc, [lr, #8]!  */
2177 };
2178 
2179 /* Subsequent entries in a procedure linkage table look like
2180    this.  */
2181 static const bfd_vma elf32_arm_plt_entry [] =
2182 {
2183   0xe28fc600,		/* add   ip, pc, #NN	*/
2184   0xe28cca00,		/* add	 ip, ip, #NN	*/
2185   0xe5bcf000,		/* ldr	 pc, [ip, #NN]! */
2186   0x00000000,		/* unused		*/
2187 };
2188 
2189 #else /* not FOUR_WORD_PLT */
2190 
2191 /* The first entry in a procedure linkage table looks like
2192    this.  It is set up so that any shared library function that is
2193    called before the relocation has been set up calls the dynamic
2194    linker first.  */
2195 static const bfd_vma elf32_arm_plt0_entry [] =
2196 {
2197   0xe52de004,		/* str   lr, [sp, #-4]! */
2198   0xe59fe004,		/* ldr   lr, [pc, #4]   */
2199   0xe08fe00e,		/* add   lr, pc, lr     */
2200   0xe5bef008,		/* ldr   pc, [lr, #8]!  */
2201   0x00000000,		/* &GOT[0] - .          */
2202 };
2203 
2204 /* By default subsequent entries in a procedure linkage table look like
2205    this. Offsets that don't fit into 28 bits will cause link error.  */
2206 static const bfd_vma elf32_arm_plt_entry_short [] =
2207 {
2208   0xe28fc600,		/* add   ip, pc, #0xNN00000 */
2209   0xe28cca00,		/* add	 ip, ip, #0xNN000   */
2210   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!  */
2211 };
2212 
2213 /* When explicitly asked, we'll use this "long" entry format
2214    which can cope with arbitrary displacements.  */
2215 static const bfd_vma elf32_arm_plt_entry_long [] =
2216 {
2217   0xe28fc200,           /* add   ip, pc, #0xN0000000 */
2218   0xe28cc600,		/* add   ip, ip, #0xNN00000  */
2219   0xe28cca00,		/* add	 ip, ip, #0xNN000    */
2220   0xe5bcf000,		/* ldr	 pc, [ip, #0xNNN]!   */
2221 };
2222 
2223 static bfd_boolean elf32_arm_use_long_plt_entry = FALSE;
2224 
2225 #endif /* not FOUR_WORD_PLT */
2226 
2227 /* The first entry in a procedure linkage table looks like this.
2228    It is set up so that any shared library function that is called before the
2229    relocation has been set up calls the dynamic linker first.  */
2230 static const bfd_vma elf32_thumb2_plt0_entry [] =
2231 {
2232   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2233      an instruction maybe encoded to one or two array elements.  */
2234   0xf8dfb500,		/* push    {lr}          */
2235   0x44fee008,		/* ldr.w   lr, [pc, #8]  */
2236 			/* add     lr, pc        */
2237   0xff08f85e,		/* ldr.w   pc, [lr, #8]! */
2238   0x00000000,		/* &GOT[0] - .           */
2239 };
2240 
2241 /* Subsequent entries in a procedure linkage table for thumb only target
2242    look like this.  */
2243 static const bfd_vma elf32_thumb2_plt_entry [] =
2244 {
2245   /* NOTE: As this is a mixture of 16-bit and 32-bit instructions,
2246      an instruction maybe encoded to one or two array elements.  */
2247   0x0c00f240,		/* movw    ip, #0xNNNN    */
2248   0x0c00f2c0,		/* movt    ip, #0xNNNN    */
2249   0xf8dc44fc,           /* add     ip, pc         */
2250   0xbf00f000            /* ldr.w   pc, [ip]       */
2251 			/* nop                    */
2252 };
2253 
2254 /* The format of the first entry in the procedure linkage table
2255    for a VxWorks executable.  */
2256 static const bfd_vma elf32_arm_vxworks_exec_plt0_entry[] =
2257 {
2258   0xe52dc008,	        /* str    ip,[sp,#-8]!			*/
2259   0xe59fc000,   	/* ldr    ip,[pc]			*/
2260   0xe59cf008,   	/* ldr    pc,[ip,#8]			*/
2261   0x00000000,   	/* .long  _GLOBAL_OFFSET_TABLE_		*/
2262 };
2263 
2264 /* The format of subsequent entries in a VxWorks executable.  */
2265 static const bfd_vma elf32_arm_vxworks_exec_plt_entry[] =
2266 {
2267   0xe59fc000,         /* ldr    ip,[pc]			*/
2268   0xe59cf000,         /* ldr    pc,[ip]			*/
2269   0x00000000,         /* .long  @got				*/
2270   0xe59fc000,         /* ldr    ip,[pc]			*/
2271   0xea000000,         /* b      _PLT				*/
2272   0x00000000,         /* .long  @pltindex*sizeof(Elf32_Rela)	*/
2273 };
2274 
2275 /* The format of entries in a VxWorks shared library.  */
2276 static const bfd_vma elf32_arm_vxworks_shared_plt_entry[] =
2277 {
2278   0xe59fc000,         /* ldr    ip,[pc]			*/
2279   0xe79cf009,         /* ldr    pc,[ip,r9]			*/
2280   0x00000000,         /* .long  @got				*/
2281   0xe59fc000,         /* ldr    ip,[pc]			*/
2282   0xe599f008,         /* ldr    pc,[r9,#8]			*/
2283   0x00000000,         /* .long  @pltindex*sizeof(Elf32_Rela)	*/
2284 };
2285 
2286 /* An initial stub used if the PLT entry is referenced from Thumb code.  */
2287 #define PLT_THUMB_STUB_SIZE 4
2288 static const bfd_vma elf32_arm_plt_thumb_stub [] =
2289 {
2290   0x4778,		/* bx pc */
2291   0x46c0		/* nop   */
2292 };
2293 
2294 /* The entries in a PLT when using a DLL-based target with multiple
2295    address spaces.  */
2296 static const bfd_vma elf32_arm_symbian_plt_entry [] =
2297 {
2298   0xe51ff004,         /* ldr   pc, [pc, #-4] */
2299   0x00000000,         /* dcd   R_ARM_GLOB_DAT(X) */
2300 };
2301 
2302 /* The first entry in a procedure linkage table looks like
2303    this.  It is set up so that any shared library function that is
2304    called before the relocation has been set up calls the dynamic
2305    linker first.  */
2306 static const bfd_vma elf32_arm_nacl_plt0_entry [] =
2307 {
2308   /* First bundle: */
2309   0xe300c000,		/* movw	ip, #:lower16:&GOT[2]-.+8	*/
2310   0xe340c000,		/* movt	ip, #:upper16:&GOT[2]-.+8	*/
2311   0xe08cc00f,		/* add	ip, ip, pc			*/
2312   0xe52dc008,		/* str	ip, [sp, #-8]!			*/
2313   /* Second bundle: */
2314   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2315   0xe59cc000,		/* ldr	ip, [ip]			*/
2316   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2317   0xe12fff1c,		/* bx	ip				*/
2318   /* Third bundle: */
2319   0xe320f000,		/* nop					*/
2320   0xe320f000,		/* nop					*/
2321   0xe320f000,		/* nop					*/
2322   /* .Lplt_tail: */
2323   0xe50dc004,		/* str	ip, [sp, #-4]			*/
2324   /* Fourth bundle: */
2325   0xe3ccc103,		/* bic	ip, ip, #0xc0000000		*/
2326   0xe59cc000,		/* ldr	ip, [ip]			*/
2327   0xe3ccc13f,		/* bic	ip, ip, #0xc000000f		*/
2328   0xe12fff1c,		/* bx	ip				*/
2329 };
2330 #define ARM_NACL_PLT_TAIL_OFFSET	(11 * 4)
2331 
2332 /* Subsequent entries in a procedure linkage table look like this.  */
2333 static const bfd_vma elf32_arm_nacl_plt_entry [] =
2334 {
2335   0xe300c000,		/* movw	ip, #:lower16:&GOT[n]-.+8	*/
2336   0xe340c000,		/* movt	ip, #:upper16:&GOT[n]-.+8	*/
2337   0xe08cc00f,		/* add	ip, ip, pc			*/
2338   0xea000000,		/* b	.Lplt_tail			*/
2339 };
2340 
2341 #define ARM_MAX_FWD_BRANCH_OFFSET  ((((1 << 23) - 1) << 2) + 8)
2342 #define ARM_MAX_BWD_BRANCH_OFFSET  ((-((1 << 23) << 2)) + 8)
2343 #define THM_MAX_FWD_BRANCH_OFFSET  ((1 << 22) -2 + 4)
2344 #define THM_MAX_BWD_BRANCH_OFFSET  (-(1 << 22) + 4)
2345 #define THM2_MAX_FWD_BRANCH_OFFSET (((1 << 24) - 2) + 4)
2346 #define THM2_MAX_BWD_BRANCH_OFFSET (-(1 << 24) + 4)
2347 #define THM2_MAX_FWD_COND_BRANCH_OFFSET (((1 << 20) -2) + 4)
2348 #define THM2_MAX_BWD_COND_BRANCH_OFFSET (-(1 << 20) + 4)
2349 
2350 enum stub_insn_type
2351 {
2352   THUMB16_TYPE = 1,
2353   THUMB32_TYPE,
2354   ARM_TYPE,
2355   DATA_TYPE
2356 };
2357 
2358 #define THUMB16_INSN(X)		{(X), THUMB16_TYPE, R_ARM_NONE, 0}
2359 /* A bit of a hack.  A Thumb conditional branch, in which the proper condition
2360    is inserted in arm_build_one_stub().  */
2361 #define THUMB16_BCOND_INSN(X)	{(X), THUMB16_TYPE, R_ARM_NONE, 1}
2362 #define THUMB32_INSN(X)		{(X), THUMB32_TYPE, R_ARM_NONE, 0}
2363 #define THUMB32_B_INSN(X, Z)	{(X), THUMB32_TYPE, R_ARM_THM_JUMP24, (Z)}
2364 #define ARM_INSN(X)		{(X), ARM_TYPE, R_ARM_NONE, 0}
2365 #define ARM_REL_INSN(X, Z)	{(X), ARM_TYPE, R_ARM_JUMP24, (Z)}
2366 #define DATA_WORD(X,Y,Z)	{(X), DATA_TYPE, (Y), (Z)}
2367 
2368 typedef struct
2369 {
2370   bfd_vma              data;
2371   enum stub_insn_type  type;
2372   unsigned int         r_type;
2373   int                  reloc_addend;
2374 }  insn_sequence;
2375 
2376 /* Arm/Thumb -> Arm/Thumb long branch stub. On V5T and above, use blx
2377    to reach the stub if necessary.  */
2378 static const insn_sequence elf32_arm_stub_long_branch_any_any[] =
2379 {
2380   ARM_INSN (0xe51ff004),            /* ldr   pc, [pc, #-4] */
2381   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2382 };
2383 
2384 /* V4T Arm -> Thumb long branch stub. Used on V4T where blx is not
2385    available.  */
2386 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb[] =
2387 {
2388   ARM_INSN (0xe59fc000),            /* ldr   ip, [pc, #0] */
2389   ARM_INSN (0xe12fff1c),            /* bx    ip */
2390   DATA_WORD (0, R_ARM_ABS32, 0),    /* dcd   R_ARM_ABS32(X) */
2391 };
2392 
2393 /* Thumb -> Thumb long branch stub. Used on M-profile architectures.  */
2394 static const insn_sequence elf32_arm_stub_long_branch_thumb_only[] =
2395 {
2396   THUMB16_INSN (0xb401),             /* push {r0} */
2397   THUMB16_INSN (0x4802),             /* ldr  r0, [pc, #8] */
2398   THUMB16_INSN (0x4684),             /* mov  ip, r0 */
2399   THUMB16_INSN (0xbc01),             /* pop  {r0} */
2400   THUMB16_INSN (0x4760),             /* bx   ip */
2401   THUMB16_INSN (0xbf00),             /* nop */
2402   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2403 };
2404 
2405 /* Thumb -> Thumb long branch stub in thumb2 encoding.  Used on armv7.  */
2406 static const insn_sequence elf32_arm_stub_long_branch_thumb2_only[] =
2407 {
2408   THUMB32_INSN (0xf85ff000),         /* ldr.w  pc, [pc, #-0] */
2409   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(x) */
2410 };
2411 
2412 /* V4T Thumb -> Thumb long branch stub. Using the stack is not
2413    allowed.  */
2414 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb[] =
2415 {
2416   THUMB16_INSN (0x4778),             /* bx   pc */
2417   THUMB16_INSN (0x46c0),             /* nop */
2418   ARM_INSN (0xe59fc000),             /* ldr  ip, [pc, #0] */
2419   ARM_INSN (0xe12fff1c),             /* bx   ip */
2420   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd  R_ARM_ABS32(X) */
2421 };
2422 
2423 /* V4T Thumb -> ARM long branch stub. Used on V4T where blx is not
2424    available.  */
2425 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm[] =
2426 {
2427   THUMB16_INSN (0x4778),             /* bx   pc */
2428   THUMB16_INSN (0x46c0),             /* nop   */
2429   ARM_INSN (0xe51ff004),             /* ldr   pc, [pc, #-4] */
2430   DATA_WORD (0, R_ARM_ABS32, 0),     /* dcd   R_ARM_ABS32(X) */
2431 };
2432 
2433 /* V4T Thumb -> ARM short branch stub. Shorter variant of the above
2434    one, when the destination is close enough.  */
2435 static const insn_sequence elf32_arm_stub_short_branch_v4t_thumb_arm[] =
2436 {
2437   THUMB16_INSN (0x4778),             /* bx   pc */
2438   THUMB16_INSN (0x46c0),             /* nop   */
2439   ARM_REL_INSN (0xea000000, -8),     /* b    (X-8) */
2440 };
2441 
2442 /* ARM/Thumb -> ARM long branch stub, PIC.  On V5T and above, use
2443    blx to reach the stub if necessary.  */
2444 static const insn_sequence elf32_arm_stub_long_branch_any_arm_pic[] =
2445 {
2446   ARM_INSN (0xe59fc000),             /* ldr   ip, [pc] */
2447   ARM_INSN (0xe08ff00c),             /* add   pc, pc, ip */
2448   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2449 };
2450 
2451 /* ARM/Thumb -> Thumb long branch stub, PIC.  On V5T and above, use
2452    blx to reach the stub if necessary.  We can not add into pc;
2453    it is not guaranteed to mode switch (different in ARMv6 and
2454    ARMv7).  */
2455 static const insn_sequence elf32_arm_stub_long_branch_any_thumb_pic[] =
2456 {
2457   ARM_INSN (0xe59fc004),             /* ldr   ip, [pc, #4] */
2458   ARM_INSN (0xe08fc00c),             /* add   ip, pc, ip */
2459   ARM_INSN (0xe12fff1c),             /* bx    ip */
2460   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2461 };
2462 
2463 /* V4T ARM -> ARM long branch stub, PIC.  */
2464 static const insn_sequence elf32_arm_stub_long_branch_v4t_arm_thumb_pic[] =
2465 {
2466   ARM_INSN (0xe59fc004),             /* ldr   ip, [pc, #4] */
2467   ARM_INSN (0xe08fc00c),             /* add   ip, pc, ip */
2468   ARM_INSN (0xe12fff1c),             /* bx    ip */
2469   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd   R_ARM_REL32(X) */
2470 };
2471 
2472 /* V4T Thumb -> ARM long branch stub, PIC.  */
2473 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_arm_pic[] =
2474 {
2475   THUMB16_INSN (0x4778),             /* bx   pc */
2476   THUMB16_INSN (0x46c0),             /* nop  */
2477   ARM_INSN (0xe59fc000),             /* ldr  ip, [pc, #0] */
2478   ARM_INSN (0xe08cf00f),             /* add  pc, ip, pc */
2479   DATA_WORD (0, R_ARM_REL32, -4),     /* dcd  R_ARM_REL32(X) */
2480 };
2481 
2482 /* Thumb -> Thumb long branch stub, PIC. Used on M-profile
2483    architectures.  */
2484 static const insn_sequence elf32_arm_stub_long_branch_thumb_only_pic[] =
2485 {
2486   THUMB16_INSN (0xb401),             /* push {r0} */
2487   THUMB16_INSN (0x4802),             /* ldr  r0, [pc, #8] */
2488   THUMB16_INSN (0x46fc),             /* mov  ip, pc */
2489   THUMB16_INSN (0x4484),             /* add  ip, r0 */
2490   THUMB16_INSN (0xbc01),             /* pop  {r0} */
2491   THUMB16_INSN (0x4760),             /* bx   ip */
2492   DATA_WORD (0, R_ARM_REL32, 4),     /* dcd  R_ARM_REL32(X) */
2493 };
2494 
2495 /* V4T Thumb -> Thumb long branch stub, PIC. Using the stack is not
2496    allowed.  */
2497 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_thumb_pic[] =
2498 {
2499   THUMB16_INSN (0x4778),             /* bx   pc */
2500   THUMB16_INSN (0x46c0),             /* nop */
2501   ARM_INSN (0xe59fc004),             /* ldr  ip, [pc, #4] */
2502   ARM_INSN (0xe08fc00c),             /* add   ip, pc, ip */
2503   ARM_INSN (0xe12fff1c),             /* bx   ip */
2504   DATA_WORD (0, R_ARM_REL32, 0),     /* dcd  R_ARM_REL32(X) */
2505 };
2506 
2507 /* Thumb2/ARM -> TLS trampoline.  Lowest common denominator, which is a
2508    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2509 static const insn_sequence elf32_arm_stub_long_branch_any_tls_pic[] =
2510 {
2511   ARM_INSN (0xe59f1000),             /* ldr   r1, [pc] */
2512   ARM_INSN (0xe08ff001),             /* add   pc, pc, r1 */
2513   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd   R_ARM_REL32(X-4) */
2514 };
2515 
2516 /* V4T Thumb -> TLS trampoline.  lowest common denominator, which is a
2517    long PIC stub.  We can use r1 as a scratch -- and cannot use ip.  */
2518 static const insn_sequence elf32_arm_stub_long_branch_v4t_thumb_tls_pic[] =
2519 {
2520   THUMB16_INSN (0x4778),             /* bx   pc */
2521   THUMB16_INSN (0x46c0),             /* nop */
2522   ARM_INSN (0xe59f1000),             /* ldr  r1, [pc, #0] */
2523   ARM_INSN (0xe081f00f),             /* add  pc, r1, pc */
2524   DATA_WORD (0, R_ARM_REL32, -4),    /* dcd  R_ARM_REL32(X) */
2525 };
2526 
2527 /* NaCl ARM -> ARM long branch stub.  */
2528 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl[] =
2529 {
2530   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2531   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2532   ARM_INSN (0xe12fff1c),                /* bx	ip */
2533   ARM_INSN (0xe320f000),                /* nop */
2534   ARM_INSN (0xe125be70),                /* bkpt	0x5be0 */
2535   DATA_WORD (0, R_ARM_ABS32, 0),        /* dcd	R_ARM_ABS32(X) */
2536   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
2537   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
2538 };
2539 
2540 /* NaCl ARM -> ARM long branch stub, PIC.  */
2541 static const insn_sequence elf32_arm_stub_long_branch_arm_nacl_pic[] =
2542 {
2543   ARM_INSN (0xe59fc00c),		/* ldr	ip, [pc, #12] */
2544   ARM_INSN (0xe08cc00f),                /* add	ip, ip, pc */
2545   ARM_INSN (0xe3ccc13f),		/* bic	ip, ip, #0xc000000f */
2546   ARM_INSN (0xe12fff1c),                /* bx	ip */
2547   ARM_INSN (0xe125be70),                /* bkpt	0x5be0 */
2548   DATA_WORD (0, R_ARM_REL32, 8),        /* dcd	R_ARM_REL32(X+8) */
2549   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
2550   DATA_WORD (0, R_ARM_NONE, 0),         /* .word 0 */
2551 };
2552 
2553 
2554 /* Cortex-A8 erratum-workaround stubs.  */
2555 
2556 /* Stub used for conditional branches (which may be beyond +/-1MB away, so we
2557    can't use a conditional branch to reach this stub).  */
2558 
2559 static const insn_sequence elf32_arm_stub_a8_veneer_b_cond[] =
2560 {
2561   THUMB16_BCOND_INSN (0xd001),         /* b<cond>.n true.  */
2562   THUMB32_B_INSN (0xf000b800, -4),     /* b.w insn_after_original_branch.  */
2563   THUMB32_B_INSN (0xf000b800, -4)      /* true: b.w original_branch_dest.  */
2564 };
2565 
2566 /* Stub used for b.w and bl.w instructions.  */
2567 
2568 static const insn_sequence elf32_arm_stub_a8_veneer_b[] =
2569 {
2570   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2571 };
2572 
2573 static const insn_sequence elf32_arm_stub_a8_veneer_bl[] =
2574 {
2575   THUMB32_B_INSN (0xf000b800, -4)	/* b.w original_branch_dest.  */
2576 };
2577 
2578 /* Stub used for Thumb-2 blx.w instructions.  We modified the original blx.w
2579    instruction (which switches to ARM mode) to point to this stub.  Jump to the
2580    real destination using an ARM-mode branch.  */
2581 
2582 static const insn_sequence elf32_arm_stub_a8_veneer_blx[] =
2583 {
2584   ARM_REL_INSN (0xea000000, -8)	/* b original_branch_dest.  */
2585 };
2586 
2587 /* For each section group there can be a specially created linker section
2588    to hold the stubs for that group.  The name of the stub section is based
2589    upon the name of another section within that group with the suffix below
2590    applied.
2591 
2592    PR 13049: STUB_SUFFIX used to be ".stub", but this allowed the user to
2593    create what appeared to be a linker stub section when it actually
2594    contained user code/data.  For example, consider this fragment:
2595 
2596      const char * stubborn_problems[] = { "np" };
2597 
2598    If this is compiled with "-fPIC -fdata-sections" then gcc produces a
2599    section called:
2600 
2601      .data.rel.local.stubborn_problems
2602 
2603    This then causes problems in arm32_arm_build_stubs() as it triggers:
2604 
2605       // Ignore non-stub sections.
2606       if (!strstr (stub_sec->name, STUB_SUFFIX))
2607 	continue;
2608 
2609    And so the section would be ignored instead of being processed.  Hence
2610    the change in definition of STUB_SUFFIX to a name that cannot be a valid
2611    C identifier.  */
2612 #define STUB_SUFFIX ".__stub"
2613 
2614 /* One entry per long/short branch stub defined above.  */
2615 #define DEF_STUBS \
2616   DEF_STUB(long_branch_any_any)	\
2617   DEF_STUB(long_branch_v4t_arm_thumb) \
2618   DEF_STUB(long_branch_thumb_only) \
2619   DEF_STUB(long_branch_v4t_thumb_thumb)	\
2620   DEF_STUB(long_branch_v4t_thumb_arm) \
2621   DEF_STUB(short_branch_v4t_thumb_arm) \
2622   DEF_STUB(long_branch_any_arm_pic) \
2623   DEF_STUB(long_branch_any_thumb_pic) \
2624   DEF_STUB(long_branch_v4t_thumb_thumb_pic) \
2625   DEF_STUB(long_branch_v4t_arm_thumb_pic) \
2626   DEF_STUB(long_branch_v4t_thumb_arm_pic) \
2627   DEF_STUB(long_branch_thumb_only_pic) \
2628   DEF_STUB(long_branch_any_tls_pic) \
2629   DEF_STUB(long_branch_v4t_thumb_tls_pic) \
2630   DEF_STUB(long_branch_arm_nacl) \
2631   DEF_STUB(long_branch_arm_nacl_pic) \
2632   DEF_STUB(a8_veneer_b_cond) \
2633   DEF_STUB(a8_veneer_b) \
2634   DEF_STUB(a8_veneer_bl) \
2635   DEF_STUB(a8_veneer_blx) \
2636   DEF_STUB(long_branch_thumb2_only) \
2637 
2638 #define DEF_STUB(x) arm_stub_##x,
2639 enum elf32_arm_stub_type
2640 {
2641   arm_stub_none,
2642   DEF_STUBS
2643   max_stub_type
2644 };
2645 #undef DEF_STUB
2646 
2647 /* Note the first a8_veneer type.  */
2648 const unsigned arm_stub_a8_veneer_lwm = arm_stub_a8_veneer_b_cond;
2649 
2650 typedef struct
2651 {
2652   const insn_sequence* template_sequence;
2653   int template_size;
2654 } stub_def;
2655 
2656 #define DEF_STUB(x) {elf32_arm_stub_##x, ARRAY_SIZE(elf32_arm_stub_##x)},
2657 static const stub_def stub_definitions[] =
2658 {
2659   {NULL, 0},
2660   DEF_STUBS
2661 };
2662 
2663 struct elf32_arm_stub_hash_entry
2664 {
2665   /* Base hash table entry structure.  */
2666   struct bfd_hash_entry root;
2667 
2668   /* The stub section.  */
2669   asection *stub_sec;
2670 
2671   /* Offset within stub_sec of the beginning of this stub.  */
2672   bfd_vma stub_offset;
2673 
2674   /* Given the symbol's value and its section we can determine its final
2675      value when building the stubs (so the stub knows where to jump).  */
2676   bfd_vma target_value;
2677   asection *target_section;
2678 
2679   /* Same as above but for the source of the branch to the stub.  Used for
2680      Cortex-A8 erratum workaround to patch it to branch to the stub.  As
2681      such, source section does not need to be recorded since Cortex-A8 erratum
2682      workaround stubs are only generated when both source and target are in the
2683      same section.  */
2684   bfd_vma source_value;
2685 
2686   /* The instruction which caused this stub to be generated (only valid for
2687      Cortex-A8 erratum workaround stubs at present).  */
2688   unsigned long orig_insn;
2689 
2690   /* The stub type.  */
2691   enum elf32_arm_stub_type stub_type;
2692   /* Its encoding size in bytes.  */
2693   int stub_size;
2694   /* Its template.  */
2695   const insn_sequence *stub_template;
2696   /* The size of the template (number of entries).  */
2697   int stub_template_size;
2698 
2699   /* The symbol table entry, if any, that this was derived from.  */
2700   struct elf32_arm_link_hash_entry *h;
2701 
2702   /* Type of branch.  */
2703   enum arm_st_branch_type branch_type;
2704 
2705   /* Where this stub is being called from, or, in the case of combined
2706      stub sections, the first input section in the group.  */
2707   asection *id_sec;
2708 
2709   /* The name for the local symbol at the start of this stub.  The
2710      stub name in the hash table has to be unique; this does not, so
2711      it can be friendlier.  */
2712   char *output_name;
2713 };
2714 
2715 /* Used to build a map of a section.  This is required for mixed-endian
2716    code/data.  */
2717 
2718 typedef struct elf32_elf_section_map
2719 {
2720   bfd_vma vma;
2721   char type;
2722 }
2723 elf32_arm_section_map;
2724 
2725 /* Information about a VFP11 erratum veneer, or a branch to such a veneer.  */
2726 
2727 typedef enum
2728 {
2729   VFP11_ERRATUM_BRANCH_TO_ARM_VENEER,
2730   VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER,
2731   VFP11_ERRATUM_ARM_VENEER,
2732   VFP11_ERRATUM_THUMB_VENEER
2733 }
2734 elf32_vfp11_erratum_type;
2735 
2736 typedef struct elf32_vfp11_erratum_list
2737 {
2738   struct elf32_vfp11_erratum_list *next;
2739   bfd_vma vma;
2740   union
2741   {
2742     struct
2743     {
2744       struct elf32_vfp11_erratum_list *veneer;
2745       unsigned int vfp_insn;
2746     } b;
2747     struct
2748     {
2749       struct elf32_vfp11_erratum_list *branch;
2750       unsigned int id;
2751     } v;
2752   } u;
2753   elf32_vfp11_erratum_type type;
2754 }
2755 elf32_vfp11_erratum_list;
2756 
2757 /* Information about a STM32L4XX erratum veneer, or a branch to such a
2758    veneer.  */
2759 typedef enum
2760 {
2761   STM32L4XX_ERRATUM_BRANCH_TO_VENEER,
2762   STM32L4XX_ERRATUM_VENEER
2763 }
2764 elf32_stm32l4xx_erratum_type;
2765 
2766 typedef struct elf32_stm32l4xx_erratum_list
2767 {
2768   struct elf32_stm32l4xx_erratum_list *next;
2769   bfd_vma vma;
2770   union
2771   {
2772     struct
2773     {
2774       struct elf32_stm32l4xx_erratum_list *veneer;
2775       unsigned int insn;
2776     } b;
2777     struct
2778     {
2779       struct elf32_stm32l4xx_erratum_list *branch;
2780       unsigned int id;
2781     } v;
2782   } u;
2783   elf32_stm32l4xx_erratum_type type;
2784 }
2785 elf32_stm32l4xx_erratum_list;
2786 
2787 typedef enum
2788 {
2789   DELETE_EXIDX_ENTRY,
2790   INSERT_EXIDX_CANTUNWIND_AT_END
2791 }
2792 arm_unwind_edit_type;
2793 
2794 /* A (sorted) list of edits to apply to an unwind table.  */
2795 typedef struct arm_unwind_table_edit
2796 {
2797   arm_unwind_edit_type type;
2798   /* Note: we sometimes want to insert an unwind entry corresponding to a
2799      section different from the one we're currently writing out, so record the
2800      (text) section this edit relates to here.  */
2801   asection *linked_section;
2802   unsigned int index;
2803   struct arm_unwind_table_edit *next;
2804 }
2805 arm_unwind_table_edit;
2806 
2807 typedef struct _arm_elf_section_data
2808 {
2809   /* Information about mapping symbols.  */
2810   struct bfd_elf_section_data elf;
2811   unsigned int mapcount;
2812   unsigned int mapsize;
2813   elf32_arm_section_map *map;
2814   /* Information about CPU errata.  */
2815   unsigned int erratumcount;
2816   elf32_vfp11_erratum_list *erratumlist;
2817   unsigned int stm32l4xx_erratumcount;
2818   elf32_stm32l4xx_erratum_list *stm32l4xx_erratumlist;
2819   unsigned int additional_reloc_count;
2820   /* Information about unwind tables.  */
2821   union
2822   {
2823     /* Unwind info attached to a text section.  */
2824     struct
2825     {
2826       asection *arm_exidx_sec;
2827     } text;
2828 
2829     /* Unwind info attached to an .ARM.exidx section.  */
2830     struct
2831     {
2832       arm_unwind_table_edit *unwind_edit_list;
2833       arm_unwind_table_edit *unwind_edit_tail;
2834     } exidx;
2835   } u;
2836 }
2837 _arm_elf_section_data;
2838 
2839 #define elf32_arm_section_data(sec) \
2840   ((_arm_elf_section_data *) elf_section_data (sec))
2841 
2842 /* A fix which might be required for Cortex-A8 Thumb-2 branch/TLB erratum.
2843    These fixes are subject to a relaxation procedure (in elf32_arm_size_stubs),
2844    so may be created multiple times: we use an array of these entries whilst
2845    relaxing which we can refresh easily, then create stubs for each potentially
2846    erratum-triggering instruction once we've settled on a solution.  */
2847 
2848 struct a8_erratum_fix
2849 {
2850   bfd *input_bfd;
2851   asection *section;
2852   bfd_vma offset;
2853   bfd_vma target_offset;
2854   unsigned long orig_insn;
2855   char *stub_name;
2856   enum elf32_arm_stub_type stub_type;
2857   enum arm_st_branch_type branch_type;
2858 };
2859 
2860 /* A table of relocs applied to branches which might trigger Cortex-A8
2861    erratum.  */
2862 
2863 struct a8_erratum_reloc
2864 {
2865   bfd_vma from;
2866   bfd_vma destination;
2867   struct elf32_arm_link_hash_entry *hash;
2868   const char *sym_name;
2869   unsigned int r_type;
2870   enum arm_st_branch_type branch_type;
2871   bfd_boolean non_a8_stub;
2872 };
2873 
2874 /* The size of the thread control block.  */
2875 #define TCB_SIZE	8
2876 
2877 /* ARM-specific information about a PLT entry, over and above the usual
2878    gotplt_union.  */
2879 struct arm_plt_info
2880 {
2881   /* We reference count Thumb references to a PLT entry separately,
2882      so that we can emit the Thumb trampoline only if needed.  */
2883   bfd_signed_vma thumb_refcount;
2884 
2885   /* Some references from Thumb code may be eliminated by BL->BLX
2886      conversion, so record them separately.  */
2887   bfd_signed_vma maybe_thumb_refcount;
2888 
2889   /* How many of the recorded PLT accesses were from non-call relocations.
2890      This information is useful when deciding whether anything takes the
2891      address of an STT_GNU_IFUNC PLT.  A value of 0 means that all
2892      non-call references to the function should resolve directly to the
2893      real runtime target.  */
2894   unsigned int noncall_refcount;
2895 
2896   /* Since PLT entries have variable size if the Thumb prologue is
2897      used, we need to record the index into .got.plt instead of
2898      recomputing it from the PLT offset.  */
2899   bfd_signed_vma got_offset;
2900 };
2901 
2902 /* Information about an .iplt entry for a local STT_GNU_IFUNC symbol.  */
2903 struct arm_local_iplt_info
2904 {
2905   /* The information that is usually found in the generic ELF part of
2906      the hash table entry.  */
2907   union gotplt_union root;
2908 
2909   /* The information that is usually found in the ARM-specific part of
2910      the hash table entry.  */
2911   struct arm_plt_info arm;
2912 
2913   /* A list of all potential dynamic relocations against this symbol.  */
2914   struct elf_dyn_relocs *dyn_relocs;
2915 };
2916 
2917 struct elf_arm_obj_tdata
2918 {
2919   struct elf_obj_tdata root;
2920 
2921   /* tls_type for each local got entry.  */
2922   char *local_got_tls_type;
2923 
2924   /* GOTPLT entries for TLS descriptors.  */
2925   bfd_vma *local_tlsdesc_gotent;
2926 
2927   /* Information for local symbols that need entries in .iplt.  */
2928   struct arm_local_iplt_info **local_iplt;
2929 
2930   /* Zero to warn when linking objects with incompatible enum sizes.  */
2931   int no_enum_size_warning;
2932 
2933   /* Zero to warn when linking objects with incompatible wchar_t sizes.  */
2934   int no_wchar_size_warning;
2935 };
2936 
2937 #define elf_arm_tdata(bfd) \
2938   ((struct elf_arm_obj_tdata *) (bfd)->tdata.any)
2939 
2940 #define elf32_arm_local_got_tls_type(bfd) \
2941   (elf_arm_tdata (bfd)->local_got_tls_type)
2942 
2943 #define elf32_arm_local_tlsdesc_gotent(bfd) \
2944   (elf_arm_tdata (bfd)->local_tlsdesc_gotent)
2945 
2946 #define elf32_arm_local_iplt(bfd) \
2947   (elf_arm_tdata (bfd)->local_iplt)
2948 
2949 #define is_arm_elf(bfd) \
2950   (bfd_get_flavour (bfd) == bfd_target_elf_flavour \
2951    && elf_tdata (bfd) != NULL \
2952    && elf_object_id (bfd) == ARM_ELF_DATA)
2953 
2954 static bfd_boolean
2955 elf32_arm_mkobject (bfd *abfd)
2956 {
2957   return bfd_elf_allocate_object (abfd, sizeof (struct elf_arm_obj_tdata),
2958 				  ARM_ELF_DATA);
2959 }
2960 
2961 #define elf32_arm_hash_entry(ent) ((struct elf32_arm_link_hash_entry *)(ent))
2962 
2963 /* Arm ELF linker hash entry.  */
2964 struct elf32_arm_link_hash_entry
2965 {
2966   struct elf_link_hash_entry root;
2967 
2968   /* Track dynamic relocs copied for this symbol.  */
2969   struct elf_dyn_relocs *dyn_relocs;
2970 
2971   /* ARM-specific PLT information.  */
2972   struct arm_plt_info plt;
2973 
2974 #define GOT_UNKNOWN	0
2975 #define GOT_NORMAL	1
2976 #define GOT_TLS_GD	2
2977 #define GOT_TLS_IE	4
2978 #define GOT_TLS_GDESC	8
2979 #define GOT_TLS_GD_ANY_P(type)	((type & GOT_TLS_GD) || (type & GOT_TLS_GDESC))
2980   unsigned int tls_type : 8;
2981 
2982   /* True if the symbol's PLT entry is in .iplt rather than .plt.  */
2983   unsigned int is_iplt : 1;
2984 
2985   unsigned int unused : 23;
2986 
2987   /* Offset of the GOTPLT entry reserved for the TLS descriptor,
2988      starting at the end of the jump table.  */
2989   bfd_vma tlsdesc_got;
2990 
2991   /* The symbol marking the real symbol location for exported thumb
2992      symbols with Arm stubs.  */
2993   struct elf_link_hash_entry *export_glue;
2994 
2995   /* A pointer to the most recently used stub hash entry against this
2996      symbol.  */
2997   struct elf32_arm_stub_hash_entry *stub_cache;
2998 };
2999 
3000 /* Traverse an arm ELF linker hash table.  */
3001 #define elf32_arm_link_hash_traverse(table, func, info)			\
3002   (elf_link_hash_traverse						\
3003    (&(table)->root,							\
3004     (bfd_boolean (*) (struct elf_link_hash_entry *, void *)) (func),	\
3005     (info)))
3006 
3007 /* Get the ARM elf linker hash table from a link_info structure.  */
3008 #define elf32_arm_hash_table(info) \
3009   (elf_hash_table_id ((struct elf_link_hash_table *) ((info)->hash)) \
3010   == ARM_ELF_DATA ? ((struct elf32_arm_link_hash_table *) ((info)->hash)) : NULL)
3011 
3012 #define arm_stub_hash_lookup(table, string, create, copy) \
3013   ((struct elf32_arm_stub_hash_entry *) \
3014    bfd_hash_lookup ((table), (string), (create), (copy)))
3015 
3016 /* Array to keep track of which stub sections have been created, and
3017    information on stub grouping.  */
3018 struct map_stub
3019 {
3020   /* This is the section to which stubs in the group will be
3021      attached.  */
3022   asection *link_sec;
3023   /* The stub section.  */
3024   asection *stub_sec;
3025 };
3026 
3027 #define elf32_arm_compute_jump_table_size(htab) \
3028   ((htab)->next_tls_desc_index * 4)
3029 
3030 /* ARM ELF linker hash table.  */
3031 struct elf32_arm_link_hash_table
3032 {
3033   /* The main hash table.  */
3034   struct elf_link_hash_table root;
3035 
3036   /* The size in bytes of the section containing the Thumb-to-ARM glue.  */
3037   bfd_size_type thumb_glue_size;
3038 
3039   /* The size in bytes of the section containing the ARM-to-Thumb glue.  */
3040   bfd_size_type arm_glue_size;
3041 
3042   /* The size in bytes of section containing the ARMv4 BX veneers.  */
3043   bfd_size_type bx_glue_size;
3044 
3045   /* Offsets of ARMv4 BX veneers.  Bit1 set if present, and Bit0 set when
3046      veneer has been populated.  */
3047   bfd_vma bx_glue_offset[15];
3048 
3049   /* The size in bytes of the section containing glue for VFP11 erratum
3050      veneers.  */
3051   bfd_size_type vfp11_erratum_glue_size;
3052 
3053  /* The size in bytes of the section containing glue for STM32L4XX erratum
3054      veneers.  */
3055   bfd_size_type stm32l4xx_erratum_glue_size;
3056 
3057   /* A table of fix locations for Cortex-A8 Thumb-2 branch/TLB erratum.  This
3058      holds Cortex-A8 erratum fix locations between elf32_arm_size_stubs() and
3059      elf32_arm_write_section().  */
3060   struct a8_erratum_fix *a8_erratum_fixes;
3061   unsigned int num_a8_erratum_fixes;
3062 
3063   /* An arbitrary input BFD chosen to hold the glue sections.  */
3064   bfd * bfd_of_glue_owner;
3065 
3066   /* Nonzero to output a BE8 image.  */
3067   int byteswap_code;
3068 
3069   /* Zero if R_ARM_TARGET1 means R_ARM_ABS32.
3070      Nonzero if R_ARM_TARGET1 means R_ARM_REL32.  */
3071   int target1_is_rel;
3072 
3073   /* The relocation to use for R_ARM_TARGET2 relocations.  */
3074   int target2_reloc;
3075 
3076   /* 0 = Ignore R_ARM_V4BX.
3077      1 = Convert BX to MOV PC.
3078      2 = Generate v4 interworing stubs.  */
3079   int fix_v4bx;
3080 
3081   /* Whether we should fix the Cortex-A8 Thumb-2 branch/TLB erratum.  */
3082   int fix_cortex_a8;
3083 
3084   /* Whether we should fix the ARM1176 BLX immediate issue.  */
3085   int fix_arm1176;
3086 
3087   /* Nonzero if the ARM/Thumb BLX instructions are available for use.  */
3088   int use_blx;
3089 
3090   /* What sort of code sequences we should look for which may trigger the
3091      VFP11 denorm erratum.  */
3092   bfd_arm_vfp11_fix vfp11_fix;
3093 
3094   /* Global counter for the number of fixes we have emitted.  */
3095   int num_vfp11_fixes;
3096 
3097   /* What sort of code sequences we should look for which may trigger the
3098      STM32L4XX erratum.  */
3099   bfd_arm_stm32l4xx_fix stm32l4xx_fix;
3100 
3101   /* Global counter for the number of fixes we have emitted.  */
3102   int num_stm32l4xx_fixes;
3103 
3104   /* Nonzero to force PIC branch veneers.  */
3105   int pic_veneer;
3106 
3107   /* The number of bytes in the initial entry in the PLT.  */
3108   bfd_size_type plt_header_size;
3109 
3110   /* The number of bytes in the subsequent PLT etries.  */
3111   bfd_size_type plt_entry_size;
3112 
3113   /* True if the target system is VxWorks.  */
3114   int vxworks_p;
3115 
3116   /* True if the target system is Symbian OS.  */
3117   int symbian_p;
3118 
3119   /* True if the target system is Native Client.  */
3120   int nacl_p;
3121 
3122   /* True if the target uses REL relocations.  */
3123   int use_rel;
3124 
3125   /* The index of the next unused R_ARM_TLS_DESC slot in .rel.plt.  */
3126   bfd_vma next_tls_desc_index;
3127 
3128   /* How many R_ARM_TLS_DESC relocations were generated so far.  */
3129   bfd_vma num_tls_desc;
3130 
3131   /* Short-cuts to get to dynamic linker sections.  */
3132   asection *sdynbss;
3133   asection *srelbss;
3134 
3135   /* The (unloaded but important) VxWorks .rela.plt.unloaded section.  */
3136   asection *srelplt2;
3137 
3138   /* The offset into splt of the PLT entry for the TLS descriptor
3139      resolver.  Special values are 0, if not necessary (or not found
3140      to be necessary yet), and -1 if needed but not determined
3141      yet.  */
3142   bfd_vma dt_tlsdesc_plt;
3143 
3144   /* The offset into sgot of the GOT entry used by the PLT entry
3145      above.  */
3146   bfd_vma dt_tlsdesc_got;
3147 
3148   /* Offset in .plt section of tls_arm_trampoline.  */
3149   bfd_vma tls_trampoline;
3150 
3151   /* Data for R_ARM_TLS_LDM32 relocations.  */
3152   union
3153   {
3154     bfd_signed_vma refcount;
3155     bfd_vma offset;
3156   } tls_ldm_got;
3157 
3158   /* Small local sym cache.  */
3159   struct sym_cache sym_cache;
3160 
3161   /* For convenience in allocate_dynrelocs.  */
3162   bfd * obfd;
3163 
3164   /* The amount of space used by the reserved portion of the sgotplt
3165      section, plus whatever space is used by the jump slots.  */
3166   bfd_vma sgotplt_jump_table_size;
3167 
3168   /* The stub hash table.  */
3169   struct bfd_hash_table stub_hash_table;
3170 
3171   /* Linker stub bfd.  */
3172   bfd *stub_bfd;
3173 
3174   /* Linker call-backs.  */
3175   asection * (*add_stub_section) (const char *, asection *, asection *,
3176 				  unsigned int);
3177   void (*layout_sections_again) (void);
3178 
3179   /* Array to keep track of which stub sections have been created, and
3180      information on stub grouping.  */
3181   struct map_stub *stub_group;
3182 
3183   /* Number of elements in stub_group.  */
3184   unsigned int top_id;
3185 
3186   /* Assorted information used by elf32_arm_size_stubs.  */
3187   unsigned int bfd_count;
3188   unsigned int top_index;
3189   asection **input_list;
3190 };
3191 
3192 static inline int
3193 ctz (unsigned int mask)
3194 {
3195 #if GCC_VERSION >= 3004
3196   return __builtin_ctz (mask);
3197 #else
3198   unsigned int i;
3199 
3200   for (i = 0; i < 8 * sizeof (mask); i++)
3201     {
3202       if (mask & 0x1)
3203 	break;
3204       mask = (mask >> 1);
3205     }
3206   return i;
3207 #endif
3208 }
3209 
3210 #if !defined (__NetBSD__) || (__NetBSD_Version__ < 600000000)
3211 unsigned int
3212 popcount (unsigned int mask)
3213 {
3214 #if GCC_VERSION >= 3004
3215   return __builtin_popcount (mask);
3216 #else
3217   unsigned int i, sum = 0;
3218 
3219   for (i = 0; i < 8 * sizeof (mask); i++)
3220     {
3221       if (mask & 0x1)
3222 	sum++;
3223       mask = (mask >> 1);
3224     }
3225   return sum;
3226 #endif
3227 }
3228 #endif
3229 
3230 /* Create an entry in an ARM ELF linker hash table.  */
3231 
3232 static struct bfd_hash_entry *
3233 elf32_arm_link_hash_newfunc (struct bfd_hash_entry * entry,
3234 			     struct bfd_hash_table * table,
3235 			     const char * string)
3236 {
3237   struct elf32_arm_link_hash_entry * ret =
3238     (struct elf32_arm_link_hash_entry *) entry;
3239 
3240   /* Allocate the structure if it has not already been allocated by a
3241      subclass.  */
3242   if (ret == NULL)
3243     ret = (struct elf32_arm_link_hash_entry *)
3244 	bfd_hash_allocate (table, sizeof (struct elf32_arm_link_hash_entry));
3245   if (ret == NULL)
3246     return (struct bfd_hash_entry *) ret;
3247 
3248   /* Call the allocation method of the superclass.  */
3249   ret = ((struct elf32_arm_link_hash_entry *)
3250 	 _bfd_elf_link_hash_newfunc ((struct bfd_hash_entry *) ret,
3251 				     table, string));
3252   if (ret != NULL)
3253     {
3254       ret->dyn_relocs = NULL;
3255       ret->tls_type = GOT_UNKNOWN;
3256       ret->tlsdesc_got = (bfd_vma) -1;
3257       ret->plt.thumb_refcount = 0;
3258       ret->plt.maybe_thumb_refcount = 0;
3259       ret->plt.noncall_refcount = 0;
3260       ret->plt.got_offset = -1;
3261       ret->is_iplt = FALSE;
3262       ret->export_glue = NULL;
3263 
3264       ret->stub_cache = NULL;
3265     }
3266 
3267   return (struct bfd_hash_entry *) ret;
3268 }
3269 
3270 /* Ensure that we have allocated bookkeeping structures for ABFD's local
3271    symbols.  */
3272 
3273 static bfd_boolean
3274 elf32_arm_allocate_local_sym_info (bfd *abfd)
3275 {
3276   if (elf_local_got_refcounts (abfd) == NULL)
3277     {
3278       bfd_size_type num_syms;
3279       bfd_size_type size;
3280       char *data;
3281 
3282       num_syms = elf_tdata (abfd)->symtab_hdr.sh_info;
3283       size = num_syms * (sizeof (bfd_signed_vma)
3284 			 + sizeof (struct arm_local_iplt_info *)
3285 			 + sizeof (bfd_vma)
3286 			 + sizeof (char));
3287       data = bfd_zalloc (abfd, size);
3288       if (data == NULL)
3289 	return FALSE;
3290 
3291       elf_local_got_refcounts (abfd) = (bfd_signed_vma *) data;
3292       data += num_syms * sizeof (bfd_signed_vma);
3293 
3294       elf32_arm_local_iplt (abfd) = (struct arm_local_iplt_info **) data;
3295       data += num_syms * sizeof (struct arm_local_iplt_info *);
3296 
3297       elf32_arm_local_tlsdesc_gotent (abfd) = (bfd_vma *) data;
3298       data += num_syms * sizeof (bfd_vma);
3299 
3300       elf32_arm_local_got_tls_type (abfd) = data;
3301     }
3302   return TRUE;
3303 }
3304 
3305 /* Return the .iplt information for local symbol R_SYMNDX, which belongs
3306    to input bfd ABFD.  Create the information if it doesn't already exist.
3307    Return null if an allocation fails.  */
3308 
3309 static struct arm_local_iplt_info *
3310 elf32_arm_create_local_iplt (bfd *abfd, unsigned long r_symndx)
3311 {
3312   struct arm_local_iplt_info **ptr;
3313 
3314   if (!elf32_arm_allocate_local_sym_info (abfd))
3315     return NULL;
3316 
3317   BFD_ASSERT (r_symndx < elf_tdata (abfd)->symtab_hdr.sh_info);
3318   ptr = &elf32_arm_local_iplt (abfd)[r_symndx];
3319   if (*ptr == NULL)
3320     *ptr = bfd_zalloc (abfd, sizeof (**ptr));
3321   return *ptr;
3322 }
3323 
3324 /* Try to obtain PLT information for the symbol with index R_SYMNDX
3325    in ABFD's symbol table.  If the symbol is global, H points to its
3326    hash table entry, otherwise H is null.
3327 
3328    Return true if the symbol does have PLT information.  When returning
3329    true, point *ROOT_PLT at the target-independent reference count/offset
3330    union and *ARM_PLT at the ARM-specific information.  */
3331 
3332 static bfd_boolean
3333 elf32_arm_get_plt_info (bfd *abfd, struct elf32_arm_link_hash_entry *h,
3334 			unsigned long r_symndx, union gotplt_union **root_plt,
3335 			struct arm_plt_info **arm_plt)
3336 {
3337   struct arm_local_iplt_info *local_iplt;
3338 
3339   if (h != NULL)
3340     {
3341       *root_plt = &h->root.plt;
3342       *arm_plt = &h->plt;
3343       return TRUE;
3344     }
3345 
3346   if (elf32_arm_local_iplt (abfd) == NULL)
3347     return FALSE;
3348 
3349   local_iplt = elf32_arm_local_iplt (abfd)[r_symndx];
3350   if (local_iplt == NULL)
3351     return FALSE;
3352 
3353   *root_plt = &local_iplt->root;
3354   *arm_plt = &local_iplt->arm;
3355   return TRUE;
3356 }
3357 
3358 /* Return true if the PLT described by ARM_PLT requires a Thumb stub
3359    before it.  */
3360 
3361 static bfd_boolean
3362 elf32_arm_plt_needs_thumb_stub_p (struct bfd_link_info *info,
3363 				  struct arm_plt_info *arm_plt)
3364 {
3365   struct elf32_arm_link_hash_table *htab;
3366 
3367   htab = elf32_arm_hash_table (info);
3368   return (arm_plt->thumb_refcount != 0
3369 	  || (!htab->use_blx && arm_plt->maybe_thumb_refcount != 0));
3370 }
3371 
3372 /* Return a pointer to the head of the dynamic reloc list that should
3373    be used for local symbol ISYM, which is symbol number R_SYMNDX in
3374    ABFD's symbol table.  Return null if an error occurs.  */
3375 
3376 static struct elf_dyn_relocs **
3377 elf32_arm_get_local_dynreloc_list (bfd *abfd, unsigned long r_symndx,
3378 				   Elf_Internal_Sym *isym)
3379 {
3380   if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
3381     {
3382       struct arm_local_iplt_info *local_iplt;
3383 
3384       local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
3385       if (local_iplt == NULL)
3386 	return NULL;
3387       return &local_iplt->dyn_relocs;
3388     }
3389   else
3390     {
3391       /* Track dynamic relocs needed for local syms too.
3392 	 We really need local syms available to do this
3393 	 easily.  Oh well.  */
3394       asection *s;
3395       void *vpp;
3396 
3397       s = bfd_section_from_elf_index (abfd, isym->st_shndx);
3398       if (s == NULL)
3399 	abort ();
3400 
3401       vpp = &elf_section_data (s)->local_dynrel;
3402       return (struct elf_dyn_relocs **) vpp;
3403     }
3404 }
3405 
3406 /* Initialize an entry in the stub hash table.  */
3407 
3408 static struct bfd_hash_entry *
3409 stub_hash_newfunc (struct bfd_hash_entry *entry,
3410 		   struct bfd_hash_table *table,
3411 		   const char *string)
3412 {
3413   /* Allocate the structure if it has not already been allocated by a
3414      subclass.  */
3415   if (entry == NULL)
3416     {
3417       entry = (struct bfd_hash_entry *)
3418 	  bfd_hash_allocate (table, sizeof (struct elf32_arm_stub_hash_entry));
3419       if (entry == NULL)
3420 	return entry;
3421     }
3422 
3423   /* Call the allocation method of the superclass.  */
3424   entry = bfd_hash_newfunc (entry, table, string);
3425   if (entry != NULL)
3426     {
3427       struct elf32_arm_stub_hash_entry *eh;
3428 
3429       /* Initialize the local fields.  */
3430       eh = (struct elf32_arm_stub_hash_entry *) entry;
3431       eh->stub_sec = NULL;
3432       eh->stub_offset = 0;
3433       eh->source_value = 0;
3434       eh->target_value = 0;
3435       eh->target_section = NULL;
3436       eh->orig_insn = 0;
3437       eh->stub_type = arm_stub_none;
3438       eh->stub_size = 0;
3439       eh->stub_template = NULL;
3440       eh->stub_template_size = 0;
3441       eh->h = NULL;
3442       eh->id_sec = NULL;
3443       eh->output_name = NULL;
3444     }
3445 
3446   return entry;
3447 }
3448 
3449 /* Create .got, .gotplt, and .rel(a).got sections in DYNOBJ, and set up
3450    shortcuts to them in our hash table.  */
3451 
3452 static bfd_boolean
3453 create_got_section (bfd *dynobj, struct bfd_link_info *info)
3454 {
3455   struct elf32_arm_link_hash_table *htab;
3456 
3457   htab = elf32_arm_hash_table (info);
3458   if (htab == NULL)
3459     return FALSE;
3460 
3461   /* BPABI objects never have a GOT, or associated sections.  */
3462   if (htab->symbian_p)
3463     return TRUE;
3464 
3465   if (! _bfd_elf_create_got_section (dynobj, info))
3466     return FALSE;
3467 
3468   return TRUE;
3469 }
3470 
3471 /* Create the .iplt, .rel(a).iplt and .igot.plt sections.  */
3472 
3473 static bfd_boolean
3474 create_ifunc_sections (struct bfd_link_info *info)
3475 {
3476   struct elf32_arm_link_hash_table *htab;
3477   const struct elf_backend_data *bed;
3478   bfd *dynobj;
3479   asection *s;
3480   flagword flags;
3481 
3482   htab = elf32_arm_hash_table (info);
3483   dynobj = htab->root.dynobj;
3484   bed = get_elf_backend_data (dynobj);
3485   flags = bed->dynamic_sec_flags;
3486 
3487   if (htab->root.iplt == NULL)
3488     {
3489       s = bfd_make_section_anyway_with_flags (dynobj, ".iplt",
3490 					      flags | SEC_READONLY | SEC_CODE);
3491       if (s == NULL
3492 	  || !bfd_set_section_alignment (dynobj, s, bed->plt_alignment))
3493 	return FALSE;
3494       htab->root.iplt = s;
3495     }
3496 
3497   if (htab->root.irelplt == NULL)
3498     {
3499       s = bfd_make_section_anyway_with_flags (dynobj,
3500 					      RELOC_SECTION (htab, ".iplt"),
3501 					      flags | SEC_READONLY);
3502       if (s == NULL
3503 	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3504 	return FALSE;
3505       htab->root.irelplt = s;
3506     }
3507 
3508   if (htab->root.igotplt == NULL)
3509     {
3510       s = bfd_make_section_anyway_with_flags (dynobj, ".igot.plt", flags);
3511       if (s == NULL
3512 	  || !bfd_set_section_alignment (dynobj, s, bed->s->log_file_align))
3513 	return FALSE;
3514       htab->root.igotplt = s;
3515     }
3516   return TRUE;
3517 }
3518 
3519 /* Determine if we're dealing with a Thumb only architecture.  */
3520 
3521 static bfd_boolean
3522 using_thumb_only (struct elf32_arm_link_hash_table *globals)
3523 {
3524   int arch;
3525   int profile = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3526 					  Tag_CPU_arch_profile);
3527 
3528   if (profile)
3529     return profile == 'M';
3530 
3531   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3532 
3533   /* Force return logic to be reviewed for each new architecture.  */
3534   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3535 	      || arch == TAG_CPU_ARCH_V8M_BASE
3536 	      || arch == TAG_CPU_ARCH_V8M_MAIN);
3537 
3538   if (arch == TAG_CPU_ARCH_V6_M
3539       || arch == TAG_CPU_ARCH_V6S_M
3540       || arch == TAG_CPU_ARCH_V7E_M
3541       || arch == TAG_CPU_ARCH_V8M_BASE
3542       || arch == TAG_CPU_ARCH_V8M_MAIN)
3543     return TRUE;
3544 
3545   return FALSE;
3546 }
3547 
3548 /* Determine if we're dealing with a Thumb-2 object.  */
3549 
3550 static bfd_boolean
3551 using_thumb2 (struct elf32_arm_link_hash_table *globals)
3552 {
3553   int arch;
3554   int thumb_isa = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3555 					    Tag_THUMB_ISA_use);
3556 
3557   if (thumb_isa)
3558     return thumb_isa == 2;
3559 
3560   arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC, Tag_CPU_arch);
3561 
3562   /* Force return logic to be reviewed for each new architecture.  */
3563   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3564 	      || arch == TAG_CPU_ARCH_V8M_BASE
3565 	      || arch == TAG_CPU_ARCH_V8M_MAIN);
3566 
3567   return (arch == TAG_CPU_ARCH_V6T2
3568 	  || arch == TAG_CPU_ARCH_V7
3569 	  || arch == TAG_CPU_ARCH_V7E_M
3570 	  || arch == TAG_CPU_ARCH_V8
3571 	  || arch == TAG_CPU_ARCH_V8M_MAIN);
3572 }
3573 
3574 /* Create .plt, .rel(a).plt, .got, .got.plt, .rel(a).got, .dynbss, and
3575    .rel(a).bss sections in DYNOBJ, and set up shortcuts to them in our
3576    hash table.  */
3577 
3578 static bfd_boolean
3579 elf32_arm_create_dynamic_sections (bfd *dynobj, struct bfd_link_info *info)
3580 {
3581   struct elf32_arm_link_hash_table *htab;
3582 
3583   htab = elf32_arm_hash_table (info);
3584   if (htab == NULL)
3585     return FALSE;
3586 
3587   if (!htab->root.sgot && !create_got_section (dynobj, info))
3588     return FALSE;
3589 
3590   if (!_bfd_elf_create_dynamic_sections (dynobj, info))
3591     return FALSE;
3592 
3593   htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss");
3594   if (!bfd_link_pic (info))
3595     htab->srelbss = bfd_get_linker_section (dynobj,
3596 					    RELOC_SECTION (htab, ".bss"));
3597 
3598   if (htab->vxworks_p)
3599     {
3600       if (!elf_vxworks_create_dynamic_sections (dynobj, info, &htab->srelplt2))
3601 	return FALSE;
3602 
3603       if (bfd_link_pic (info))
3604 	{
3605 	  htab->plt_header_size = 0;
3606 	  htab->plt_entry_size
3607 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_shared_plt_entry);
3608 	}
3609       else
3610 	{
3611 	  htab->plt_header_size
3612 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt0_entry);
3613 	  htab->plt_entry_size
3614 	    = 4 * ARRAY_SIZE (elf32_arm_vxworks_exec_plt_entry);
3615 	}
3616 
3617       if (elf_elfheader (dynobj))
3618 	elf_elfheader (dynobj)->e_ident[EI_CLASS] = ELFCLASS32;
3619     }
3620   else
3621     {
3622       /* PR ld/16017
3623 	 Test for thumb only architectures.  Note - we cannot just call
3624 	 using_thumb_only() as the attributes in the output bfd have not been
3625 	 initialised at this point, so instead we use the input bfd.  */
3626       bfd * saved_obfd = htab->obfd;
3627 
3628       htab->obfd = dynobj;
3629       if (using_thumb_only (htab))
3630 	{
3631 	  htab->plt_header_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
3632 	  htab->plt_entry_size  = 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
3633 	}
3634       htab->obfd = saved_obfd;
3635     }
3636 
3637   if (!htab->root.splt
3638       || !htab->root.srelplt
3639       || !htab->sdynbss
3640       || (!bfd_link_pic (info) && !htab->srelbss))
3641     abort ();
3642 
3643   return TRUE;
3644 }
3645 
3646 /* Copy the extra info we tack onto an elf_link_hash_entry.  */
3647 
3648 static void
3649 elf32_arm_copy_indirect_symbol (struct bfd_link_info *info,
3650 				struct elf_link_hash_entry *dir,
3651 				struct elf_link_hash_entry *ind)
3652 {
3653   struct elf32_arm_link_hash_entry *edir, *eind;
3654 
3655   edir = (struct elf32_arm_link_hash_entry *) dir;
3656   eind = (struct elf32_arm_link_hash_entry *) ind;
3657 
3658   if (eind->dyn_relocs != NULL)
3659     {
3660       if (edir->dyn_relocs != NULL)
3661 	{
3662 	  struct elf_dyn_relocs **pp;
3663 	  struct elf_dyn_relocs *p;
3664 
3665 	  /* Add reloc counts against the indirect sym to the direct sym
3666 	     list.  Merge any entries against the same section.  */
3667 	  for (pp = &eind->dyn_relocs; (p = *pp) != NULL; )
3668 	    {
3669 	      struct elf_dyn_relocs *q;
3670 
3671 	      for (q = edir->dyn_relocs; q != NULL; q = q->next)
3672 		if (q->sec == p->sec)
3673 		  {
3674 		    q->pc_count += p->pc_count;
3675 		    q->count += p->count;
3676 		    *pp = p->next;
3677 		    break;
3678 		  }
3679 	      if (q == NULL)
3680 		pp = &p->next;
3681 	    }
3682 	  *pp = edir->dyn_relocs;
3683 	}
3684 
3685       edir->dyn_relocs = eind->dyn_relocs;
3686       eind->dyn_relocs = NULL;
3687     }
3688 
3689   if (ind->root.type == bfd_link_hash_indirect)
3690     {
3691       /* Copy over PLT info.  */
3692       edir->plt.thumb_refcount += eind->plt.thumb_refcount;
3693       eind->plt.thumb_refcount = 0;
3694       edir->plt.maybe_thumb_refcount += eind->plt.maybe_thumb_refcount;
3695       eind->plt.maybe_thumb_refcount = 0;
3696       edir->plt.noncall_refcount += eind->plt.noncall_refcount;
3697       eind->plt.noncall_refcount = 0;
3698 
3699       /* We should only allocate a function to .iplt once the final
3700 	 symbol information is known.  */
3701       BFD_ASSERT (!eind->is_iplt);
3702 
3703       if (dir->got.refcount <= 0)
3704 	{
3705 	  edir->tls_type = eind->tls_type;
3706 	  eind->tls_type = GOT_UNKNOWN;
3707 	}
3708     }
3709 
3710   _bfd_elf_link_hash_copy_indirect (info, dir, ind);
3711 }
3712 
3713 /* Destroy an ARM elf linker hash table.  */
3714 
3715 static void
3716 elf32_arm_link_hash_table_free (bfd *obfd)
3717 {
3718   struct elf32_arm_link_hash_table *ret
3719     = (struct elf32_arm_link_hash_table *) obfd->link.hash;
3720 
3721   bfd_hash_table_free (&ret->stub_hash_table);
3722   _bfd_elf_link_hash_table_free (obfd);
3723 }
3724 
3725 /* Create an ARM elf linker hash table.  */
3726 
3727 static struct bfd_link_hash_table *
3728 elf32_arm_link_hash_table_create (bfd *abfd)
3729 {
3730   struct elf32_arm_link_hash_table *ret;
3731   bfd_size_type amt = sizeof (struct elf32_arm_link_hash_table);
3732 
3733   ret = (struct elf32_arm_link_hash_table *) bfd_zmalloc (amt);
3734   if (ret == NULL)
3735     return NULL;
3736 
3737   if (!_bfd_elf_link_hash_table_init (& ret->root, abfd,
3738 				      elf32_arm_link_hash_newfunc,
3739 				      sizeof (struct elf32_arm_link_hash_entry),
3740 				      ARM_ELF_DATA))
3741     {
3742       free (ret);
3743       return NULL;
3744     }
3745 
3746   ret->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
3747   ret->stm32l4xx_fix = BFD_ARM_STM32L4XX_FIX_NONE;
3748 #ifdef FOUR_WORD_PLT
3749   ret->plt_header_size = 16;
3750   ret->plt_entry_size = 16;
3751 #else
3752   ret->plt_header_size = 20;
3753   ret->plt_entry_size = elf32_arm_use_long_plt_entry ? 16 : 12;
3754 #endif
3755   ret->use_rel = 1;
3756   ret->obfd = abfd;
3757 
3758   if (!bfd_hash_table_init (&ret->stub_hash_table, stub_hash_newfunc,
3759 			    sizeof (struct elf32_arm_stub_hash_entry)))
3760     {
3761       _bfd_elf_link_hash_table_free (abfd);
3762       return NULL;
3763     }
3764   ret->root.root.hash_table_free = elf32_arm_link_hash_table_free;
3765 
3766   return &ret->root.root;
3767 }
3768 
3769 /* Determine what kind of NOPs are available.  */
3770 
3771 static bfd_boolean
3772 arch_has_arm_nop (struct elf32_arm_link_hash_table *globals)
3773 {
3774   const int arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
3775 					     Tag_CPU_arch);
3776 
3777   /* Force return logic to be reviewed for each new architecture.  */
3778   BFD_ASSERT (arch <= TAG_CPU_ARCH_V8
3779 	      || arch == TAG_CPU_ARCH_V8M_BASE
3780 	      || arch == TAG_CPU_ARCH_V8M_MAIN);
3781 
3782   return (arch == TAG_CPU_ARCH_V6T2
3783 	  || arch == TAG_CPU_ARCH_V6K
3784 	  || arch == TAG_CPU_ARCH_V7
3785 	  || arch == TAG_CPU_ARCH_V8);
3786 }
3787 
3788 static bfd_boolean
3789 arm_stub_is_thumb (enum elf32_arm_stub_type stub_type)
3790 {
3791   switch (stub_type)
3792     {
3793     case arm_stub_long_branch_thumb_only:
3794     case arm_stub_long_branch_thumb2_only:
3795     case arm_stub_long_branch_v4t_thumb_arm:
3796     case arm_stub_short_branch_v4t_thumb_arm:
3797     case arm_stub_long_branch_v4t_thumb_arm_pic:
3798     case arm_stub_long_branch_v4t_thumb_tls_pic:
3799     case arm_stub_long_branch_thumb_only_pic:
3800       return TRUE;
3801     case arm_stub_none:
3802       BFD_FAIL ();
3803       return FALSE;
3804       break;
3805     default:
3806       return FALSE;
3807     }
3808 }
3809 
3810 /* Determine the type of stub needed, if any, for a call.  */
3811 
3812 static enum elf32_arm_stub_type
3813 arm_type_of_stub (struct bfd_link_info *info,
3814 		  asection *input_sec,
3815 		  const Elf_Internal_Rela *rel,
3816 		  unsigned char st_type,
3817 		  enum arm_st_branch_type *actual_branch_type,
3818 		  struct elf32_arm_link_hash_entry *hash,
3819 		  bfd_vma destination,
3820 		  asection *sym_sec,
3821 		  bfd *input_bfd,
3822 		  const char *name)
3823 {
3824   bfd_vma location;
3825   bfd_signed_vma branch_offset;
3826   unsigned int r_type;
3827   struct elf32_arm_link_hash_table * globals;
3828   int thumb2;
3829   int thumb_only;
3830   enum elf32_arm_stub_type stub_type = arm_stub_none;
3831   int use_plt = 0;
3832   enum arm_st_branch_type branch_type = *actual_branch_type;
3833   union gotplt_union *root_plt;
3834   struct arm_plt_info *arm_plt;
3835 
3836   if (branch_type == ST_BRANCH_LONG)
3837     return stub_type;
3838 
3839   globals = elf32_arm_hash_table (info);
3840   if (globals == NULL)
3841     return stub_type;
3842 
3843   thumb_only = using_thumb_only (globals);
3844 
3845   thumb2 = using_thumb2 (globals);
3846 
3847   /* Determine where the call point is.  */
3848   location = (input_sec->output_offset
3849 	      + input_sec->output_section->vma
3850 	      + rel->r_offset);
3851 
3852   r_type = ELF32_R_TYPE (rel->r_info);
3853 
3854   /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
3855      are considering a function call relocation.  */
3856   if (thumb_only && (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3857                      || r_type == R_ARM_THM_JUMP19)
3858       && branch_type == ST_BRANCH_TO_ARM)
3859     branch_type = ST_BRANCH_TO_THUMB;
3860 
3861   /* For TLS call relocs, it is the caller's responsibility to provide
3862      the address of the appropriate trampoline.  */
3863   if (r_type != R_ARM_TLS_CALL
3864       && r_type != R_ARM_THM_TLS_CALL
3865       && elf32_arm_get_plt_info (input_bfd, hash, ELF32_R_SYM (rel->r_info),
3866 				 &root_plt, &arm_plt)
3867       && root_plt->offset != (bfd_vma) -1)
3868     {
3869       asection *splt;
3870 
3871       if (hash == NULL || hash->is_iplt)
3872 	splt = globals->root.iplt;
3873       else
3874 	splt = globals->root.splt;
3875       if (splt != NULL)
3876 	{
3877 	  use_plt = 1;
3878 
3879 	  /* Note when dealing with PLT entries: the main PLT stub is in
3880 	     ARM mode, so if the branch is in Thumb mode, another
3881 	     Thumb->ARM stub will be inserted later just before the ARM
3882 	     PLT stub. We don't take this extra distance into account
3883 	     here, because if a long branch stub is needed, we'll add a
3884 	     Thumb->Arm one and branch directly to the ARM PLT entry
3885 	     because it avoids spreading offset corrections in several
3886 	     places.  */
3887 
3888 	  destination = (splt->output_section->vma
3889 			 + splt->output_offset
3890 			 + root_plt->offset);
3891 	  st_type = STT_FUNC;
3892 	  branch_type = ST_BRANCH_TO_ARM;
3893 	}
3894     }
3895   /* Calls to STT_GNU_IFUNC symbols should go through a PLT.  */
3896   BFD_ASSERT (st_type != STT_GNU_IFUNC);
3897 
3898   branch_offset = (bfd_signed_vma)(destination - location);
3899 
3900   if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24
3901       || r_type == R_ARM_THM_TLS_CALL || r_type == R_ARM_THM_JUMP19)
3902     {
3903       /* Handle cases where:
3904 	 - this call goes too far (different Thumb/Thumb2 max
3905 	   distance)
3906 	 - it's a Thumb->Arm call and blx is not available, or it's a
3907 	   Thumb->Arm branch (not bl). A stub is needed in this case,
3908 	   but only if this call is not through a PLT entry. Indeed,
3909 	   PLT stubs handle mode switching already.
3910       */
3911       if ((!thumb2
3912 	    && (branch_offset > THM_MAX_FWD_BRANCH_OFFSET
3913 		|| (branch_offset < THM_MAX_BWD_BRANCH_OFFSET)))
3914 	  || (thumb2
3915 	      && (branch_offset > THM2_MAX_FWD_BRANCH_OFFSET
3916 		  || (branch_offset < THM2_MAX_BWD_BRANCH_OFFSET)))
3917 	  || (thumb2
3918 	      && (branch_offset > THM2_MAX_FWD_COND_BRANCH_OFFSET
3919 		  || (branch_offset < THM2_MAX_BWD_COND_BRANCH_OFFSET))
3920 	      && (r_type == R_ARM_THM_JUMP19))
3921 	  || (branch_type == ST_BRANCH_TO_ARM
3922 	      && (((r_type == R_ARM_THM_CALL
3923 		    || r_type == R_ARM_THM_TLS_CALL) && !globals->use_blx)
3924 		  || (r_type == R_ARM_THM_JUMP24)
3925                   || (r_type == R_ARM_THM_JUMP19))
3926 	      && !use_plt))
3927 	{
3928 	  if (branch_type == ST_BRANCH_TO_THUMB)
3929 	    {
3930 	      /* Thumb to thumb.  */
3931 	      if (!thumb_only)
3932 		{
3933 		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3934 		    /* PIC stubs.  */
3935 		    ? ((globals->use_blx
3936 			&& (r_type == R_ARM_THM_CALL))
3937 		       /* V5T and above. Stub starts with ARM code, so
3938 			  we must be able to switch mode before
3939 			  reaching it, which is only possible for 'bl'
3940 			  (ie R_ARM_THM_CALL relocation).  */
3941 		       ? arm_stub_long_branch_any_thumb_pic
3942 		       /* On V4T, use Thumb code only.  */
3943 		       : arm_stub_long_branch_v4t_thumb_thumb_pic)
3944 
3945 		    /* non-PIC stubs.  */
3946 		    : ((globals->use_blx
3947 			&& (r_type == R_ARM_THM_CALL))
3948 		       /* V5T and above.  */
3949 		       ? arm_stub_long_branch_any_any
3950 		       /* V4T.  */
3951 		       : arm_stub_long_branch_v4t_thumb_thumb);
3952 		}
3953 	      else
3954 		{
3955 		  stub_type = (bfd_link_pic (info) | globals->pic_veneer)
3956 		    /* PIC stub.  */
3957 		    ? arm_stub_long_branch_thumb_only_pic
3958 		    /* non-PIC stub.  */
3959 		    : (thumb2 ? arm_stub_long_branch_thumb2_only
3960 			      : arm_stub_long_branch_thumb_only);
3961 		}
3962 	    }
3963 	  else
3964 	    {
3965 	      /* Thumb to arm.  */
3966 	      if (sym_sec != NULL
3967 		  && sym_sec->owner != NULL
3968 		  && !INTERWORK_FLAG (sym_sec->owner))
3969 		{
3970 		  (*_bfd_error_handler)
3971 		    (_("%B(%s): warning: interworking not enabled.\n"
3972 		       "  first occurrence: %B: Thumb call to ARM"),
3973 		     sym_sec->owner, input_bfd, name);
3974 		}
3975 
3976 	      stub_type =
3977 		(bfd_link_pic (info) | globals->pic_veneer)
3978 		/* PIC stubs.  */
3979 		? (r_type == R_ARM_THM_TLS_CALL
3980 		   /* TLS PIC stubs.  */
3981 		   ? (globals->use_blx ? arm_stub_long_branch_any_tls_pic
3982 		      : arm_stub_long_branch_v4t_thumb_tls_pic)
3983 		   : ((globals->use_blx && r_type == R_ARM_THM_CALL)
3984 		      /* V5T PIC and above.  */
3985 		      ? arm_stub_long_branch_any_arm_pic
3986 		      /* V4T PIC stub.  */
3987 		      : arm_stub_long_branch_v4t_thumb_arm_pic))
3988 
3989 		/* non-PIC stubs.  */
3990 		: ((globals->use_blx && r_type == R_ARM_THM_CALL)
3991 		   /* V5T and above.  */
3992 		   ? arm_stub_long_branch_any_any
3993 		   /* V4T.  */
3994 		   : arm_stub_long_branch_v4t_thumb_arm);
3995 
3996 	      /* Handle v4t short branches.  */
3997 	      if ((stub_type == arm_stub_long_branch_v4t_thumb_arm)
3998 		  && (branch_offset <= THM_MAX_FWD_BRANCH_OFFSET)
3999 		  && (branch_offset >= THM_MAX_BWD_BRANCH_OFFSET))
4000 		stub_type = arm_stub_short_branch_v4t_thumb_arm;
4001 	    }
4002 	}
4003     }
4004   else if (r_type == R_ARM_CALL
4005 	   || r_type == R_ARM_JUMP24
4006 	   || r_type == R_ARM_PLT32
4007 	   || r_type == R_ARM_TLS_CALL)
4008     {
4009       if (branch_type == ST_BRANCH_TO_THUMB)
4010 	{
4011 	  /* Arm to thumb.  */
4012 
4013 	  if (sym_sec != NULL
4014 	      && sym_sec->owner != NULL
4015 	      && !INTERWORK_FLAG (sym_sec->owner))
4016 	    {
4017 	      (*_bfd_error_handler)
4018 		(_("%B(%s): warning: interworking not enabled.\n"
4019 		   "  first occurrence: %B: ARM call to Thumb"),
4020 		 sym_sec->owner, input_bfd, name);
4021 	    }
4022 
4023 	  /* We have an extra 2-bytes reach because of
4024 	     the mode change (bit 24 (H) of BLX encoding).  */
4025 	  if (branch_offset > (ARM_MAX_FWD_BRANCH_OFFSET + 2)
4026 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET)
4027 	      || (r_type == R_ARM_CALL && !globals->use_blx)
4028 	      || (r_type == R_ARM_JUMP24)
4029 	      || (r_type == R_ARM_PLT32))
4030 	    {
4031 	      stub_type = (bfd_link_pic (info) | globals->pic_veneer)
4032 		/* PIC stubs.  */
4033 		? ((globals->use_blx)
4034 		   /* V5T and above.  */
4035 		   ? arm_stub_long_branch_any_thumb_pic
4036 		   /* V4T stub.  */
4037 		   : arm_stub_long_branch_v4t_arm_thumb_pic)
4038 
4039 		/* non-PIC stubs.  */
4040 		: ((globals->use_blx)
4041 		   /* V5T and above.  */
4042 		   ? arm_stub_long_branch_any_any
4043 		   /* V4T.  */
4044 		   : arm_stub_long_branch_v4t_arm_thumb);
4045 	    }
4046 	}
4047       else
4048 	{
4049 	  /* Arm to arm.  */
4050 	  if (branch_offset > ARM_MAX_FWD_BRANCH_OFFSET
4051 	      || (branch_offset < ARM_MAX_BWD_BRANCH_OFFSET))
4052 	    {
4053 	      stub_type =
4054 		(bfd_link_pic (info) | globals->pic_veneer)
4055 		/* PIC stubs.  */
4056 		? (r_type == R_ARM_TLS_CALL
4057 		   /* TLS PIC Stub.  */
4058 		   ? arm_stub_long_branch_any_tls_pic
4059 		   : (globals->nacl_p
4060 		      ? arm_stub_long_branch_arm_nacl_pic
4061 		      : arm_stub_long_branch_any_arm_pic))
4062 		/* non-PIC stubs.  */
4063 		: (globals->nacl_p
4064 		   ? arm_stub_long_branch_arm_nacl
4065 		   : arm_stub_long_branch_any_any);
4066 	    }
4067 	}
4068     }
4069 
4070   /* If a stub is needed, record the actual destination type.  */
4071   if (stub_type != arm_stub_none)
4072     *actual_branch_type = branch_type;
4073 
4074   return stub_type;
4075 }
4076 
4077 /* Build a name for an entry in the stub hash table.  */
4078 
4079 static char *
4080 elf32_arm_stub_name (const asection *input_section,
4081 		     const asection *sym_sec,
4082 		     const struct elf32_arm_link_hash_entry *hash,
4083 		     const Elf_Internal_Rela *rel,
4084 		     enum elf32_arm_stub_type stub_type)
4085 {
4086   char *stub_name;
4087   bfd_size_type len;
4088 
4089   if (hash)
4090     {
4091       len = 8 + 1 + strlen (hash->root.root.root.string) + 1 + 8 + 1 + 2 + 1;
4092       stub_name = (char *) bfd_malloc (len);
4093       if (stub_name != NULL)
4094 	sprintf (stub_name, "%08x_%s+%x_%d",
4095 		 input_section->id & 0xffffffff,
4096 		 hash->root.root.root.string,
4097 		 (int) rel->r_addend & 0xffffffff,
4098 		 (int) stub_type);
4099     }
4100   else
4101     {
4102       len = 8 + 1 + 8 + 1 + 8 + 1 + 8 + 1 + 2 + 1;
4103       stub_name = (char *) bfd_malloc (len);
4104       if (stub_name != NULL)
4105 	sprintf (stub_name, "%08x_%x:%x+%x_%d",
4106 		 input_section->id & 0xffffffff,
4107 		 sym_sec->id & 0xffffffff,
4108 		 ELF32_R_TYPE (rel->r_info) == R_ARM_TLS_CALL
4109 		 || ELF32_R_TYPE (rel->r_info) == R_ARM_THM_TLS_CALL
4110 		 ? 0 : (int) ELF32_R_SYM (rel->r_info) & 0xffffffff,
4111 		 (int) rel->r_addend & 0xffffffff,
4112 		 (int) stub_type);
4113     }
4114 
4115   return stub_name;
4116 }
4117 
4118 /* Look up an entry in the stub hash.  Stub entries are cached because
4119    creating the stub name takes a bit of time.  */
4120 
4121 static struct elf32_arm_stub_hash_entry *
4122 elf32_arm_get_stub_entry (const asection *input_section,
4123 			  const asection *sym_sec,
4124 			  struct elf_link_hash_entry *hash,
4125 			  const Elf_Internal_Rela *rel,
4126 			  struct elf32_arm_link_hash_table *htab,
4127 			  enum elf32_arm_stub_type stub_type)
4128 {
4129   struct elf32_arm_stub_hash_entry *stub_entry;
4130   struct elf32_arm_link_hash_entry *h = (struct elf32_arm_link_hash_entry *) hash;
4131   const asection *id_sec;
4132 
4133   if ((input_section->flags & SEC_CODE) == 0)
4134     return NULL;
4135 
4136   /* If this input section is part of a group of sections sharing one
4137      stub section, then use the id of the first section in the group.
4138      Stub names need to include a section id, as there may well be
4139      more than one stub used to reach say, printf, and we need to
4140      distinguish between them.  */
4141   id_sec = htab->stub_group[input_section->id].link_sec;
4142 
4143   if (h != NULL && h->stub_cache != NULL
4144       && h->stub_cache->h == h
4145       && h->stub_cache->id_sec == id_sec
4146       && h->stub_cache->stub_type == stub_type)
4147     {
4148       stub_entry = h->stub_cache;
4149     }
4150   else
4151     {
4152       char *stub_name;
4153 
4154       stub_name = elf32_arm_stub_name (id_sec, sym_sec, h, rel, stub_type);
4155       if (stub_name == NULL)
4156 	return NULL;
4157 
4158       stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table,
4159 					stub_name, FALSE, FALSE);
4160       if (h != NULL)
4161 	h->stub_cache = stub_entry;
4162 
4163       free (stub_name);
4164     }
4165 
4166   return stub_entry;
4167 }
4168 
4169 /* Whether veneers of type STUB_TYPE require to be in a dedicated output
4170    section.  */
4171 
4172 static bfd_boolean
4173 arm_dedicated_stub_output_section_required (enum elf32_arm_stub_type stub_type)
4174 {
4175   if (stub_type >= max_stub_type)
4176     abort ();  /* Should be unreachable.  */
4177 
4178   return FALSE;
4179 }
4180 
4181 /* Required alignment (as a power of 2) for the dedicated section holding
4182    veneers of type STUB_TYPE, or 0 if veneers of this type are interspersed
4183    with input sections.  */
4184 
4185 static int
4186 arm_dedicated_stub_output_section_required_alignment
4187   (enum elf32_arm_stub_type stub_type)
4188 {
4189   if (stub_type >= max_stub_type)
4190     abort ();  /* Should be unreachable.  */
4191 
4192   BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4193   return 0;
4194 }
4195 
4196 /* Name of the dedicated output section to put veneers of type STUB_TYPE, or
4197    NULL if veneers of this type are interspersed with input sections.  */
4198 
4199 static const char *
4200 arm_dedicated_stub_output_section_name (enum elf32_arm_stub_type stub_type)
4201 {
4202   if (stub_type >= max_stub_type)
4203     abort ();  /* Should be unreachable.  */
4204 
4205   BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4206   return NULL;
4207 }
4208 
4209 /* If veneers of type STUB_TYPE should go in a dedicated output section,
4210    returns the address of the hash table field in HTAB holding a pointer to the
4211    corresponding input section.  Otherwise, returns NULL.  */
4212 
4213 static asection **
4214 arm_dedicated_stub_input_section_ptr
4215   (struct elf32_arm_link_hash_table *htab ATTRIBUTE_UNUSED,
4216    enum elf32_arm_stub_type stub_type)
4217 {
4218   if (stub_type >= max_stub_type)
4219     abort ();  /* Should be unreachable.  */
4220 
4221   BFD_ASSERT (!arm_dedicated_stub_output_section_required (stub_type));
4222   return NULL;
4223 }
4224 
4225 /* Find or create a stub section to contain a stub of type STUB_TYPE.  SECTION
4226    is the section that branch into veneer and can be NULL if stub should go in
4227    a dedicated output section.  Returns a pointer to the stub section, and the
4228    section to which the stub section will be attached (in *LINK_SEC_P).
4229    LINK_SEC_P may be NULL.  */
4230 
4231 static asection *
4232 elf32_arm_create_or_find_stub_sec (asection **link_sec_p, asection *section,
4233 				   struct elf32_arm_link_hash_table *htab,
4234 				   enum elf32_arm_stub_type stub_type)
4235 {
4236   asection *link_sec, *out_sec, **stub_sec_p;
4237   const char *stub_sec_prefix;
4238   bfd_boolean dedicated_output_section =
4239     arm_dedicated_stub_output_section_required (stub_type);
4240   int align;
4241 
4242   if (dedicated_output_section)
4243     {
4244       bfd *output_bfd = htab->obfd;
4245       const char *out_sec_name =
4246 	arm_dedicated_stub_output_section_name (stub_type);
4247       link_sec = NULL;
4248       stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
4249       stub_sec_prefix = out_sec_name;
4250       align = arm_dedicated_stub_output_section_required_alignment (stub_type);
4251       out_sec = bfd_get_section_by_name (output_bfd, out_sec_name);
4252       if (out_sec == NULL)
4253 	{
4254 	  (*_bfd_error_handler) (_("No address assigned to the veneers output "
4255 				   "section %s"), out_sec_name);
4256 	  return NULL;
4257 	}
4258     }
4259   else
4260     {
4261       link_sec = htab->stub_group[section->id].link_sec;
4262       BFD_ASSERT (link_sec != NULL);
4263       stub_sec_p = &htab->stub_group[section->id].stub_sec;
4264       if (*stub_sec_p == NULL)
4265 	stub_sec_p = &htab->stub_group[link_sec->id].stub_sec;
4266       stub_sec_prefix = link_sec->name;
4267       out_sec = link_sec->output_section;
4268       align = htab->nacl_p ? 4 : 3;
4269     }
4270 
4271   if (*stub_sec_p == NULL)
4272     {
4273       size_t namelen;
4274       bfd_size_type len;
4275       char *s_name;
4276 
4277       namelen = strlen (stub_sec_prefix);
4278       len = namelen + sizeof (STUB_SUFFIX);
4279       s_name = (char *) bfd_alloc (htab->stub_bfd, len);
4280       if (s_name == NULL)
4281 	return NULL;
4282 
4283       memcpy (s_name, stub_sec_prefix, namelen);
4284       memcpy (s_name + namelen, STUB_SUFFIX, sizeof (STUB_SUFFIX));
4285       *stub_sec_p = (*htab->add_stub_section) (s_name, out_sec, link_sec,
4286 					       align);
4287       if (*stub_sec_p == NULL)
4288 	return NULL;
4289 
4290       out_sec->flags |= SEC_ALLOC | SEC_LOAD | SEC_READONLY | SEC_CODE
4291 			| SEC_HAS_CONTENTS | SEC_RELOC | SEC_IN_MEMORY
4292 			| SEC_KEEP;
4293     }
4294 
4295   if (!dedicated_output_section)
4296     htab->stub_group[section->id].stub_sec = *stub_sec_p;
4297 
4298   if (link_sec_p)
4299     *link_sec_p = link_sec;
4300 
4301   return *stub_sec_p;
4302 }
4303 
4304 /* Add a new stub entry to the stub hash.  Not all fields of the new
4305    stub entry are initialised.  */
4306 
4307 static struct elf32_arm_stub_hash_entry *
4308 elf32_arm_add_stub (const char *stub_name, asection *section,
4309 		    struct elf32_arm_link_hash_table *htab,
4310 		    enum elf32_arm_stub_type stub_type)
4311 {
4312   asection *link_sec;
4313   asection *stub_sec;
4314   struct elf32_arm_stub_hash_entry *stub_entry;
4315 
4316   stub_sec = elf32_arm_create_or_find_stub_sec (&link_sec, section, htab,
4317 						stub_type);
4318   if (stub_sec == NULL)
4319     return NULL;
4320 
4321   /* Enter this entry into the linker stub hash table.  */
4322   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
4323 				     TRUE, FALSE);
4324   if (stub_entry == NULL)
4325     {
4326       if (section == NULL)
4327 	section = stub_sec;
4328       (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
4329 			     section->owner,
4330 			     stub_name);
4331       return NULL;
4332     }
4333 
4334   stub_entry->stub_sec = stub_sec;
4335   stub_entry->stub_offset = 0;
4336   stub_entry->id_sec = link_sec;
4337 
4338   return stub_entry;
4339 }
4340 
4341 /* Store an Arm insn into an output section not processed by
4342    elf32_arm_write_section.  */
4343 
4344 static void
4345 put_arm_insn (struct elf32_arm_link_hash_table * htab,
4346 	      bfd * output_bfd, bfd_vma val, void * ptr)
4347 {
4348   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4349     bfd_putl32 (val, ptr);
4350   else
4351     bfd_putb32 (val, ptr);
4352 }
4353 
4354 /* Store a 16-bit Thumb insn into an output section not processed by
4355    elf32_arm_write_section.  */
4356 
4357 static void
4358 put_thumb_insn (struct elf32_arm_link_hash_table * htab,
4359 		bfd * output_bfd, bfd_vma val, void * ptr)
4360 {
4361   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4362     bfd_putl16 (val, ptr);
4363   else
4364     bfd_putb16 (val, ptr);
4365 }
4366 
4367 /* Store a Thumb2 insn into an output section not processed by
4368    elf32_arm_write_section.  */
4369 
4370 static void
4371 put_thumb2_insn (struct elf32_arm_link_hash_table * htab,
4372 		 bfd * output_bfd, bfd_vma val, bfd_byte * ptr)
4373 {
4374   /* T2 instructions are 16-bit streamed.  */
4375   if (htab->byteswap_code != bfd_little_endian (output_bfd))
4376     {
4377       bfd_putl16 ((val >> 16) & 0xffff, ptr);
4378       bfd_putl16 ((val & 0xffff), ptr + 2);
4379     }
4380   else
4381     {
4382       bfd_putb16 ((val >> 16) & 0xffff, ptr);
4383       bfd_putb16 ((val & 0xffff), ptr + 2);
4384     }
4385 }
4386 
4387 /* If it's possible to change R_TYPE to a more efficient access
4388    model, return the new reloc type.  */
4389 
4390 static unsigned
4391 elf32_arm_tls_transition (struct bfd_link_info *info, int r_type,
4392 			  struct elf_link_hash_entry *h)
4393 {
4394   int is_local = (h == NULL);
4395 
4396   if (bfd_link_pic (info)
4397       || (h && h->root.type == bfd_link_hash_undefweak))
4398     return r_type;
4399 
4400   /* We do not support relaxations for Old TLS models.  */
4401   switch (r_type)
4402     {
4403     case R_ARM_TLS_GOTDESC:
4404     case R_ARM_TLS_CALL:
4405     case R_ARM_THM_TLS_CALL:
4406     case R_ARM_TLS_DESCSEQ:
4407     case R_ARM_THM_TLS_DESCSEQ:
4408       return is_local ? R_ARM_TLS_LE32 : R_ARM_TLS_IE32;
4409     }
4410 
4411   return r_type;
4412 }
4413 
4414 static bfd_reloc_status_type elf32_arm_final_link_relocate
4415   (reloc_howto_type *, bfd *, bfd *, asection *, bfd_byte *,
4416    Elf_Internal_Rela *, bfd_vma, struct bfd_link_info *, asection *,
4417    const char *, unsigned char, enum arm_st_branch_type,
4418    struct elf_link_hash_entry *, bfd_boolean *, char **);
4419 
4420 static unsigned int
4421 arm_stub_required_alignment (enum elf32_arm_stub_type stub_type)
4422 {
4423   switch (stub_type)
4424     {
4425     case arm_stub_a8_veneer_b_cond:
4426     case arm_stub_a8_veneer_b:
4427     case arm_stub_a8_veneer_bl:
4428       return 2;
4429 
4430     case arm_stub_long_branch_any_any:
4431     case arm_stub_long_branch_v4t_arm_thumb:
4432     case arm_stub_long_branch_thumb_only:
4433     case arm_stub_long_branch_thumb2_only:
4434     case arm_stub_long_branch_v4t_thumb_thumb:
4435     case arm_stub_long_branch_v4t_thumb_arm:
4436     case arm_stub_short_branch_v4t_thumb_arm:
4437     case arm_stub_long_branch_any_arm_pic:
4438     case arm_stub_long_branch_any_thumb_pic:
4439     case arm_stub_long_branch_v4t_thumb_thumb_pic:
4440     case arm_stub_long_branch_v4t_arm_thumb_pic:
4441     case arm_stub_long_branch_v4t_thumb_arm_pic:
4442     case arm_stub_long_branch_thumb_only_pic:
4443     case arm_stub_long_branch_any_tls_pic:
4444     case arm_stub_long_branch_v4t_thumb_tls_pic:
4445     case arm_stub_a8_veneer_blx:
4446       return 4;
4447 
4448     case arm_stub_long_branch_arm_nacl:
4449     case arm_stub_long_branch_arm_nacl_pic:
4450       return 16;
4451 
4452     default:
4453       abort ();  /* Should be unreachable.  */
4454     }
4455 }
4456 
4457 /* Returns whether stubs of type STUB_TYPE take over the symbol they are
4458    veneering (TRUE) or have their own symbol (FALSE).  */
4459 
4460 static bfd_boolean
4461 arm_stub_sym_claimed (enum elf32_arm_stub_type stub_type)
4462 {
4463   if (stub_type >= max_stub_type)
4464     abort ();  /* Should be unreachable.  */
4465 
4466   return FALSE;
4467 }
4468 
4469 /* Returns the padding needed for the dedicated section used stubs of type
4470    STUB_TYPE.  */
4471 
4472 static int
4473 arm_dedicated_stub_section_padding (enum elf32_arm_stub_type stub_type)
4474 {
4475   if (stub_type >= max_stub_type)
4476     abort ();  /* Should be unreachable.  */
4477 
4478   return 0;
4479 }
4480 
4481 static bfd_boolean
4482 arm_build_one_stub (struct bfd_hash_entry *gen_entry,
4483 		    void * in_arg)
4484 {
4485 #define MAXRELOCS 3
4486   struct elf32_arm_stub_hash_entry *stub_entry;
4487   struct elf32_arm_link_hash_table *globals;
4488   struct bfd_link_info *info;
4489   asection *stub_sec;
4490   bfd *stub_bfd;
4491   bfd_byte *loc;
4492   bfd_vma sym_value;
4493   int template_size;
4494   int size;
4495   const insn_sequence *template_sequence;
4496   int i;
4497   int stub_reloc_idx[MAXRELOCS] = {-1, -1};
4498   int stub_reloc_offset[MAXRELOCS] = {0, 0};
4499   int nrelocs = 0;
4500 
4501   /* Massage our args to the form they really have.  */
4502   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4503   info = (struct bfd_link_info *) in_arg;
4504 
4505   globals = elf32_arm_hash_table (info);
4506   if (globals == NULL)
4507     return FALSE;
4508 
4509   stub_sec = stub_entry->stub_sec;
4510 
4511   if ((globals->fix_cortex_a8 < 0)
4512       != (arm_stub_required_alignment (stub_entry->stub_type) == 2))
4513     /* We have to do less-strictly-aligned fixes last.  */
4514     return TRUE;
4515 
4516   /* Make a note of the offset within the stubs for this entry.  */
4517   stub_entry->stub_offset = stub_sec->size;
4518   loc = stub_sec->contents + stub_entry->stub_offset;
4519 
4520   stub_bfd = stub_sec->owner;
4521 
4522   /* This is the address of the stub destination.  */
4523   sym_value = (stub_entry->target_value
4524 	       + stub_entry->target_section->output_offset
4525 	       + stub_entry->target_section->output_section->vma);
4526 
4527   template_sequence = stub_entry->stub_template;
4528   template_size = stub_entry->stub_template_size;
4529 
4530   size = 0;
4531   for (i = 0; i < template_size; i++)
4532     {
4533       switch (template_sequence[i].type)
4534 	{
4535 	case THUMB16_TYPE:
4536 	  {
4537 	    bfd_vma data = (bfd_vma) template_sequence[i].data;
4538 	    if (template_sequence[i].reloc_addend != 0)
4539 	      {
4540 		/* We've borrowed the reloc_addend field to mean we should
4541 		   insert a condition code into this (Thumb-1 branch)
4542 		   instruction.  See THUMB16_BCOND_INSN.  */
4543 		BFD_ASSERT ((data & 0xff00) == 0xd000);
4544 		data |= ((stub_entry->orig_insn >> 22) & 0xf) << 8;
4545 	      }
4546 	    bfd_put_16 (stub_bfd, data, loc + size);
4547 	    size += 2;
4548 	  }
4549 	  break;
4550 
4551 	case THUMB32_TYPE:
4552 	  bfd_put_16 (stub_bfd,
4553 		      (template_sequence[i].data >> 16) & 0xffff,
4554 		      loc + size);
4555 	  bfd_put_16 (stub_bfd, template_sequence[i].data & 0xffff,
4556 		      loc + size + 2);
4557 	  if (template_sequence[i].r_type != R_ARM_NONE)
4558 	    {
4559 	      stub_reloc_idx[nrelocs] = i;
4560 	      stub_reloc_offset[nrelocs++] = size;
4561 	    }
4562 	  size += 4;
4563 	  break;
4564 
4565 	case ARM_TYPE:
4566 	  bfd_put_32 (stub_bfd, template_sequence[i].data,
4567 		      loc + size);
4568 	  /* Handle cases where the target is encoded within the
4569 	     instruction.  */
4570 	  if (template_sequence[i].r_type == R_ARM_JUMP24)
4571 	    {
4572 	      stub_reloc_idx[nrelocs] = i;
4573 	      stub_reloc_offset[nrelocs++] = size;
4574 	    }
4575 	  size += 4;
4576 	  break;
4577 
4578 	case DATA_TYPE:
4579 	  bfd_put_32 (stub_bfd, template_sequence[i].data, loc + size);
4580 	  stub_reloc_idx[nrelocs] = i;
4581 	  stub_reloc_offset[nrelocs++] = size;
4582 	  size += 4;
4583 	  break;
4584 
4585 	default:
4586 	  BFD_FAIL ();
4587 	  return FALSE;
4588 	}
4589     }
4590 
4591   stub_sec->size += size;
4592 
4593   /* Stub size has already been computed in arm_size_one_stub. Check
4594      consistency.  */
4595   BFD_ASSERT (size == stub_entry->stub_size);
4596 
4597   /* Destination is Thumb. Force bit 0 to 1 to reflect this.  */
4598   if (stub_entry->branch_type == ST_BRANCH_TO_THUMB)
4599     sym_value |= 1;
4600 
4601   /* Assume there is at least one and at most MAXRELOCS entries to relocate
4602      in each stub.  */
4603   BFD_ASSERT (nrelocs != 0 && nrelocs <= MAXRELOCS);
4604 
4605   for (i = 0; i < nrelocs; i++)
4606     {
4607       Elf_Internal_Rela rel;
4608       bfd_boolean unresolved_reloc;
4609       char *error_message;
4610       bfd_vma points_to =
4611 	sym_value + template_sequence[stub_reloc_idx[i]].reloc_addend;
4612 
4613       rel.r_offset = stub_entry->stub_offset + stub_reloc_offset[i];
4614       rel.r_info = ELF32_R_INFO (0,
4615 				 template_sequence[stub_reloc_idx[i]].r_type);
4616       rel.r_addend = 0;
4617 
4618       if (stub_entry->stub_type == arm_stub_a8_veneer_b_cond && i == 0)
4619 	/* The first relocation in the elf32_arm_stub_a8_veneer_b_cond[]
4620 	   template should refer back to the instruction after the original
4621 	   branch.  We use target_section as Cortex-A8 erratum workaround stubs
4622 	   are only generated when both source and target are in the same
4623 	   section.  */
4624 	points_to = stub_entry->target_section->output_section->vma
4625 		    + stub_entry->target_section->output_offset
4626 		    + stub_entry->source_value;
4627 
4628       elf32_arm_final_link_relocate (elf32_arm_howto_from_type
4629 	  (template_sequence[stub_reloc_idx[i]].r_type),
4630 	   stub_bfd, info->output_bfd, stub_sec, stub_sec->contents, &rel,
4631 	   points_to, info, stub_entry->target_section, "", STT_FUNC,
4632 	   stub_entry->branch_type,
4633 	   (struct elf_link_hash_entry *) stub_entry->h, &unresolved_reloc,
4634 	   &error_message);
4635     }
4636 
4637   return TRUE;
4638 #undef MAXRELOCS
4639 }
4640 
4641 /* Calculate the template, template size and instruction size for a stub.
4642    Return value is the instruction size.  */
4643 
4644 static unsigned int
4645 find_stub_size_and_template (enum elf32_arm_stub_type stub_type,
4646 			     const insn_sequence **stub_template,
4647 			     int *stub_template_size)
4648 {
4649   const insn_sequence *template_sequence = NULL;
4650   int template_size = 0, i;
4651   unsigned int size;
4652 
4653   template_sequence = stub_definitions[stub_type].template_sequence;
4654   if (stub_template)
4655     *stub_template = template_sequence;
4656 
4657   template_size = stub_definitions[stub_type].template_size;
4658   if (stub_template_size)
4659     *stub_template_size = template_size;
4660 
4661   size = 0;
4662   for (i = 0; i < template_size; i++)
4663     {
4664       switch (template_sequence[i].type)
4665 	{
4666 	case THUMB16_TYPE:
4667 	  size += 2;
4668 	  break;
4669 
4670 	case ARM_TYPE:
4671 	case THUMB32_TYPE:
4672 	case DATA_TYPE:
4673 	  size += 4;
4674 	  break;
4675 
4676 	default:
4677 	  BFD_FAIL ();
4678 	  return 0;
4679 	}
4680     }
4681 
4682   return size;
4683 }
4684 
4685 /* As above, but don't actually build the stub.  Just bump offset so
4686    we know stub section sizes.  */
4687 
4688 static bfd_boolean
4689 arm_size_one_stub (struct bfd_hash_entry *gen_entry,
4690 		   void *in_arg ATTRIBUTE_UNUSED)
4691 {
4692   struct elf32_arm_stub_hash_entry *stub_entry;
4693   const insn_sequence *template_sequence;
4694   int template_size, size;
4695 
4696   /* Massage our args to the form they really have.  */
4697   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
4698 
4699   BFD_ASSERT((stub_entry->stub_type > arm_stub_none)
4700 	     && stub_entry->stub_type < ARRAY_SIZE(stub_definitions));
4701 
4702   size = find_stub_size_and_template (stub_entry->stub_type, &template_sequence,
4703 				      &template_size);
4704 
4705   stub_entry->stub_size = size;
4706   stub_entry->stub_template = template_sequence;
4707   stub_entry->stub_template_size = template_size;
4708 
4709   size = (size + 7) & ~7;
4710   stub_entry->stub_sec->size += size;
4711 
4712   return TRUE;
4713 }
4714 
4715 /* External entry points for sizing and building linker stubs.  */
4716 
4717 /* Set up various things so that we can make a list of input sections
4718    for each output section included in the link.  Returns -1 on error,
4719    0 when no stubs will be needed, and 1 on success.  */
4720 
4721 int
4722 elf32_arm_setup_section_lists (bfd *output_bfd,
4723 			       struct bfd_link_info *info)
4724 {
4725   bfd *input_bfd;
4726   unsigned int bfd_count;
4727   unsigned int top_id, top_index;
4728   asection *section;
4729   asection **input_list, **list;
4730   bfd_size_type amt;
4731   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4732 
4733   if (htab == NULL)
4734     return 0;
4735   if (! is_elf_hash_table (htab))
4736     return 0;
4737 
4738   /* Count the number of input BFDs and find the top input section id.  */
4739   for (input_bfd = info->input_bfds, bfd_count = 0, top_id = 0;
4740        input_bfd != NULL;
4741        input_bfd = input_bfd->link.next)
4742     {
4743       bfd_count += 1;
4744       for (section = input_bfd->sections;
4745 	   section != NULL;
4746 	   section = section->next)
4747 	{
4748 	  if (top_id < section->id)
4749 	    top_id = section->id;
4750 	}
4751     }
4752   htab->bfd_count = bfd_count;
4753 
4754   amt = sizeof (struct map_stub) * (top_id + 1);
4755   htab->stub_group = (struct map_stub *) bfd_zmalloc (amt);
4756   if (htab->stub_group == NULL)
4757     return -1;
4758   htab->top_id = top_id;
4759 
4760   /* We can't use output_bfd->section_count here to find the top output
4761      section index as some sections may have been removed, and
4762      _bfd_strip_section_from_output doesn't renumber the indices.  */
4763   for (section = output_bfd->sections, top_index = 0;
4764        section != NULL;
4765        section = section->next)
4766     {
4767       if (top_index < section->index)
4768 	top_index = section->index;
4769     }
4770 
4771   htab->top_index = top_index;
4772   amt = sizeof (asection *) * (top_index + 1);
4773   input_list = (asection **) bfd_malloc (amt);
4774   htab->input_list = input_list;
4775   if (input_list == NULL)
4776     return -1;
4777 
4778   /* For sections we aren't interested in, mark their entries with a
4779      value we can check later.  */
4780   list = input_list + top_index;
4781   do
4782     *list = bfd_abs_section_ptr;
4783   while (list-- != input_list);
4784 
4785   for (section = output_bfd->sections;
4786        section != NULL;
4787        section = section->next)
4788     {
4789       if ((section->flags & SEC_CODE) != 0)
4790 	input_list[section->index] = NULL;
4791     }
4792 
4793   return 1;
4794 }
4795 
4796 /* The linker repeatedly calls this function for each input section,
4797    in the order that input sections are linked into output sections.
4798    Build lists of input sections to determine groupings between which
4799    we may insert linker stubs.  */
4800 
4801 void
4802 elf32_arm_next_input_section (struct bfd_link_info *info,
4803 			      asection *isec)
4804 {
4805   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4806 
4807   if (htab == NULL)
4808     return;
4809 
4810   if (isec->output_section->index <= htab->top_index)
4811     {
4812       asection **list = htab->input_list + isec->output_section->index;
4813 
4814       if (*list != bfd_abs_section_ptr && (isec->flags & SEC_CODE) != 0)
4815 	{
4816 	  /* Steal the link_sec pointer for our list.  */
4817 #define PREV_SEC(sec) (htab->stub_group[(sec)->id].link_sec)
4818 	  /* This happens to make the list in reverse order,
4819 	     which we reverse later.  */
4820 	  PREV_SEC (isec) = *list;
4821 	  *list = isec;
4822 	}
4823     }
4824 }
4825 
4826 /* See whether we can group stub sections together.  Grouping stub
4827    sections may result in fewer stubs.  More importantly, we need to
4828    put all .init* and .fini* stubs at the end of the .init or
4829    .fini output sections respectively, because glibc splits the
4830    _init and _fini functions into multiple parts.  Putting a stub in
4831    the middle of a function is not a good idea.  */
4832 
4833 static void
4834 group_sections (struct elf32_arm_link_hash_table *htab,
4835 		bfd_size_type stub_group_size,
4836 		bfd_boolean stubs_always_after_branch)
4837 {
4838   asection **list = htab->input_list;
4839 
4840   do
4841     {
4842       asection *tail = *list;
4843       asection *head;
4844 
4845       if (tail == bfd_abs_section_ptr)
4846 	continue;
4847 
4848       /* Reverse the list: we must avoid placing stubs at the
4849 	 beginning of the section because the beginning of the text
4850 	 section may be required for an interrupt vector in bare metal
4851 	 code.  */
4852 #define NEXT_SEC PREV_SEC
4853       head = NULL;
4854       while (tail != NULL)
4855 	{
4856 	  /* Pop from tail.  */
4857 	  asection *item = tail;
4858 	  tail = PREV_SEC (item);
4859 
4860 	  /* Push on head.  */
4861 	  NEXT_SEC (item) = head;
4862 	  head = item;
4863 	}
4864 
4865       while (head != NULL)
4866 	{
4867 	  asection *curr;
4868 	  asection *next;
4869 	  bfd_vma stub_group_start = head->output_offset;
4870 	  bfd_vma end_of_next;
4871 
4872 	  curr = head;
4873 	  while (NEXT_SEC (curr) != NULL)
4874 	    {
4875 	      next = NEXT_SEC (curr);
4876 	      end_of_next = next->output_offset + next->size;
4877 	      if (end_of_next - stub_group_start >= stub_group_size)
4878 		/* End of NEXT is too far from start, so stop.  */
4879 		break;
4880 	      /* Add NEXT to the group.  */
4881 	      curr = next;
4882 	    }
4883 
4884 	  /* OK, the size from the start to the start of CURR is less
4885 	     than stub_group_size and thus can be handled by one stub
4886 	     section.  (Or the head section is itself larger than
4887 	     stub_group_size, in which case we may be toast.)
4888 	     We should really be keeping track of the total size of
4889 	     stubs added here, as stubs contribute to the final output
4890 	     section size.  */
4891 	  do
4892 	    {
4893 	      next = NEXT_SEC (head);
4894 	      /* Set up this stub group.  */
4895 	      htab->stub_group[head->id].link_sec = curr;
4896 	    }
4897 	  while (head != curr && (head = next) != NULL);
4898 
4899 	  /* But wait, there's more!  Input sections up to stub_group_size
4900 	     bytes after the stub section can be handled by it too.  */
4901 	  if (!stubs_always_after_branch)
4902 	    {
4903 	      stub_group_start = curr->output_offset + curr->size;
4904 
4905 	      while (next != NULL)
4906 		{
4907 		  end_of_next = next->output_offset + next->size;
4908 		  if (end_of_next - stub_group_start >= stub_group_size)
4909 		    /* End of NEXT is too far from stubs, so stop.  */
4910 		    break;
4911 		  /* Add NEXT to the stub group.  */
4912 		  head = next;
4913 		  next = NEXT_SEC (head);
4914 		  htab->stub_group[head->id].link_sec = curr;
4915 		}
4916 	    }
4917 	  head = next;
4918 	}
4919     }
4920   while (list++ != htab->input_list + htab->top_index);
4921 
4922   free (htab->input_list);
4923 #undef PREV_SEC
4924 #undef NEXT_SEC
4925 }
4926 
4927 /* Comparison function for sorting/searching relocations relating to Cortex-A8
4928    erratum fix.  */
4929 
4930 static int
4931 a8_reloc_compare (const void *a, const void *b)
4932 {
4933   const struct a8_erratum_reloc *ra = (const struct a8_erratum_reloc *) a;
4934   const struct a8_erratum_reloc *rb = (const struct a8_erratum_reloc *) b;
4935 
4936   if (ra->from < rb->from)
4937     return -1;
4938   else if (ra->from > rb->from)
4939     return 1;
4940   else
4941     return 0;
4942 }
4943 
4944 static struct elf_link_hash_entry *find_thumb_glue (struct bfd_link_info *,
4945 						    const char *, char **);
4946 
4947 /* Helper function to scan code for sequences which might trigger the Cortex-A8
4948    branch/TLB erratum.  Fill in the table described by A8_FIXES_P,
4949    NUM_A8_FIXES_P, A8_FIX_TABLE_SIZE_P.  Returns true if an error occurs, false
4950    otherwise.  */
4951 
4952 static bfd_boolean
4953 cortex_a8_erratum_scan (bfd *input_bfd,
4954 			struct bfd_link_info *info,
4955 			struct a8_erratum_fix **a8_fixes_p,
4956 			unsigned int *num_a8_fixes_p,
4957 			unsigned int *a8_fix_table_size_p,
4958 			struct a8_erratum_reloc *a8_relocs,
4959 			unsigned int num_a8_relocs,
4960 			unsigned prev_num_a8_fixes,
4961 			bfd_boolean *stub_changed_p)
4962 {
4963   asection *section;
4964   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
4965   struct a8_erratum_fix *a8_fixes = *a8_fixes_p;
4966   unsigned int num_a8_fixes = *num_a8_fixes_p;
4967   unsigned int a8_fix_table_size = *a8_fix_table_size_p;
4968 
4969   if (htab == NULL)
4970     return FALSE;
4971 
4972   for (section = input_bfd->sections;
4973        section != NULL;
4974        section = section->next)
4975     {
4976       bfd_byte *contents = NULL;
4977       struct _arm_elf_section_data *sec_data;
4978       unsigned int span;
4979       bfd_vma base_vma;
4980 
4981       if (elf_section_type (section) != SHT_PROGBITS
4982 	  || (elf_section_flags (section) & SHF_EXECINSTR) == 0
4983 	  || (section->flags & SEC_EXCLUDE) != 0
4984 	  || (section->sec_info_type == SEC_INFO_TYPE_JUST_SYMS)
4985 	  || (section->output_section == bfd_abs_section_ptr))
4986 	continue;
4987 
4988       base_vma = section->output_section->vma + section->output_offset;
4989 
4990       if (elf_section_data (section)->this_hdr.contents != NULL)
4991 	contents = elf_section_data (section)->this_hdr.contents;
4992       else if (! bfd_malloc_and_get_section (input_bfd, section, &contents))
4993 	return TRUE;
4994 
4995       sec_data = elf32_arm_section_data (section);
4996 
4997       for (span = 0; span < sec_data->mapcount; span++)
4998 	{
4999 	  unsigned int span_start = sec_data->map[span].vma;
5000 	  unsigned int span_end = (span == sec_data->mapcount - 1)
5001 	    ? section->size : sec_data->map[span + 1].vma;
5002 	  unsigned int i;
5003 	  char span_type = sec_data->map[span].type;
5004 	  bfd_boolean last_was_32bit = FALSE, last_was_branch = FALSE;
5005 
5006 	  if (span_type != 't')
5007 	    continue;
5008 
5009 	  /* Span is entirely within a single 4KB region: skip scanning.  */
5010 	  if (((base_vma + span_start) & ~0xfff)
5011 	      == ((base_vma + span_end) & ~0xfff))
5012 	    continue;
5013 
5014 	  /* Scan for 32-bit Thumb-2 branches which span two 4K regions, where:
5015 
5016 	       * The opcode is BLX.W, BL.W, B.W, Bcc.W
5017 	       * The branch target is in the same 4KB region as the
5018 		 first half of the branch.
5019 	       * The instruction before the branch is a 32-bit
5020 		 length non-branch instruction.  */
5021 	  for (i = span_start; i < span_end;)
5022 	    {
5023 	      unsigned int insn = bfd_getl16 (&contents[i]);
5024 	      bfd_boolean insn_32bit = FALSE, is_blx = FALSE, is_b = FALSE;
5025 	      bfd_boolean is_bl = FALSE, is_bcc = FALSE, is_32bit_branch;
5026 
5027 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
5028 		insn_32bit = TRUE;
5029 
5030 	      if (insn_32bit)
5031 		{
5032 		  /* Load the rest of the insn (in manual-friendly order).  */
5033 		  insn = (insn << 16) | bfd_getl16 (&contents[i + 2]);
5034 
5035 		  /* Encoding T4: B<c>.W.  */
5036 		  is_b = (insn & 0xf800d000) == 0xf0009000;
5037 		  /* Encoding T1: BL<c>.W.  */
5038 		  is_bl = (insn & 0xf800d000) == 0xf000d000;
5039 		  /* Encoding T2: BLX<c>.W.  */
5040 		  is_blx = (insn & 0xf800d000) == 0xf000c000;
5041 		  /* Encoding T3: B<c>.W (not permitted in IT block).  */
5042 		  is_bcc = (insn & 0xf800d000) == 0xf0008000
5043 			   && (insn & 0x07f00000) != 0x03800000;
5044 		}
5045 
5046 	      is_32bit_branch = is_b || is_bl || is_blx || is_bcc;
5047 
5048 	      if (((base_vma + i) & 0xfff) == 0xffe
5049 		  && insn_32bit
5050 		  && is_32bit_branch
5051 		  && last_was_32bit
5052 		  && ! last_was_branch)
5053 		{
5054 		  bfd_signed_vma offset = 0;
5055 		  bfd_boolean force_target_arm = FALSE;
5056 		  bfd_boolean force_target_thumb = FALSE;
5057 		  bfd_vma target;
5058 		  enum elf32_arm_stub_type stub_type = arm_stub_none;
5059 		  struct a8_erratum_reloc key, *found;
5060 		  bfd_boolean use_plt = FALSE;
5061 
5062 		  key.from = base_vma + i;
5063 		  found = (struct a8_erratum_reloc *)
5064 		      bsearch (&key, a8_relocs, num_a8_relocs,
5065 			       sizeof (struct a8_erratum_reloc),
5066 			       &a8_reloc_compare);
5067 
5068 		  if (found)
5069 		    {
5070 		      char *error_message = NULL;
5071 		      struct elf_link_hash_entry *entry;
5072 
5073 		      /* We don't care about the error returned from this
5074 			 function, only if there is glue or not.  */
5075 		      entry = find_thumb_glue (info, found->sym_name,
5076 					       &error_message);
5077 
5078 		      if (entry)
5079 			found->non_a8_stub = TRUE;
5080 
5081 		      /* Keep a simpler condition, for the sake of clarity.  */
5082 		      if (htab->root.splt != NULL && found->hash != NULL
5083 			  && found->hash->root.plt.offset != (bfd_vma) -1)
5084 			use_plt = TRUE;
5085 
5086 		      if (found->r_type == R_ARM_THM_CALL)
5087 			{
5088 			  if (found->branch_type == ST_BRANCH_TO_ARM
5089 			      || use_plt)
5090 			    force_target_arm = TRUE;
5091 			  else
5092 			    force_target_thumb = TRUE;
5093 			}
5094 		    }
5095 
5096 		  /* Check if we have an offending branch instruction.  */
5097 
5098 		  if (found && found->non_a8_stub)
5099 		    /* We've already made a stub for this instruction, e.g.
5100 		       it's a long branch or a Thumb->ARM stub.  Assume that
5101 		       stub will suffice to work around the A8 erratum (see
5102 		       setting of always_after_branch above).  */
5103 		    ;
5104 		  else if (is_bcc)
5105 		    {
5106 		      offset = (insn & 0x7ff) << 1;
5107 		      offset |= (insn & 0x3f0000) >> 4;
5108 		      offset |= (insn & 0x2000) ? 0x40000 : 0;
5109 		      offset |= (insn & 0x800) ? 0x80000 : 0;
5110 		      offset |= (insn & 0x4000000) ? 0x100000 : 0;
5111 		      if (offset & 0x100000)
5112 			offset |= ~ ((bfd_signed_vma) 0xfffff);
5113 		      stub_type = arm_stub_a8_veneer_b_cond;
5114 		    }
5115 		  else if (is_b || is_bl || is_blx)
5116 		    {
5117 		      int s = (insn & 0x4000000) != 0;
5118 		      int j1 = (insn & 0x2000) != 0;
5119 		      int j2 = (insn & 0x800) != 0;
5120 		      int i1 = !(j1 ^ s);
5121 		      int i2 = !(j2 ^ s);
5122 
5123 		      offset = (insn & 0x7ff) << 1;
5124 		      offset |= (insn & 0x3ff0000) >> 4;
5125 		      offset |= i2 << 22;
5126 		      offset |= i1 << 23;
5127 		      offset |= s << 24;
5128 		      if (offset & 0x1000000)
5129 			offset |= ~ ((bfd_signed_vma) 0xffffff);
5130 
5131 		      if (is_blx)
5132 			offset &= ~ ((bfd_signed_vma) 3);
5133 
5134 		      stub_type = is_blx ? arm_stub_a8_veneer_blx :
5135 			is_bl ? arm_stub_a8_veneer_bl : arm_stub_a8_veneer_b;
5136 		    }
5137 
5138 		  if (stub_type != arm_stub_none)
5139 		    {
5140 		      bfd_vma pc_for_insn = base_vma + i + 4;
5141 
5142 		      /* The original instruction is a BL, but the target is
5143 			 an ARM instruction.  If we were not making a stub,
5144 			 the BL would have been converted to a BLX.  Use the
5145 			 BLX stub instead in that case.  */
5146 		      if (htab->use_blx && force_target_arm
5147 			  && stub_type == arm_stub_a8_veneer_bl)
5148 			{
5149 			  stub_type = arm_stub_a8_veneer_blx;
5150 			  is_blx = TRUE;
5151 			  is_bl = FALSE;
5152 			}
5153 		      /* Conversely, if the original instruction was
5154 			 BLX but the target is Thumb mode, use the BL
5155 			 stub.  */
5156 		      else if (force_target_thumb
5157 			       && stub_type == arm_stub_a8_veneer_blx)
5158 			{
5159 			  stub_type = arm_stub_a8_veneer_bl;
5160 			  is_blx = FALSE;
5161 			  is_bl = TRUE;
5162 			}
5163 
5164 		      if (is_blx)
5165 			pc_for_insn &= ~ ((bfd_vma) 3);
5166 
5167 		      /* If we found a relocation, use the proper destination,
5168 			 not the offset in the (unrelocated) instruction.
5169 			 Note this is always done if we switched the stub type
5170 			 above.  */
5171 		      if (found)
5172 			offset =
5173 			  (bfd_signed_vma) (found->destination - pc_for_insn);
5174 
5175 		      /* If the stub will use a Thumb-mode branch to a
5176 			 PLT target, redirect it to the preceding Thumb
5177 			 entry point.  */
5178 		      if (stub_type != arm_stub_a8_veneer_blx && use_plt)
5179 			offset -= PLT_THUMB_STUB_SIZE;
5180 
5181 		      target = pc_for_insn + offset;
5182 
5183 		      /* The BLX stub is ARM-mode code.  Adjust the offset to
5184 			 take the different PC value (+8 instead of +4) into
5185 			 account.  */
5186 		      if (stub_type == arm_stub_a8_veneer_blx)
5187 			offset += 4;
5188 
5189 		      if (((base_vma + i) & ~0xfff) == (target & ~0xfff))
5190 			{
5191 			  char *stub_name = NULL;
5192 
5193 			  if (num_a8_fixes == a8_fix_table_size)
5194 			    {
5195 			      a8_fix_table_size *= 2;
5196 			      a8_fixes = (struct a8_erratum_fix *)
5197 				  bfd_realloc (a8_fixes,
5198 					       sizeof (struct a8_erratum_fix)
5199 					       * a8_fix_table_size);
5200 			    }
5201 
5202 			  if (num_a8_fixes < prev_num_a8_fixes)
5203 			    {
5204 			      /* If we're doing a subsequent scan,
5205 				 check if we've found the same fix as
5206 				 before, and try and reuse the stub
5207 				 name.  */
5208 			      stub_name = a8_fixes[num_a8_fixes].stub_name;
5209 			      if ((a8_fixes[num_a8_fixes].section != section)
5210 				  || (a8_fixes[num_a8_fixes].offset != i))
5211 				{
5212 				  free (stub_name);
5213 				  stub_name = NULL;
5214 				  *stub_changed_p = TRUE;
5215 				}
5216 			    }
5217 
5218 			  if (!stub_name)
5219 			    {
5220 			      stub_name = (char *) bfd_malloc (8 + 1 + 8 + 1);
5221 			      if (stub_name != NULL)
5222 				sprintf (stub_name, "%x:%x", section->id, i);
5223 			    }
5224 
5225 			  a8_fixes[num_a8_fixes].input_bfd = input_bfd;
5226 			  a8_fixes[num_a8_fixes].section = section;
5227 			  a8_fixes[num_a8_fixes].offset = i;
5228 			  a8_fixes[num_a8_fixes].target_offset =
5229 			    target - base_vma;
5230 			  a8_fixes[num_a8_fixes].orig_insn = insn;
5231 			  a8_fixes[num_a8_fixes].stub_name = stub_name;
5232 			  a8_fixes[num_a8_fixes].stub_type = stub_type;
5233 			  a8_fixes[num_a8_fixes].branch_type =
5234 			    is_blx ? ST_BRANCH_TO_ARM : ST_BRANCH_TO_THUMB;
5235 
5236 			  num_a8_fixes++;
5237 			}
5238 		    }
5239 		}
5240 
5241 	      i += insn_32bit ? 4 : 2;
5242 	      last_was_32bit = insn_32bit;
5243 	      last_was_branch = is_32bit_branch;
5244 	    }
5245 	}
5246 
5247       if (elf_section_data (section)->this_hdr.contents == NULL)
5248 	free (contents);
5249     }
5250 
5251   *a8_fixes_p = a8_fixes;
5252   *num_a8_fixes_p = num_a8_fixes;
5253   *a8_fix_table_size_p = a8_fix_table_size;
5254 
5255   return FALSE;
5256 }
5257 
5258 /* Create or update a stub entry depending on whether the stub can already be
5259    found in HTAB.  The stub is identified by:
5260    - its type STUB_TYPE
5261    - its source branch (note that several can share the same stub) whose
5262      section and relocation (if any) are given by SECTION and IRELA
5263      respectively
5264    - its target symbol whose input section, hash, name, value and branch type
5265      are given in SYM_SEC, HASH, SYM_NAME, SYM_VALUE and BRANCH_TYPE
5266      respectively
5267 
5268    If found, the value of the stub's target symbol is updated from SYM_VALUE
5269    and *NEW_STUB is set to FALSE.  Otherwise, *NEW_STUB is set to
5270    TRUE and the stub entry is initialized.
5271 
5272    Returns whether the stub could be successfully created or updated, or FALSE
5273    if an error occured.  */
5274 
5275 static bfd_boolean
5276 elf32_arm_create_stub (struct elf32_arm_link_hash_table *htab,
5277 		       enum elf32_arm_stub_type stub_type, asection *section,
5278 		       Elf_Internal_Rela *irela, asection *sym_sec,
5279 		       struct elf32_arm_link_hash_entry *hash, char *sym_name,
5280 		       bfd_vma sym_value, enum arm_st_branch_type branch_type,
5281 		       bfd_boolean *new_stub)
5282 {
5283   const asection *id_sec;
5284   char *stub_name;
5285   struct elf32_arm_stub_hash_entry *stub_entry;
5286   unsigned int r_type;
5287   bfd_boolean sym_claimed = arm_stub_sym_claimed (stub_type);
5288 
5289   BFD_ASSERT (stub_type != arm_stub_none);
5290   *new_stub = FALSE;
5291 
5292   if (sym_claimed)
5293     stub_name = sym_name;
5294   else
5295     {
5296       BFD_ASSERT (irela);
5297       BFD_ASSERT (section);
5298 
5299       /* Support for grouping stub sections.  */
5300       id_sec = htab->stub_group[section->id].link_sec;
5301 
5302       /* Get the name of this stub.  */
5303       stub_name = elf32_arm_stub_name (id_sec, sym_sec, hash, irela,
5304 				       stub_type);
5305       if (!stub_name)
5306 	return FALSE;
5307     }
5308 
5309   stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name, FALSE,
5310 				     FALSE);
5311   /* The proper stub has already been created, just update its value.  */
5312   if (stub_entry != NULL)
5313     {
5314       if (!sym_claimed)
5315 	free (stub_name);
5316       stub_entry->target_value = sym_value;
5317       return TRUE;
5318     }
5319 
5320   stub_entry = elf32_arm_add_stub (stub_name, section, htab, stub_type);
5321   if (stub_entry == NULL)
5322     {
5323       if (!sym_claimed)
5324 	free (stub_name);
5325       return FALSE;
5326     }
5327 
5328   stub_entry->target_value = sym_value;
5329   stub_entry->target_section = sym_sec;
5330   stub_entry->stub_type = stub_type;
5331   stub_entry->h = hash;
5332   stub_entry->branch_type = branch_type;
5333 
5334   if (sym_claimed)
5335     stub_entry->output_name = sym_name;
5336   else
5337     {
5338       if (sym_name == NULL)
5339 	sym_name = "unnamed";
5340       stub_entry->output_name = (char *)
5341 	bfd_alloc (htab->stub_bfd, sizeof (THUMB2ARM_GLUE_ENTRY_NAME)
5342 				   + strlen (sym_name));
5343       if (stub_entry->output_name == NULL)
5344 	{
5345 	  free (stub_name);
5346 	  return FALSE;
5347 	}
5348 
5349       /* For historical reasons, use the existing names for ARM-to-Thumb and
5350 	 Thumb-to-ARM stubs.  */
5351       r_type = ELF32_R_TYPE (irela->r_info);
5352       if ((r_type == (unsigned int) R_ARM_THM_CALL
5353 	   || r_type == (unsigned int) R_ARM_THM_JUMP24
5354 	   || r_type == (unsigned int) R_ARM_THM_JUMP19)
5355 	  && branch_type == ST_BRANCH_TO_ARM)
5356 	sprintf (stub_entry->output_name, THUMB2ARM_GLUE_ENTRY_NAME, sym_name);
5357       else if ((r_type == (unsigned int) R_ARM_CALL
5358 		|| r_type == (unsigned int) R_ARM_JUMP24)
5359 	       && branch_type == ST_BRANCH_TO_THUMB)
5360 	sprintf (stub_entry->output_name, ARM2THUMB_GLUE_ENTRY_NAME, sym_name);
5361       else
5362 	sprintf (stub_entry->output_name, STUB_ENTRY_NAME, sym_name);
5363     }
5364 
5365   *new_stub = TRUE;
5366   return TRUE;
5367 }
5368 
5369 /* Determine and set the size of the stub section for a final link.
5370 
5371    The basic idea here is to examine all the relocations looking for
5372    PC-relative calls to a target that is unreachable with a "bl"
5373    instruction.  */
5374 
5375 bfd_boolean
5376 elf32_arm_size_stubs (bfd *output_bfd,
5377 		      bfd *stub_bfd,
5378 		      struct bfd_link_info *info,
5379 		      bfd_signed_vma group_size,
5380 		      asection * (*add_stub_section) (const char *, asection *,
5381 						      asection *,
5382 						      unsigned int),
5383 		      void (*layout_sections_again) (void))
5384 {
5385   bfd_size_type stub_group_size;
5386   bfd_boolean stubs_always_after_branch;
5387   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
5388   struct a8_erratum_fix *a8_fixes = NULL;
5389   unsigned int num_a8_fixes = 0, a8_fix_table_size = 10;
5390   struct a8_erratum_reloc *a8_relocs = NULL;
5391   unsigned int num_a8_relocs = 0, a8_reloc_table_size = 10, i;
5392 
5393   if (htab == NULL)
5394     return FALSE;
5395 
5396   if (htab->fix_cortex_a8)
5397     {
5398       a8_fixes = (struct a8_erratum_fix *)
5399 	  bfd_zmalloc (sizeof (struct a8_erratum_fix) * a8_fix_table_size);
5400       a8_relocs = (struct a8_erratum_reloc *)
5401 	  bfd_zmalloc (sizeof (struct a8_erratum_reloc) * a8_reloc_table_size);
5402     }
5403 
5404   /* Propagate mach to stub bfd, because it may not have been
5405      finalized when we created stub_bfd.  */
5406   bfd_set_arch_mach (stub_bfd, bfd_get_arch (output_bfd),
5407 		     bfd_get_mach (output_bfd));
5408 
5409   /* Stash our params away.  */
5410   htab->stub_bfd = stub_bfd;
5411   htab->add_stub_section = add_stub_section;
5412   htab->layout_sections_again = layout_sections_again;
5413   stubs_always_after_branch = group_size < 0;
5414 
5415   /* The Cortex-A8 erratum fix depends on stubs not being in the same 4K page
5416      as the first half of a 32-bit branch straddling two 4K pages.  This is a
5417      crude way of enforcing that.  */
5418   if (htab->fix_cortex_a8)
5419     stubs_always_after_branch = 1;
5420 
5421   if (group_size < 0)
5422     stub_group_size = -group_size;
5423   else
5424     stub_group_size = group_size;
5425 
5426   if (stub_group_size == 1)
5427     {
5428       /* Default values.  */
5429       /* Thumb branch range is +-4MB has to be used as the default
5430 	 maximum size (a given section can contain both ARM and Thumb
5431 	 code, so the worst case has to be taken into account).
5432 
5433 	 This value is 24K less than that, which allows for 2025
5434 	 12-byte stubs.  If we exceed that, then we will fail to link.
5435 	 The user will have to relink with an explicit group size
5436 	 option.  */
5437       stub_group_size = 4170000;
5438     }
5439 
5440   group_sections (htab, stub_group_size, stubs_always_after_branch);
5441 
5442   /* If we're applying the cortex A8 fix, we need to determine the
5443      program header size now, because we cannot change it later --
5444      that could alter section placements.  Notice the A8 erratum fix
5445      ends up requiring the section addresses to remain unchanged
5446      modulo the page size.  That's something we cannot represent
5447      inside BFD, and we don't want to force the section alignment to
5448      be the page size.  */
5449   if (htab->fix_cortex_a8)
5450     (*htab->layout_sections_again) ();
5451 
5452   while (1)
5453     {
5454       bfd *input_bfd;
5455       unsigned int bfd_indx;
5456       asection *stub_sec;
5457       enum elf32_arm_stub_type stub_type;
5458       bfd_boolean stub_changed = FALSE;
5459       unsigned prev_num_a8_fixes = num_a8_fixes;
5460 
5461       num_a8_fixes = 0;
5462       for (input_bfd = info->input_bfds, bfd_indx = 0;
5463 	   input_bfd != NULL;
5464 	   input_bfd = input_bfd->link.next, bfd_indx++)
5465 	{
5466 	  Elf_Internal_Shdr *symtab_hdr;
5467 	  asection *section;
5468 	  Elf_Internal_Sym *local_syms = NULL;
5469 
5470 	  if (!is_arm_elf (input_bfd))
5471 	    continue;
5472 
5473 	  num_a8_relocs = 0;
5474 
5475 	  /* We'll need the symbol table in a second.  */
5476 	  symtab_hdr = &elf_tdata (input_bfd)->symtab_hdr;
5477 	  if (symtab_hdr->sh_info == 0)
5478 	    continue;
5479 
5480 	  /* Walk over each section attached to the input bfd.  */
5481 	  for (section = input_bfd->sections;
5482 	       section != NULL;
5483 	       section = section->next)
5484 	    {
5485 	      Elf_Internal_Rela *internal_relocs, *irelaend, *irela;
5486 
5487 	      /* If there aren't any relocs, then there's nothing more
5488 		 to do.  */
5489 	      if ((section->flags & SEC_RELOC) == 0
5490 		  || section->reloc_count == 0
5491 		  || (section->flags & SEC_CODE) == 0)
5492 		continue;
5493 
5494 	      /* If this section is a link-once section that will be
5495 		 discarded, then don't create any stubs.  */
5496 	      if (section->output_section == NULL
5497 		  || section->output_section->owner != output_bfd)
5498 		continue;
5499 
5500 	      /* Get the relocs.  */
5501 	      internal_relocs
5502 		= _bfd_elf_link_read_relocs (input_bfd, section, NULL,
5503 					     NULL, info->keep_memory);
5504 	      if (internal_relocs == NULL)
5505 		goto error_ret_free_local;
5506 
5507 	      /* Now examine each relocation.  */
5508 	      irela = internal_relocs;
5509 	      irelaend = irela + section->reloc_count;
5510 	      for (; irela < irelaend; irela++)
5511 		{
5512 		  unsigned int r_type, r_indx;
5513 		  asection *sym_sec;
5514 		  bfd_vma sym_value;
5515 		  bfd_vma destination;
5516 		  struct elf32_arm_link_hash_entry *hash;
5517 		  const char *sym_name;
5518 		  unsigned char st_type;
5519 		  enum arm_st_branch_type branch_type;
5520 		  bfd_boolean created_stub = FALSE;
5521 
5522 		  r_type = ELF32_R_TYPE (irela->r_info);
5523 		  r_indx = ELF32_R_SYM (irela->r_info);
5524 
5525 		  if (r_type >= (unsigned int) R_ARM_max)
5526 		    {
5527 		      bfd_set_error (bfd_error_bad_value);
5528 		    error_ret_free_internal:
5529 		      if (elf_section_data (section)->relocs == NULL)
5530 			free (internal_relocs);
5531 		    /* Fall through.  */
5532 		    error_ret_free_local:
5533 		      if (local_syms != NULL
5534 			  && (symtab_hdr->contents
5535 			      != (unsigned char *) local_syms))
5536 			free (local_syms);
5537 		      return FALSE;
5538 		    }
5539 
5540 		  hash = NULL;
5541 		  if (r_indx >= symtab_hdr->sh_info)
5542 		    hash = elf32_arm_hash_entry
5543 		      (elf_sym_hashes (input_bfd)
5544 		       [r_indx - symtab_hdr->sh_info]);
5545 
5546 		  /* Only look for stubs on branch instructions, or
5547 		     non-relaxed TLSCALL  */
5548 		  if ((r_type != (unsigned int) R_ARM_CALL)
5549 		      && (r_type != (unsigned int) R_ARM_THM_CALL)
5550 		      && (r_type != (unsigned int) R_ARM_JUMP24)
5551 		      && (r_type != (unsigned int) R_ARM_THM_JUMP19)
5552 		      && (r_type != (unsigned int) R_ARM_THM_XPC22)
5553 		      && (r_type != (unsigned int) R_ARM_THM_JUMP24)
5554 		      && (r_type != (unsigned int) R_ARM_PLT32)
5555 		      && !((r_type == (unsigned int) R_ARM_TLS_CALL
5556 			    || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5557 			   && r_type == elf32_arm_tls_transition
5558 			       (info, r_type, &hash->root)
5559 			   && ((hash ? hash->tls_type
5560 				: (elf32_arm_local_got_tls_type
5561 				   (input_bfd)[r_indx]))
5562 			       & GOT_TLS_GDESC) != 0))
5563 		    continue;
5564 
5565 		  /* Now determine the call target, its name, value,
5566 		     section.  */
5567 		  sym_sec = NULL;
5568 		  sym_value = 0;
5569 		  destination = 0;
5570 		  sym_name = NULL;
5571 
5572 		  if (r_type == (unsigned int) R_ARM_TLS_CALL
5573 		      || r_type == (unsigned int) R_ARM_THM_TLS_CALL)
5574 		    {
5575 		      /* A non-relaxed TLS call.  The target is the
5576 			 plt-resident trampoline and nothing to do
5577 			 with the symbol.  */
5578 		      BFD_ASSERT (htab->tls_trampoline > 0);
5579 		      sym_sec = htab->root.splt;
5580 		      sym_value = htab->tls_trampoline;
5581 		      hash = 0;
5582 		      st_type = STT_FUNC;
5583 		      branch_type = ST_BRANCH_TO_ARM;
5584 		    }
5585 		  else if (!hash)
5586 		    {
5587 		      /* It's a local symbol.  */
5588 		      Elf_Internal_Sym *sym;
5589 
5590 		      if (local_syms == NULL)
5591 			{
5592 			  local_syms
5593 			    = (Elf_Internal_Sym *) symtab_hdr->contents;
5594 			  if (local_syms == NULL)
5595 			    local_syms
5596 			      = bfd_elf_get_elf_syms (input_bfd, symtab_hdr,
5597 						      symtab_hdr->sh_info, 0,
5598 						      NULL, NULL, NULL);
5599 			  if (local_syms == NULL)
5600 			    goto error_ret_free_internal;
5601 			}
5602 
5603 		      sym = local_syms + r_indx;
5604 		      if (sym->st_shndx == SHN_UNDEF)
5605 			sym_sec = bfd_und_section_ptr;
5606 		      else if (sym->st_shndx == SHN_ABS)
5607 			sym_sec = bfd_abs_section_ptr;
5608 		      else if (sym->st_shndx == SHN_COMMON)
5609 			sym_sec = bfd_com_section_ptr;
5610 		      else
5611 			sym_sec =
5612 			  bfd_section_from_elf_index (input_bfd, sym->st_shndx);
5613 
5614 		      if (!sym_sec)
5615 			/* This is an undefined symbol.  It can never
5616 			   be resolved.  */
5617 			continue;
5618 
5619 		      if (ELF_ST_TYPE (sym->st_info) != STT_SECTION)
5620 			sym_value = sym->st_value;
5621 		      destination = (sym_value + irela->r_addend
5622 				     + sym_sec->output_offset
5623 				     + sym_sec->output_section->vma);
5624 		      st_type = ELF_ST_TYPE (sym->st_info);
5625 		      branch_type =
5626 			ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
5627 		      sym_name
5628 			= bfd_elf_string_from_elf_section (input_bfd,
5629 							   symtab_hdr->sh_link,
5630 							   sym->st_name);
5631 		    }
5632 		  else
5633 		    {
5634 		      /* It's an external symbol.  */
5635 		      while (hash->root.root.type == bfd_link_hash_indirect
5636 			     || hash->root.root.type == bfd_link_hash_warning)
5637 			hash = ((struct elf32_arm_link_hash_entry *)
5638 				hash->root.root.u.i.link);
5639 
5640 		      if (hash->root.root.type == bfd_link_hash_defined
5641 			  || hash->root.root.type == bfd_link_hash_defweak)
5642 			{
5643 			  sym_sec = hash->root.root.u.def.section;
5644 			  sym_value = hash->root.root.u.def.value;
5645 
5646 			  struct elf32_arm_link_hash_table *globals =
5647 						  elf32_arm_hash_table (info);
5648 
5649 			  /* For a destination in a shared library,
5650 			     use the PLT stub as target address to
5651 			     decide whether a branch stub is
5652 			     needed.  */
5653 			  if (globals != NULL
5654 			      && globals->root.splt != NULL
5655 			      && hash != NULL
5656 			      && hash->root.plt.offset != (bfd_vma) -1)
5657 			    {
5658 			      sym_sec = globals->root.splt;
5659 			      sym_value = hash->root.plt.offset;
5660 			      if (sym_sec->output_section != NULL)
5661 				destination = (sym_value
5662 					       + sym_sec->output_offset
5663 					       + sym_sec->output_section->vma);
5664 			    }
5665 			  else if (sym_sec->output_section != NULL)
5666 			    destination = (sym_value + irela->r_addend
5667 					   + sym_sec->output_offset
5668 					   + sym_sec->output_section->vma);
5669 			}
5670 		      else if ((hash->root.root.type == bfd_link_hash_undefined)
5671 			       || (hash->root.root.type == bfd_link_hash_undefweak))
5672 			{
5673 			  /* For a shared library, use the PLT stub as
5674 			     target address to decide whether a long
5675 			     branch stub is needed.
5676 			     For absolute code, they cannot be handled.  */
5677 			  struct elf32_arm_link_hash_table *globals =
5678 			    elf32_arm_hash_table (info);
5679 
5680 			  if (globals != NULL
5681 			      && globals->root.splt != NULL
5682 			      && hash != NULL
5683 			      && hash->root.plt.offset != (bfd_vma) -1)
5684 			    {
5685 			      sym_sec = globals->root.splt;
5686 			      sym_value = hash->root.plt.offset;
5687 			      if (sym_sec->output_section != NULL)
5688 				destination = (sym_value
5689 					       + sym_sec->output_offset
5690 					       + sym_sec->output_section->vma);
5691 			    }
5692 			  else
5693 			    continue;
5694 			}
5695 		      else
5696 			{
5697 			  bfd_set_error (bfd_error_bad_value);
5698 			  goto error_ret_free_internal;
5699 			}
5700 		      st_type = hash->root.type;
5701 		      branch_type =
5702 			ARM_GET_SYM_BRANCH_TYPE (hash->root.target_internal);
5703 		      sym_name = hash->root.root.root.string;
5704 		    }
5705 
5706 		  do
5707 		    {
5708 		      bfd_boolean new_stub;
5709 
5710 		      /* Determine what (if any) linker stub is needed.  */
5711 		      stub_type = arm_type_of_stub (info, section, irela,
5712 						    st_type, &branch_type,
5713 						    hash, destination, sym_sec,
5714 						    input_bfd, sym_name);
5715 		      if (stub_type == arm_stub_none)
5716 			break;
5717 
5718 		      /* We've either created a stub for this reloc already,
5719 			 or we are about to.  */
5720 		      created_stub =
5721 			elf32_arm_create_stub (htab, stub_type, section, irela,
5722 					       sym_sec, hash,
5723 					       (char *) sym_name, sym_value,
5724 					       branch_type, &new_stub);
5725 
5726 		      if (!created_stub)
5727 			goto error_ret_free_internal;
5728 		      else if (!new_stub)
5729 			break;
5730 		      else
5731 			stub_changed = TRUE;
5732 		    }
5733 		  while (0);
5734 
5735 		  /* Look for relocations which might trigger Cortex-A8
5736 		     erratum.  */
5737 		  if (htab->fix_cortex_a8
5738 		      && (r_type == (unsigned int) R_ARM_THM_JUMP24
5739 			  || r_type == (unsigned int) R_ARM_THM_JUMP19
5740 			  || r_type == (unsigned int) R_ARM_THM_CALL
5741 			  || r_type == (unsigned int) R_ARM_THM_XPC22))
5742 		    {
5743 		      bfd_vma from = section->output_section->vma
5744 				     + section->output_offset
5745 				     + irela->r_offset;
5746 
5747 		      if ((from & 0xfff) == 0xffe)
5748 			{
5749 			  /* Found a candidate.  Note we haven't checked the
5750 			     destination is within 4K here: if we do so (and
5751 			     don't create an entry in a8_relocs) we can't tell
5752 			     that a branch should have been relocated when
5753 			     scanning later.  */
5754 			  if (num_a8_relocs == a8_reloc_table_size)
5755 			    {
5756 			      a8_reloc_table_size *= 2;
5757 			      a8_relocs = (struct a8_erratum_reloc *)
5758 				  bfd_realloc (a8_relocs,
5759 					       sizeof (struct a8_erratum_reloc)
5760 					       * a8_reloc_table_size);
5761 			    }
5762 
5763 			  a8_relocs[num_a8_relocs].from = from;
5764 			  a8_relocs[num_a8_relocs].destination = destination;
5765 			  a8_relocs[num_a8_relocs].r_type = r_type;
5766 			  a8_relocs[num_a8_relocs].branch_type = branch_type;
5767 			  a8_relocs[num_a8_relocs].sym_name = sym_name;
5768 			  a8_relocs[num_a8_relocs].non_a8_stub = created_stub;
5769 			  a8_relocs[num_a8_relocs].hash = hash;
5770 
5771 			  num_a8_relocs++;
5772 			}
5773 		    }
5774 		}
5775 
5776 	      /* We're done with the internal relocs, free them.  */
5777 	      if (elf_section_data (section)->relocs == NULL)
5778 		free (internal_relocs);
5779 	    }
5780 
5781 	  if (htab->fix_cortex_a8)
5782 	    {
5783 	      /* Sort relocs which might apply to Cortex-A8 erratum.  */
5784 	      qsort (a8_relocs, num_a8_relocs,
5785 		     sizeof (struct a8_erratum_reloc),
5786 		     &a8_reloc_compare);
5787 
5788 	      /* Scan for branches which might trigger Cortex-A8 erratum.  */
5789 	      if (cortex_a8_erratum_scan (input_bfd, info, &a8_fixes,
5790 					  &num_a8_fixes, &a8_fix_table_size,
5791 					  a8_relocs, num_a8_relocs,
5792 					  prev_num_a8_fixes, &stub_changed)
5793 		  != 0)
5794 		goto error_ret_free_local;
5795 	    }
5796 
5797 	  if (local_syms != NULL
5798 	      && symtab_hdr->contents != (unsigned char *) local_syms)
5799 	    {
5800 	      if (!info->keep_memory)
5801 		free (local_syms);
5802 	      else
5803 		symtab_hdr->contents = (unsigned char *) local_syms;
5804 	    }
5805 	}
5806 
5807       if (prev_num_a8_fixes != num_a8_fixes)
5808 	stub_changed = TRUE;
5809 
5810       if (!stub_changed)
5811 	break;
5812 
5813       /* OK, we've added some stubs.  Find out the new size of the
5814 	 stub sections.  */
5815       for (stub_sec = htab->stub_bfd->sections;
5816 	   stub_sec != NULL;
5817 	   stub_sec = stub_sec->next)
5818 	{
5819 	  /* Ignore non-stub sections.  */
5820 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
5821 	    continue;
5822 
5823 	  stub_sec->size = 0;
5824 	}
5825 
5826       /* Compute stub section size, considering padding.  */
5827       bfd_hash_traverse (&htab->stub_hash_table, arm_size_one_stub, htab);
5828       for (stub_type = arm_stub_none + 1; stub_type < max_stub_type;
5829 	   stub_type++)
5830 	{
5831 	  int size, padding;
5832 	  asection **stub_sec_p;
5833 
5834 	  padding = arm_dedicated_stub_section_padding (stub_type);
5835 	  stub_sec_p = arm_dedicated_stub_input_section_ptr (htab, stub_type);
5836 	  /* Skip if no stub input section or no stub section padding
5837 	     required.  */
5838 	  if ((stub_sec_p != NULL && *stub_sec_p == NULL) || padding == 0)
5839 	    continue;
5840 	  /* Stub section padding required but no dedicated section.  */
5841 	  BFD_ASSERT (stub_sec_p);
5842 
5843 	  size = (*stub_sec_p)->size;
5844 	  size = (size + padding - 1) & ~(padding - 1);
5845 	  (*stub_sec_p)->size = size;
5846 	}
5847 
5848       /* Add Cortex-A8 erratum veneers to stub section sizes too.  */
5849       if (htab->fix_cortex_a8)
5850 	for (i = 0; i < num_a8_fixes; i++)
5851 	  {
5852 	    stub_sec = elf32_arm_create_or_find_stub_sec (NULL,
5853 			 a8_fixes[i].section, htab, a8_fixes[i].stub_type);
5854 
5855 	    if (stub_sec == NULL)
5856 	      return FALSE;
5857 
5858 	    stub_sec->size
5859 	      += find_stub_size_and_template (a8_fixes[i].stub_type, NULL,
5860 					      NULL);
5861 	  }
5862 
5863 
5864       /* Ask the linker to do its stuff.  */
5865       (*htab->layout_sections_again) ();
5866     }
5867 
5868   /* Add stubs for Cortex-A8 erratum fixes now.  */
5869   if (htab->fix_cortex_a8)
5870     {
5871       for (i = 0; i < num_a8_fixes; i++)
5872 	{
5873 	  struct elf32_arm_stub_hash_entry *stub_entry;
5874 	  char *stub_name = a8_fixes[i].stub_name;
5875 	  asection *section = a8_fixes[i].section;
5876 	  unsigned int section_id = a8_fixes[i].section->id;
5877 	  asection *link_sec = htab->stub_group[section_id].link_sec;
5878 	  asection *stub_sec = htab->stub_group[section_id].stub_sec;
5879 	  const insn_sequence *template_sequence;
5880 	  int template_size, size = 0;
5881 
5882 	  stub_entry = arm_stub_hash_lookup (&htab->stub_hash_table, stub_name,
5883 					     TRUE, FALSE);
5884 	  if (stub_entry == NULL)
5885 	    {
5886 	      (*_bfd_error_handler) (_("%s: cannot create stub entry %s"),
5887 				     section->owner,
5888 				     stub_name);
5889 	      return FALSE;
5890 	    }
5891 
5892 	  stub_entry->stub_sec = stub_sec;
5893 	  stub_entry->stub_offset = 0;
5894 	  stub_entry->id_sec = link_sec;
5895 	  stub_entry->stub_type = a8_fixes[i].stub_type;
5896 	  stub_entry->source_value = a8_fixes[i].offset;
5897 	  stub_entry->target_section = a8_fixes[i].section;
5898 	  stub_entry->target_value = a8_fixes[i].target_offset;
5899 	  stub_entry->orig_insn = a8_fixes[i].orig_insn;
5900 	  stub_entry->branch_type = a8_fixes[i].branch_type;
5901 
5902 	  size = find_stub_size_and_template (a8_fixes[i].stub_type,
5903 					      &template_sequence,
5904 					      &template_size);
5905 
5906 	  stub_entry->stub_size = size;
5907 	  stub_entry->stub_template = template_sequence;
5908 	  stub_entry->stub_template_size = template_size;
5909 	}
5910 
5911       /* Stash the Cortex-A8 erratum fix array for use later in
5912 	 elf32_arm_write_section().  */
5913       htab->a8_erratum_fixes = a8_fixes;
5914       htab->num_a8_erratum_fixes = num_a8_fixes;
5915     }
5916   else
5917     {
5918       htab->a8_erratum_fixes = NULL;
5919       htab->num_a8_erratum_fixes = 0;
5920     }
5921   return TRUE;
5922 }
5923 
5924 /* Build all the stubs associated with the current output file.  The
5925    stubs are kept in a hash table attached to the main linker hash
5926    table.  We also set up the .plt entries for statically linked PIC
5927    functions here.  This function is called via arm_elf_finish in the
5928    linker.  */
5929 
5930 bfd_boolean
5931 elf32_arm_build_stubs (struct bfd_link_info *info)
5932 {
5933   asection *stub_sec;
5934   struct bfd_hash_table *table;
5935   struct elf32_arm_link_hash_table *htab;
5936 
5937   htab = elf32_arm_hash_table (info);
5938   if (htab == NULL)
5939     return FALSE;
5940 
5941   for (stub_sec = htab->stub_bfd->sections;
5942        stub_sec != NULL;
5943        stub_sec = stub_sec->next)
5944     {
5945       bfd_size_type size;
5946 
5947       /* Ignore non-stub sections.  */
5948       if (!strstr (stub_sec->name, STUB_SUFFIX))
5949 	continue;
5950 
5951       /* Allocate memory to hold the linker stubs.  Zeroing the stub sections
5952 	 must at least be done for stub section requiring padding.  */
5953       size = stub_sec->size;
5954       stub_sec->contents = (unsigned char *) bfd_zalloc (htab->stub_bfd, size);
5955       if (stub_sec->contents == NULL && size != 0)
5956 	return FALSE;
5957       stub_sec->size = 0;
5958     }
5959 
5960   /* Build the stubs as directed by the stub hash table.  */
5961   table = &htab->stub_hash_table;
5962   bfd_hash_traverse (table, arm_build_one_stub, info);
5963   if (htab->fix_cortex_a8)
5964     {
5965       /* Place the cortex a8 stubs last.  */
5966       htab->fix_cortex_a8 = -1;
5967       bfd_hash_traverse (table, arm_build_one_stub, info);
5968     }
5969 
5970   return TRUE;
5971 }
5972 
5973 /* Locate the Thumb encoded calling stub for NAME.  */
5974 
5975 static struct elf_link_hash_entry *
5976 find_thumb_glue (struct bfd_link_info *link_info,
5977 		 const char *name,
5978 		 char **error_message)
5979 {
5980   char *tmp_name;
5981   struct elf_link_hash_entry *hash;
5982   struct elf32_arm_link_hash_table *hash_table;
5983 
5984   /* We need a pointer to the armelf specific hash table.  */
5985   hash_table = elf32_arm_hash_table (link_info);
5986   if (hash_table == NULL)
5987     return NULL;
5988 
5989   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
5990 				  + strlen (THUMB2ARM_GLUE_ENTRY_NAME) + 1);
5991 
5992   BFD_ASSERT (tmp_name);
5993 
5994   sprintf (tmp_name, THUMB2ARM_GLUE_ENTRY_NAME, name);
5995 
5996   hash = elf_link_hash_lookup
5997     (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
5998 
5999   if (hash == NULL
6000       && asprintf (error_message, _("unable to find THUMB glue '%s' for '%s'"),
6001 		   tmp_name, name) == -1)
6002     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6003 
6004   free (tmp_name);
6005 
6006   return hash;
6007 }
6008 
6009 /* Locate the ARM encoded calling stub for NAME.  */
6010 
6011 static struct elf_link_hash_entry *
6012 find_arm_glue (struct bfd_link_info *link_info,
6013 	       const char *name,
6014 	       char **error_message)
6015 {
6016   char *tmp_name;
6017   struct elf_link_hash_entry *myh;
6018   struct elf32_arm_link_hash_table *hash_table;
6019 
6020   /* We need a pointer to the elfarm specific hash table.  */
6021   hash_table = elf32_arm_hash_table (link_info);
6022   if (hash_table == NULL)
6023     return NULL;
6024 
6025   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6026 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6027 
6028   BFD_ASSERT (tmp_name);
6029 
6030   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6031 
6032   myh = elf_link_hash_lookup
6033     (&(hash_table)->root, tmp_name, FALSE, FALSE, TRUE);
6034 
6035   if (myh == NULL
6036       && asprintf (error_message, _("unable to find ARM glue '%s' for '%s'"),
6037 		   tmp_name, name) == -1)
6038     *error_message = (char *) bfd_errmsg (bfd_error_system_call);
6039 
6040   free (tmp_name);
6041 
6042   return myh;
6043 }
6044 
6045 /* ARM->Thumb glue (static images):
6046 
6047    .arm
6048    __func_from_arm:
6049    ldr r12, __func_addr
6050    bx  r12
6051    __func_addr:
6052    .word func    @ behave as if you saw a ARM_32 reloc.
6053 
6054    (v5t static images)
6055    .arm
6056    __func_from_arm:
6057    ldr pc, __func_addr
6058    __func_addr:
6059    .word func    @ behave as if you saw a ARM_32 reloc.
6060 
6061    (relocatable images)
6062    .arm
6063    __func_from_arm:
6064    ldr r12, __func_offset
6065    add r12, r12, pc
6066    bx  r12
6067    __func_offset:
6068    .word func - .   */
6069 
6070 #define ARM2THUMB_STATIC_GLUE_SIZE 12
6071 static const insn32 a2t1_ldr_insn = 0xe59fc000;
6072 static const insn32 a2t2_bx_r12_insn = 0xe12fff1c;
6073 static const insn32 a2t3_func_addr_insn = 0x00000001;
6074 
6075 #define ARM2THUMB_V5_STATIC_GLUE_SIZE 8
6076 static const insn32 a2t1v5_ldr_insn = 0xe51ff004;
6077 static const insn32 a2t2v5_func_addr_insn = 0x00000001;
6078 
6079 #define ARM2THUMB_PIC_GLUE_SIZE 16
6080 static const insn32 a2t1p_ldr_insn = 0xe59fc004;
6081 static const insn32 a2t2p_add_pc_insn = 0xe08cc00f;
6082 static const insn32 a2t3p_bx_r12_insn = 0xe12fff1c;
6083 
6084 /* Thumb->ARM:                          Thumb->(non-interworking aware) ARM
6085 
6086      .thumb                             .thumb
6087      .align 2                           .align 2
6088  __func_from_thumb:                 __func_from_thumb:
6089      bx pc                              push {r6, lr}
6090      nop                                ldr  r6, __func_addr
6091      .arm                               mov  lr, pc
6092      b func                             bx   r6
6093 					.arm
6094 				    ;; back_to_thumb
6095 					ldmia r13! {r6, lr}
6096 					bx    lr
6097 				    __func_addr:
6098 					.word        func  */
6099 
6100 #define THUMB2ARM_GLUE_SIZE 8
6101 static const insn16 t2a1_bx_pc_insn = 0x4778;
6102 static const insn16 t2a2_noop_insn = 0x46c0;
6103 static const insn32 t2a3_b_insn = 0xea000000;
6104 
6105 #define VFP11_ERRATUM_VENEER_SIZE 8
6106 #define STM32L4XX_ERRATUM_LDM_VENEER_SIZE 16
6107 #define STM32L4XX_ERRATUM_VLDM_VENEER_SIZE 24
6108 
6109 #define ARM_BX_VENEER_SIZE 12
6110 static const insn32 armbx1_tst_insn = 0xe3100001;
6111 static const insn32 armbx2_moveq_insn = 0x01a0f000;
6112 static const insn32 armbx3_bx_insn = 0xe12fff10;
6113 
6114 #ifndef ELFARM_NABI_C_INCLUDED
6115 static void
6116 arm_allocate_glue_section_space (bfd * abfd, bfd_size_type size, const char * name)
6117 {
6118   asection * s;
6119   bfd_byte * contents;
6120 
6121   if (size == 0)
6122     {
6123       /* Do not include empty glue sections in the output.  */
6124       if (abfd != NULL)
6125 	{
6126 	  s = bfd_get_linker_section (abfd, name);
6127 	  if (s != NULL)
6128 	    s->flags |= SEC_EXCLUDE;
6129 	}
6130       return;
6131     }
6132 
6133   BFD_ASSERT (abfd != NULL);
6134 
6135   s = bfd_get_linker_section (abfd, name);
6136   BFD_ASSERT (s != NULL);
6137 
6138   contents = (bfd_byte *) bfd_alloc (abfd, size);
6139 
6140   BFD_ASSERT (s->size == size);
6141   s->contents = contents;
6142 }
6143 
6144 bfd_boolean
6145 bfd_elf32_arm_allocate_interworking_sections (struct bfd_link_info * info)
6146 {
6147   struct elf32_arm_link_hash_table * globals;
6148 
6149   globals = elf32_arm_hash_table (info);
6150   BFD_ASSERT (globals != NULL);
6151 
6152   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6153 				   globals->arm_glue_size,
6154 				   ARM2THUMB_GLUE_SECTION_NAME);
6155 
6156   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6157 				   globals->thumb_glue_size,
6158 				   THUMB2ARM_GLUE_SECTION_NAME);
6159 
6160   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6161 				   globals->vfp11_erratum_glue_size,
6162 				   VFP11_ERRATUM_VENEER_SECTION_NAME);
6163 
6164   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6165 				   globals->stm32l4xx_erratum_glue_size,
6166 				   STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6167 
6168   arm_allocate_glue_section_space (globals->bfd_of_glue_owner,
6169 				   globals->bx_glue_size,
6170 				   ARM_BX_GLUE_SECTION_NAME);
6171 
6172   return TRUE;
6173 }
6174 
6175 /* Allocate space and symbols for calling a Thumb function from Arm mode.
6176    returns the symbol identifying the stub.  */
6177 
6178 static struct elf_link_hash_entry *
6179 record_arm_to_thumb_glue (struct bfd_link_info * link_info,
6180 			  struct elf_link_hash_entry * h)
6181 {
6182   const char * name = h->root.root.string;
6183   asection * s;
6184   char * tmp_name;
6185   struct elf_link_hash_entry * myh;
6186   struct bfd_link_hash_entry * bh;
6187   struct elf32_arm_link_hash_table * globals;
6188   bfd_vma val;
6189   bfd_size_type size;
6190 
6191   globals = elf32_arm_hash_table (link_info);
6192   BFD_ASSERT (globals != NULL);
6193   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6194 
6195   s = bfd_get_linker_section
6196     (globals->bfd_of_glue_owner, ARM2THUMB_GLUE_SECTION_NAME);
6197 
6198   BFD_ASSERT (s != NULL);
6199 
6200   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen (name)
6201 				  + strlen (ARM2THUMB_GLUE_ENTRY_NAME) + 1);
6202 
6203   BFD_ASSERT (tmp_name);
6204 
6205   sprintf (tmp_name, ARM2THUMB_GLUE_ENTRY_NAME, name);
6206 
6207   myh = elf_link_hash_lookup
6208     (&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
6209 
6210   if (myh != NULL)
6211     {
6212       /* We've already seen this guy.  */
6213       free (tmp_name);
6214       return myh;
6215     }
6216 
6217   /* The only trick here is using hash_table->arm_glue_size as the value.
6218      Even though the section isn't allocated yet, this is where we will be
6219      putting it.  The +1 on the value marks that the stub has not been
6220      output yet - not that it is a Thumb function.  */
6221   bh = NULL;
6222   val = globals->arm_glue_size + 1;
6223   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
6224 				    tmp_name, BSF_GLOBAL, s, val,
6225 				    NULL, TRUE, FALSE, &bh);
6226 
6227   myh = (struct elf_link_hash_entry *) bh;
6228   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6229   myh->forced_local = 1;
6230 
6231   free (tmp_name);
6232 
6233   if (bfd_link_pic (link_info)
6234       || globals->root.is_relocatable_executable
6235       || globals->pic_veneer)
6236     size = ARM2THUMB_PIC_GLUE_SIZE;
6237   else if (globals->use_blx)
6238     size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
6239   else
6240     size = ARM2THUMB_STATIC_GLUE_SIZE;
6241 
6242   s->size += size;
6243   globals->arm_glue_size += size;
6244 
6245   return myh;
6246 }
6247 
6248 /* Allocate space for ARMv4 BX veneers.  */
6249 
6250 static void
6251 record_arm_bx_glue (struct bfd_link_info * link_info, int reg)
6252 {
6253   asection * s;
6254   struct elf32_arm_link_hash_table *globals;
6255   char *tmp_name;
6256   struct elf_link_hash_entry *myh;
6257   struct bfd_link_hash_entry *bh;
6258   bfd_vma val;
6259 
6260   /* BX PC does not need a veneer.  */
6261   if (reg == 15)
6262     return;
6263 
6264   globals = elf32_arm_hash_table (link_info);
6265   BFD_ASSERT (globals != NULL);
6266   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
6267 
6268   /* Check if this veneer has already been allocated.  */
6269   if (globals->bx_glue_offset[reg])
6270     return;
6271 
6272   s = bfd_get_linker_section
6273     (globals->bfd_of_glue_owner, ARM_BX_GLUE_SECTION_NAME);
6274 
6275   BFD_ASSERT (s != NULL);
6276 
6277   /* Add symbol for veneer.  */
6278   tmp_name = (char *)
6279       bfd_malloc ((bfd_size_type) strlen (ARM_BX_GLUE_ENTRY_NAME) + 1);
6280 
6281   BFD_ASSERT (tmp_name);
6282 
6283   sprintf (tmp_name, ARM_BX_GLUE_ENTRY_NAME, reg);
6284 
6285   myh = elf_link_hash_lookup
6286     (&(globals)->root, tmp_name, FALSE, FALSE, FALSE);
6287 
6288   BFD_ASSERT (myh == NULL);
6289 
6290   bh = NULL;
6291   val = globals->bx_glue_size;
6292   _bfd_generic_link_add_one_symbol (link_info, globals->bfd_of_glue_owner,
6293 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6294 				    NULL, TRUE, FALSE, &bh);
6295 
6296   myh = (struct elf_link_hash_entry *) bh;
6297   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6298   myh->forced_local = 1;
6299 
6300   s->size += ARM_BX_VENEER_SIZE;
6301   globals->bx_glue_offset[reg] = globals->bx_glue_size | 2;
6302   globals->bx_glue_size += ARM_BX_VENEER_SIZE;
6303 }
6304 
6305 
6306 /* Add an entry to the code/data map for section SEC.  */
6307 
6308 static void
6309 elf32_arm_section_map_add (asection *sec, char type, bfd_vma vma)
6310 {
6311   struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
6312   unsigned int newidx;
6313 
6314   if (sec_data->map == NULL)
6315     {
6316       sec_data->map = (elf32_arm_section_map *)
6317 	  bfd_malloc (sizeof (elf32_arm_section_map));
6318       sec_data->mapcount = 0;
6319       sec_data->mapsize = 1;
6320     }
6321 
6322   newidx = sec_data->mapcount++;
6323 
6324   if (sec_data->mapcount > sec_data->mapsize)
6325     {
6326       sec_data->mapsize *= 2;
6327       sec_data->map = (elf32_arm_section_map *)
6328 	  bfd_realloc_or_free (sec_data->map, sec_data->mapsize
6329 			       * sizeof (elf32_arm_section_map));
6330     }
6331 
6332   if (sec_data->map)
6333     {
6334       sec_data->map[newidx].vma = vma;
6335       sec_data->map[newidx].type = type;
6336     }
6337 }
6338 
6339 
6340 /* Record information about a VFP11 denorm-erratum veneer.  Only ARM-mode
6341    veneers are handled for now.  */
6342 
6343 static bfd_vma
6344 record_vfp11_erratum_veneer (struct bfd_link_info *link_info,
6345 			     elf32_vfp11_erratum_list *branch,
6346 			     bfd *branch_bfd,
6347 			     asection *branch_sec,
6348 			     unsigned int offset)
6349 {
6350   asection *s;
6351   struct elf32_arm_link_hash_table *hash_table;
6352   char *tmp_name;
6353   struct elf_link_hash_entry *myh;
6354   struct bfd_link_hash_entry *bh;
6355   bfd_vma val;
6356   struct _arm_elf_section_data *sec_data;
6357   elf32_vfp11_erratum_list *newerr;
6358 
6359   hash_table = elf32_arm_hash_table (link_info);
6360   BFD_ASSERT (hash_table != NULL);
6361   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6362 
6363   s = bfd_get_linker_section
6364     (hash_table->bfd_of_glue_owner, VFP11_ERRATUM_VENEER_SECTION_NAME);
6365 
6366   sec_data = elf32_arm_section_data (s);
6367 
6368   BFD_ASSERT (s != NULL);
6369 
6370   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6371 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
6372 
6373   BFD_ASSERT (tmp_name);
6374 
6375   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
6376 	   hash_table->num_vfp11_fixes);
6377 
6378   myh = elf_link_hash_lookup
6379     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6380 
6381   BFD_ASSERT (myh == NULL);
6382 
6383   bh = NULL;
6384   val = hash_table->vfp11_erratum_glue_size;
6385   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6386 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6387 				    NULL, TRUE, FALSE, &bh);
6388 
6389   myh = (struct elf_link_hash_entry *) bh;
6390   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6391   myh->forced_local = 1;
6392 
6393   /* Link veneer back to calling location.  */
6394   sec_data->erratumcount += 1;
6395   newerr = (elf32_vfp11_erratum_list *)
6396       bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
6397 
6398   newerr->type = VFP11_ERRATUM_ARM_VENEER;
6399   newerr->vma = -1;
6400   newerr->u.v.branch = branch;
6401   newerr->u.v.id = hash_table->num_vfp11_fixes;
6402   branch->u.b.veneer = newerr;
6403 
6404   newerr->next = sec_data->erratumlist;
6405   sec_data->erratumlist = newerr;
6406 
6407   /* A symbol for the return from the veneer.  */
6408   sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
6409 	   hash_table->num_vfp11_fixes);
6410 
6411   myh = elf_link_hash_lookup
6412     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6413 
6414   if (myh != NULL)
6415     abort ();
6416 
6417   bh = NULL;
6418   val = offset + 4;
6419   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6420 				    branch_sec, val, NULL, TRUE, FALSE, &bh);
6421 
6422   myh = (struct elf_link_hash_entry *) bh;
6423   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6424   myh->forced_local = 1;
6425 
6426   free (tmp_name);
6427 
6428   /* Generate a mapping symbol for the veneer section, and explicitly add an
6429      entry for that symbol to the code/data map for the section.  */
6430   if (hash_table->vfp11_erratum_glue_size == 0)
6431     {
6432       bh = NULL;
6433       /* FIXME: Creates an ARM symbol.  Thumb mode will need attention if it
6434 	 ever requires this erratum fix.  */
6435       _bfd_generic_link_add_one_symbol (link_info,
6436 					hash_table->bfd_of_glue_owner, "$a",
6437 					BSF_LOCAL, s, 0, NULL,
6438 					TRUE, FALSE, &bh);
6439 
6440       myh = (struct elf_link_hash_entry *) bh;
6441       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6442       myh->forced_local = 1;
6443 
6444       /* The elf32_arm_init_maps function only cares about symbols from input
6445 	 BFDs.  We must make a note of this generated mapping symbol
6446 	 ourselves so that code byteswapping works properly in
6447 	 elf32_arm_write_section.  */
6448       elf32_arm_section_map_add (s, 'a', 0);
6449     }
6450 
6451   s->size += VFP11_ERRATUM_VENEER_SIZE;
6452   hash_table->vfp11_erratum_glue_size += VFP11_ERRATUM_VENEER_SIZE;
6453   hash_table->num_vfp11_fixes++;
6454 
6455   /* The offset of the veneer.  */
6456   return val;
6457 }
6458 
6459 /* Record information about a STM32L4XX STM erratum veneer.  Only THUMB-mode
6460    veneers need to be handled because used only in Cortex-M.  */
6461 
6462 static bfd_vma
6463 record_stm32l4xx_erratum_veneer (struct bfd_link_info *link_info,
6464 				 elf32_stm32l4xx_erratum_list *branch,
6465 				 bfd *branch_bfd,
6466 				 asection *branch_sec,
6467 				 unsigned int offset,
6468 				 bfd_size_type veneer_size)
6469 {
6470   asection *s;
6471   struct elf32_arm_link_hash_table *hash_table;
6472   char *tmp_name;
6473   struct elf_link_hash_entry *myh;
6474   struct bfd_link_hash_entry *bh;
6475   bfd_vma val;
6476   struct _arm_elf_section_data *sec_data;
6477   elf32_stm32l4xx_erratum_list *newerr;
6478 
6479   hash_table = elf32_arm_hash_table (link_info);
6480   BFD_ASSERT (hash_table != NULL);
6481   BFD_ASSERT (hash_table->bfd_of_glue_owner != NULL);
6482 
6483   s = bfd_get_linker_section
6484     (hash_table->bfd_of_glue_owner, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6485 
6486   BFD_ASSERT (s != NULL);
6487 
6488   sec_data = elf32_arm_section_data (s);
6489 
6490   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
6491 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
6492 
6493   BFD_ASSERT (tmp_name);
6494 
6495   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
6496 	   hash_table->num_stm32l4xx_fixes);
6497 
6498   myh = elf_link_hash_lookup
6499     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6500 
6501   BFD_ASSERT (myh == NULL);
6502 
6503   bh = NULL;
6504   val = hash_table->stm32l4xx_erratum_glue_size;
6505   _bfd_generic_link_add_one_symbol (link_info, hash_table->bfd_of_glue_owner,
6506 				    tmp_name, BSF_FUNCTION | BSF_LOCAL, s, val,
6507 				    NULL, TRUE, FALSE, &bh);
6508 
6509   myh = (struct elf_link_hash_entry *) bh;
6510   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6511   myh->forced_local = 1;
6512 
6513   /* Link veneer back to calling location.  */
6514   sec_data->stm32l4xx_erratumcount += 1;
6515   newerr = (elf32_stm32l4xx_erratum_list *)
6516       bfd_zmalloc (sizeof (elf32_stm32l4xx_erratum_list));
6517 
6518   newerr->type = STM32L4XX_ERRATUM_VENEER;
6519   newerr->vma = -1;
6520   newerr->u.v.branch = branch;
6521   newerr->u.v.id = hash_table->num_stm32l4xx_fixes;
6522   branch->u.b.veneer = newerr;
6523 
6524   newerr->next = sec_data->stm32l4xx_erratumlist;
6525   sec_data->stm32l4xx_erratumlist = newerr;
6526 
6527   /* A symbol for the return from the veneer.  */
6528   sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
6529 	   hash_table->num_stm32l4xx_fixes);
6530 
6531   myh = elf_link_hash_lookup
6532     (&(hash_table)->root, tmp_name, FALSE, FALSE, FALSE);
6533 
6534   if (myh != NULL)
6535     abort ();
6536 
6537   bh = NULL;
6538   val = offset + 4;
6539   _bfd_generic_link_add_one_symbol (link_info, branch_bfd, tmp_name, BSF_LOCAL,
6540 				    branch_sec, val, NULL, TRUE, FALSE, &bh);
6541 
6542   myh = (struct elf_link_hash_entry *) bh;
6543   myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
6544   myh->forced_local = 1;
6545 
6546   free (tmp_name);
6547 
6548   /* Generate a mapping symbol for the veneer section, and explicitly add an
6549      entry for that symbol to the code/data map for the section.  */
6550   if (hash_table->stm32l4xx_erratum_glue_size == 0)
6551     {
6552       bh = NULL;
6553       /* Creates a THUMB symbol since there is no other choice.  */
6554       _bfd_generic_link_add_one_symbol (link_info,
6555 					hash_table->bfd_of_glue_owner, "$t",
6556 					BSF_LOCAL, s, 0, NULL,
6557 					TRUE, FALSE, &bh);
6558 
6559       myh = (struct elf_link_hash_entry *) bh;
6560       myh->type = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
6561       myh->forced_local = 1;
6562 
6563       /* The elf32_arm_init_maps function only cares about symbols from input
6564 	 BFDs.  We must make a note of this generated mapping symbol
6565 	 ourselves so that code byteswapping works properly in
6566 	 elf32_arm_write_section.  */
6567       elf32_arm_section_map_add (s, 't', 0);
6568     }
6569 
6570   s->size += veneer_size;
6571   hash_table->stm32l4xx_erratum_glue_size += veneer_size;
6572   hash_table->num_stm32l4xx_fixes++;
6573 
6574   /* The offset of the veneer.  */
6575   return val;
6576 }
6577 
6578 #define ARM_GLUE_SECTION_FLAGS \
6579   (SEC_ALLOC | SEC_LOAD | SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_CODE \
6580    | SEC_READONLY | SEC_LINKER_CREATED)
6581 
6582 /* Create a fake section for use by the ARM backend of the linker.  */
6583 
6584 static bfd_boolean
6585 arm_make_glue_section (bfd * abfd, const char * name)
6586 {
6587   asection * sec;
6588 
6589   sec = bfd_get_linker_section (abfd, name);
6590   if (sec != NULL)
6591     /* Already made.  */
6592     return TRUE;
6593 
6594   sec = bfd_make_section_anyway_with_flags (abfd, name, ARM_GLUE_SECTION_FLAGS);
6595 
6596   if (sec == NULL
6597       || !bfd_set_section_alignment (abfd, sec, 2))
6598     return FALSE;
6599 
6600   /* Set the gc mark to prevent the section from being removed by garbage
6601      collection, despite the fact that no relocs refer to this section.  */
6602   sec->gc_mark = 1;
6603 
6604   return TRUE;
6605 }
6606 
6607 /* Set size of .plt entries.  This function is called from the
6608    linker scripts in ld/emultempl/{armelf}.em.  */
6609 
6610 void
6611 bfd_elf32_arm_use_long_plt (void)
6612 {
6613   elf32_arm_use_long_plt_entry = TRUE;
6614 }
6615 
6616 /* Add the glue sections to ABFD.  This function is called from the
6617    linker scripts in ld/emultempl/{armelf}.em.  */
6618 
6619 bfd_boolean
6620 bfd_elf32_arm_add_glue_sections_to_bfd (bfd *abfd,
6621 					struct bfd_link_info *info)
6622 {
6623   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
6624   bfd_boolean dostm32l4xx = globals
6625     && globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE;
6626   bfd_boolean addglue;
6627 
6628   /* If we are only performing a partial
6629      link do not bother adding the glue.  */
6630   if (bfd_link_relocatable (info))
6631     return TRUE;
6632 
6633   addglue = arm_make_glue_section (abfd, ARM2THUMB_GLUE_SECTION_NAME)
6634     && arm_make_glue_section (abfd, THUMB2ARM_GLUE_SECTION_NAME)
6635     && arm_make_glue_section (abfd, VFP11_ERRATUM_VENEER_SECTION_NAME)
6636     && arm_make_glue_section (abfd, ARM_BX_GLUE_SECTION_NAME);
6637 
6638   if (!dostm32l4xx)
6639     return addglue;
6640 
6641   return addglue
6642     && arm_make_glue_section (abfd, STM32L4XX_ERRATUM_VENEER_SECTION_NAME);
6643 }
6644 
6645 /* Mark output sections of veneers needing a dedicated one with SEC_KEEP.  This
6646    ensures they are not marked for deletion by
6647    strip_excluded_output_sections () when veneers are going to be created
6648    later.  Not doing so would trigger assert on empty section size in
6649    lang_size_sections_1 ().  */
6650 
6651 void
6652 bfd_elf32_arm_keep_private_stub_output_sections (struct bfd_link_info *info)
6653 {
6654   enum elf32_arm_stub_type stub_type;
6655 
6656   /* If we are only performing a partial
6657      link do not bother adding the glue.  */
6658   if (bfd_link_relocatable (info))
6659     return;
6660 
6661   for (stub_type = arm_stub_none + 1; stub_type < max_stub_type; stub_type++)
6662     {
6663       asection *out_sec;
6664       const char *out_sec_name;
6665 
6666       if (!arm_dedicated_stub_output_section_required (stub_type))
6667 	continue;
6668 
6669      out_sec_name = arm_dedicated_stub_output_section_name (stub_type);
6670      out_sec = bfd_get_section_by_name (info->output_bfd, out_sec_name);
6671      if (out_sec != NULL)
6672 	out_sec->flags |= SEC_KEEP;
6673     }
6674 }
6675 
6676 /* Select a BFD to be used to hold the sections used by the glue code.
6677    This function is called from the linker scripts in ld/emultempl/
6678    {armelf/pe}.em.  */
6679 
6680 bfd_boolean
6681 bfd_elf32_arm_get_bfd_for_interworking (bfd *abfd, struct bfd_link_info *info)
6682 {
6683   struct elf32_arm_link_hash_table *globals;
6684 
6685   /* If we are only performing a partial link
6686      do not bother getting a bfd to hold the glue.  */
6687   if (bfd_link_relocatable (info))
6688     return TRUE;
6689 
6690   /* Make sure we don't attach the glue sections to a dynamic object.  */
6691   BFD_ASSERT (!(abfd->flags & DYNAMIC));
6692 
6693   globals = elf32_arm_hash_table (info);
6694   BFD_ASSERT (globals != NULL);
6695 
6696   if (globals->bfd_of_glue_owner != NULL)
6697     return TRUE;
6698 
6699   /* Save the bfd for later use.  */
6700   globals->bfd_of_glue_owner = abfd;
6701 
6702   return TRUE;
6703 }
6704 
6705 static void
6706 check_use_blx (struct elf32_arm_link_hash_table *globals)
6707 {
6708   int cpu_arch;
6709 
6710   cpu_arch = bfd_elf_get_obj_attr_int (globals->obfd, OBJ_ATTR_PROC,
6711 				       Tag_CPU_arch);
6712 
6713   if (globals->fix_arm1176)
6714     {
6715       if (cpu_arch == TAG_CPU_ARCH_V6T2 || cpu_arch > TAG_CPU_ARCH_V6K)
6716 	globals->use_blx = 1;
6717     }
6718   else
6719     {
6720       if (cpu_arch > TAG_CPU_ARCH_V4T)
6721 	globals->use_blx = 1;
6722     }
6723 }
6724 
6725 bfd_boolean
6726 bfd_elf32_arm_process_before_allocation (bfd *abfd,
6727 					 struct bfd_link_info *link_info)
6728 {
6729   Elf_Internal_Shdr *symtab_hdr;
6730   Elf_Internal_Rela *internal_relocs = NULL;
6731   Elf_Internal_Rela *irel, *irelend;
6732   bfd_byte *contents = NULL;
6733 
6734   asection *sec;
6735   struct elf32_arm_link_hash_table *globals;
6736 
6737   /* If we are only performing a partial link do not bother
6738      to construct any glue.  */
6739   if (bfd_link_relocatable (link_info))
6740     return TRUE;
6741 
6742   /* Here we have a bfd that is to be included on the link.  We have a
6743      hook to do reloc rummaging, before section sizes are nailed down.  */
6744   globals = elf32_arm_hash_table (link_info);
6745   BFD_ASSERT (globals != NULL);
6746 
6747   check_use_blx (globals);
6748 
6749   if (globals->byteswap_code && !bfd_big_endian (abfd))
6750     {
6751       _bfd_error_handler (_("%B: BE8 images only valid in big-endian mode."),
6752 			  abfd);
6753       return FALSE;
6754     }
6755 
6756   /* PR 5398: If we have not decided to include any loadable sections in
6757      the output then we will not have a glue owner bfd.  This is OK, it
6758      just means that there is nothing else for us to do here.  */
6759   if (globals->bfd_of_glue_owner == NULL)
6760     return TRUE;
6761 
6762   /* Rummage around all the relocs and map the glue vectors.  */
6763   sec = abfd->sections;
6764 
6765   if (sec == NULL)
6766     return TRUE;
6767 
6768   for (; sec != NULL; sec = sec->next)
6769     {
6770       if (sec->reloc_count == 0)
6771 	continue;
6772 
6773       if ((sec->flags & SEC_EXCLUDE) != 0)
6774 	continue;
6775 
6776       symtab_hdr = & elf_symtab_hdr (abfd);
6777 
6778       /* Load the relocs.  */
6779       internal_relocs
6780 	= _bfd_elf_link_read_relocs (abfd, sec, NULL, NULL, FALSE);
6781 
6782       if (internal_relocs == NULL)
6783 	goto error_return;
6784 
6785       irelend = internal_relocs + sec->reloc_count;
6786       for (irel = internal_relocs; irel < irelend; irel++)
6787 	{
6788 	  long r_type;
6789 	  unsigned long r_index;
6790 
6791 	  struct elf_link_hash_entry *h;
6792 
6793 	  r_type = ELF32_R_TYPE (irel->r_info);
6794 	  r_index = ELF32_R_SYM (irel->r_info);
6795 
6796 	  /* These are the only relocation types we care about.  */
6797 	  if (   r_type != R_ARM_PC24
6798 	      && (r_type != R_ARM_V4BX || globals->fix_v4bx < 2))
6799 	    continue;
6800 
6801 	  /* Get the section contents if we haven't done so already.  */
6802 	  if (contents == NULL)
6803 	    {
6804 	      /* Get cached copy if it exists.  */
6805 	      if (elf_section_data (sec)->this_hdr.contents != NULL)
6806 		contents = elf_section_data (sec)->this_hdr.contents;
6807 	      else
6808 		{
6809 		  /* Go get them off disk.  */
6810 		  if (! bfd_malloc_and_get_section (abfd, sec, &contents))
6811 		    goto error_return;
6812 		}
6813 	    }
6814 
6815 	  if (r_type == R_ARM_V4BX)
6816 	    {
6817 	      int reg;
6818 
6819 	      reg = bfd_get_32 (abfd, contents + irel->r_offset) & 0xf;
6820 	      record_arm_bx_glue (link_info, reg);
6821 	      continue;
6822 	    }
6823 
6824 	  /* If the relocation is not against a symbol it cannot concern us.  */
6825 	  h = NULL;
6826 
6827 	  /* We don't care about local symbols.  */
6828 	  if (r_index < symtab_hdr->sh_info)
6829 	    continue;
6830 
6831 	  /* This is an external symbol.  */
6832 	  r_index -= symtab_hdr->sh_info;
6833 	  h = (struct elf_link_hash_entry *)
6834 	    elf_sym_hashes (abfd)[r_index];
6835 
6836 	  /* If the relocation is against a static symbol it must be within
6837 	     the current section and so cannot be a cross ARM/Thumb relocation.  */
6838 	  if (h == NULL)
6839 	    continue;
6840 
6841 	  /* If the call will go through a PLT entry then we do not need
6842 	     glue.  */
6843 	  if (globals->root.splt != NULL && h->plt.offset != (bfd_vma) -1)
6844 	    continue;
6845 
6846 	  switch (r_type)
6847 	    {
6848 	    case R_ARM_PC24:
6849 	      /* This one is a call from arm code.  We need to look up
6850 		 the target of the call.  If it is a thumb target, we
6851 		 insert glue.  */
6852 	      if (ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
6853 		  == ST_BRANCH_TO_THUMB)
6854 		record_arm_to_thumb_glue (link_info, h);
6855 	      break;
6856 
6857 	    default:
6858 	      abort ();
6859 	    }
6860 	}
6861 
6862       if (contents != NULL
6863 	  && elf_section_data (sec)->this_hdr.contents != contents)
6864 	free (contents);
6865       contents = NULL;
6866 
6867       if (internal_relocs != NULL
6868 	  && elf_section_data (sec)->relocs != internal_relocs)
6869 	free (internal_relocs);
6870       internal_relocs = NULL;
6871     }
6872 
6873   return TRUE;
6874 
6875 error_return:
6876   if (contents != NULL
6877       && elf_section_data (sec)->this_hdr.contents != contents)
6878     free (contents);
6879   if (internal_relocs != NULL
6880       && elf_section_data (sec)->relocs != internal_relocs)
6881     free (internal_relocs);
6882 
6883   return FALSE;
6884 }
6885 #endif
6886 
6887 
6888 /* Initialise maps of ARM/Thumb/data for input BFDs.  */
6889 
6890 void
6891 bfd_elf32_arm_init_maps (bfd *abfd)
6892 {
6893   Elf_Internal_Sym *isymbuf;
6894   Elf_Internal_Shdr *hdr;
6895   unsigned int i, localsyms;
6896 
6897   /* PR 7093: Make sure that we are dealing with an arm elf binary.  */
6898   if (! is_arm_elf (abfd))
6899     return;
6900 
6901   if ((abfd->flags & DYNAMIC) != 0)
6902     return;
6903 
6904   hdr = & elf_symtab_hdr (abfd);
6905   localsyms = hdr->sh_info;
6906 
6907   /* Obtain a buffer full of symbols for this BFD. The hdr->sh_info field
6908      should contain the number of local symbols, which should come before any
6909      global symbols.  Mapping symbols are always local.  */
6910   isymbuf = bfd_elf_get_elf_syms (abfd, hdr, localsyms, 0, NULL, NULL,
6911 				  NULL);
6912 
6913   /* No internal symbols read?  Skip this BFD.  */
6914   if (isymbuf == NULL)
6915     return;
6916 
6917   for (i = 0; i < localsyms; i++)
6918     {
6919       Elf_Internal_Sym *isym = &isymbuf[i];
6920       asection *sec = bfd_section_from_elf_index (abfd, isym->st_shndx);
6921       const char *name;
6922 
6923       if (sec != NULL
6924 	  && ELF_ST_BIND (isym->st_info) == STB_LOCAL)
6925 	{
6926 	  name = bfd_elf_string_from_elf_section (abfd,
6927 	    hdr->sh_link, isym->st_name);
6928 
6929 	  if (bfd_is_arm_special_symbol_name (name,
6930 					      BFD_ARM_SPECIAL_SYM_TYPE_MAP))
6931 	    elf32_arm_section_map_add (sec, name[1], isym->st_value);
6932 	}
6933     }
6934 }
6935 
6936 
6937 /* Auto-select enabling of Cortex-A8 erratum fix if the user didn't explicitly
6938    say what they wanted.  */
6939 
6940 void
6941 bfd_elf32_arm_set_cortex_a8_fix (bfd *obfd, struct bfd_link_info *link_info)
6942 {
6943   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6944   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6945 
6946   if (globals == NULL)
6947     return;
6948 
6949   if (globals->fix_cortex_a8 == -1)
6950     {
6951       /* Turn on Cortex-A8 erratum workaround for ARMv7-A.  */
6952       if (out_attr[Tag_CPU_arch].i == TAG_CPU_ARCH_V7
6953 	  && (out_attr[Tag_CPU_arch_profile].i == 'A'
6954 	      || out_attr[Tag_CPU_arch_profile].i == 0))
6955 	globals->fix_cortex_a8 = 1;
6956       else
6957 	globals->fix_cortex_a8 = 0;
6958     }
6959 }
6960 
6961 
6962 void
6963 bfd_elf32_arm_set_vfp11_fix (bfd *obfd, struct bfd_link_info *link_info)
6964 {
6965   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6966   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6967 
6968   if (globals == NULL)
6969     return;
6970   /* We assume that ARMv7+ does not need the VFP11 denorm erratum fix.  */
6971   if (out_attr[Tag_CPU_arch].i >= TAG_CPU_ARCH_V7)
6972     {
6973       switch (globals->vfp11_fix)
6974 	{
6975 	case BFD_ARM_VFP11_FIX_DEFAULT:
6976 	case BFD_ARM_VFP11_FIX_NONE:
6977 	  globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6978 	  break;
6979 
6980 	default:
6981 	  /* Give a warning, but do as the user requests anyway.  */
6982 	  (*_bfd_error_handler) (_("%B: warning: selected VFP11 erratum "
6983 	    "workaround is not necessary for target architecture"), obfd);
6984 	}
6985     }
6986   else if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_DEFAULT)
6987     /* For earlier architectures, we might need the workaround, but do not
6988        enable it by default.  If users is running with broken hardware, they
6989        must enable the erratum fix explicitly.  */
6990     globals->vfp11_fix = BFD_ARM_VFP11_FIX_NONE;
6991 }
6992 
6993 void
6994 bfd_elf32_arm_set_stm32l4xx_fix (bfd *obfd, struct bfd_link_info *link_info)
6995 {
6996   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
6997   obj_attribute *out_attr = elf_known_obj_attributes_proc (obfd);
6998 
6999   if (globals == NULL)
7000     return;
7001 
7002   /* We assume only Cortex-M4 may require the fix.  */
7003   if (out_attr[Tag_CPU_arch].i != TAG_CPU_ARCH_V7E_M
7004       || out_attr[Tag_CPU_arch_profile].i != 'M')
7005     {
7006       if (globals->stm32l4xx_fix != BFD_ARM_STM32L4XX_FIX_NONE)
7007 	/* Give a warning, but do as the user requests anyway.  */
7008 	(*_bfd_error_handler)
7009 	  (_("%B: warning: selected STM32L4XX erratum "
7010 	     "workaround is not necessary for target architecture"), obfd);
7011     }
7012 }
7013 
7014 enum bfd_arm_vfp11_pipe
7015 {
7016   VFP11_FMAC,
7017   VFP11_LS,
7018   VFP11_DS,
7019   VFP11_BAD
7020 };
7021 
7022 /* Return a VFP register number.  This is encoded as RX:X for single-precision
7023    registers, or X:RX for double-precision registers, where RX is the group of
7024    four bits in the instruction encoding and X is the single extension bit.
7025    RX and X fields are specified using their lowest (starting) bit.  The return
7026    value is:
7027 
7028      0...31: single-precision registers s0...s31
7029      32...63: double-precision registers d0...d31.
7030 
7031    Although X should be zero for VFP11 (encoding d0...d15 only), we might
7032    encounter VFP3 instructions, so we allow the full range for DP registers.  */
7033 
7034 static unsigned int
7035 bfd_arm_vfp11_regno (unsigned int insn, bfd_boolean is_double, unsigned int rx,
7036 		     unsigned int x)
7037 {
7038   if (is_double)
7039     return (((insn >> rx) & 0xf) | (((insn >> x) & 1) << 4)) + 32;
7040   else
7041     return (((insn >> rx) & 0xf) << 1) | ((insn >> x) & 1);
7042 }
7043 
7044 /* Set bits in *WMASK according to a register number REG as encoded by
7045    bfd_arm_vfp11_regno().  Ignore d16-d31.  */
7046 
7047 static void
7048 bfd_arm_vfp11_write_mask (unsigned int *wmask, unsigned int reg)
7049 {
7050   if (reg < 32)
7051     *wmask |= 1 << reg;
7052   else if (reg < 48)
7053     *wmask |= 3 << ((reg - 32) * 2);
7054 }
7055 
7056 /* Return TRUE if WMASK overwrites anything in REGS.  */
7057 
7058 static bfd_boolean
7059 bfd_arm_vfp11_antidependency (unsigned int wmask, int *regs, int numregs)
7060 {
7061   int i;
7062 
7063   for (i = 0; i < numregs; i++)
7064     {
7065       unsigned int reg = regs[i];
7066 
7067       if (reg < 32 && (wmask & (1 << reg)) != 0)
7068 	return TRUE;
7069 
7070       reg -= 32;
7071 
7072       if (reg >= 16)
7073 	continue;
7074 
7075       if ((wmask & (3 << (reg * 2))) != 0)
7076 	return TRUE;
7077     }
7078 
7079   return FALSE;
7080 }
7081 
7082 /* In this function, we're interested in two things: finding input registers
7083    for VFP data-processing instructions, and finding the set of registers which
7084    arbitrary VFP instructions may write to.  We use a 32-bit unsigned int to
7085    hold the written set, so FLDM etc. are easy to deal with (we're only
7086    interested in 32 SP registers or 16 dp registers, due to the VFP version
7087    implemented by the chip in question).  DP registers are marked by setting
7088    both SP registers in the write mask).  */
7089 
7090 static enum bfd_arm_vfp11_pipe
7091 bfd_arm_vfp11_insn_decode (unsigned int insn, unsigned int *destmask, int *regs,
7092 			   int *numregs)
7093 {
7094   enum bfd_arm_vfp11_pipe vpipe = VFP11_BAD;
7095   bfd_boolean is_double = ((insn & 0xf00) == 0xb00) ? 1 : 0;
7096 
7097   if ((insn & 0x0f000e10) == 0x0e000a00)  /* A data-processing insn.  */
7098     {
7099       unsigned int pqrs;
7100       unsigned int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
7101       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
7102 
7103       pqrs = ((insn & 0x00800000) >> 20)
7104 	   | ((insn & 0x00300000) >> 19)
7105 	   | ((insn & 0x00000040) >> 6);
7106 
7107       switch (pqrs)
7108 	{
7109 	case 0: /* fmac[sd].  */
7110 	case 1: /* fnmac[sd].  */
7111 	case 2: /* fmsc[sd].  */
7112 	case 3: /* fnmsc[sd].  */
7113 	  vpipe = VFP11_FMAC;
7114 	  bfd_arm_vfp11_write_mask (destmask, fd);
7115 	  regs[0] = fd;
7116 	  regs[1] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);  /* Fn.  */
7117 	  regs[2] = fm;
7118 	  *numregs = 3;
7119 	  break;
7120 
7121 	case 4: /* fmul[sd].  */
7122 	case 5: /* fnmul[sd].  */
7123 	case 6: /* fadd[sd].  */
7124 	case 7: /* fsub[sd].  */
7125 	  vpipe = VFP11_FMAC;
7126 	  goto vfp_binop;
7127 
7128 	case 8: /* fdiv[sd].  */
7129 	  vpipe = VFP11_DS;
7130 	  vfp_binop:
7131 	  bfd_arm_vfp11_write_mask (destmask, fd);
7132 	  regs[0] = bfd_arm_vfp11_regno (insn, is_double, 16, 7);   /* Fn.  */
7133 	  regs[1] = fm;
7134 	  *numregs = 2;
7135 	  break;
7136 
7137 	case 15: /* extended opcode.  */
7138 	  {
7139 	    unsigned int extn = ((insn >> 15) & 0x1e)
7140 			      | ((insn >> 7) & 1);
7141 
7142 	    switch (extn)
7143 	      {
7144 	      case 0: /* fcpy[sd].  */
7145 	      case 1: /* fabs[sd].  */
7146 	      case 2: /* fneg[sd].  */
7147 	      case 8: /* fcmp[sd].  */
7148 	      case 9: /* fcmpe[sd].  */
7149 	      case 10: /* fcmpz[sd].  */
7150 	      case 11: /* fcmpez[sd].  */
7151 	      case 16: /* fuito[sd].  */
7152 	      case 17: /* fsito[sd].  */
7153 	      case 24: /* ftoui[sd].  */
7154 	      case 25: /* ftouiz[sd].  */
7155 	      case 26: /* ftosi[sd].  */
7156 	      case 27: /* ftosiz[sd].  */
7157 		/* These instructions will not bounce due to underflow.  */
7158 		*numregs = 0;
7159 		vpipe = VFP11_FMAC;
7160 		break;
7161 
7162 	      case 3: /* fsqrt[sd].  */
7163 		/* fsqrt cannot underflow, but it can (perhaps) overwrite
7164 		   registers to cause the erratum in previous instructions.  */
7165 		bfd_arm_vfp11_write_mask (destmask, fd);
7166 		vpipe = VFP11_DS;
7167 		break;
7168 
7169 	      case 15: /* fcvt{ds,sd}.  */
7170 		{
7171 		  int rnum = 0;
7172 
7173 		  bfd_arm_vfp11_write_mask (destmask, fd);
7174 
7175 		  /* Only FCVTSD can underflow.  */
7176 		  if ((insn & 0x100) != 0)
7177 		    regs[rnum++] = fm;
7178 
7179 		  *numregs = rnum;
7180 
7181 		  vpipe = VFP11_FMAC;
7182 		}
7183 		break;
7184 
7185 	      default:
7186 		return VFP11_BAD;
7187 	      }
7188 	  }
7189 	  break;
7190 
7191 	default:
7192 	  return VFP11_BAD;
7193 	}
7194     }
7195   /* Two-register transfer.  */
7196   else if ((insn & 0x0fe00ed0) == 0x0c400a10)
7197     {
7198       unsigned int fm = bfd_arm_vfp11_regno (insn, is_double, 0, 5);
7199 
7200       if ((insn & 0x100000) == 0)
7201 	{
7202 	  if (is_double)
7203 	    bfd_arm_vfp11_write_mask (destmask, fm);
7204 	  else
7205 	    {
7206 	      bfd_arm_vfp11_write_mask (destmask, fm);
7207 	      bfd_arm_vfp11_write_mask (destmask, fm + 1);
7208 	    }
7209 	}
7210 
7211       vpipe = VFP11_LS;
7212     }
7213   else if ((insn & 0x0e100e00) == 0x0c100a00)  /* A load insn.  */
7214     {
7215       int fd = bfd_arm_vfp11_regno (insn, is_double, 12, 22);
7216       unsigned int puw = ((insn >> 21) & 0x1) | (((insn >> 23) & 3) << 1);
7217 
7218       switch (puw)
7219 	{
7220 	case 0: /* Two-reg transfer.  We should catch these above.  */
7221 	  abort ();
7222 
7223 	case 2: /* fldm[sdx].  */
7224 	case 3:
7225 	case 5:
7226 	  {
7227 	    unsigned int i, offset = insn & 0xff;
7228 
7229 	    if (is_double)
7230 	      offset >>= 1;
7231 
7232 	    for (i = fd; i < fd + offset; i++)
7233 	      bfd_arm_vfp11_write_mask (destmask, i);
7234 	  }
7235 	  break;
7236 
7237 	case 4: /* fld[sd].  */
7238 	case 6:
7239 	  bfd_arm_vfp11_write_mask (destmask, fd);
7240 	  break;
7241 
7242 	default:
7243 	  return VFP11_BAD;
7244 	}
7245 
7246       vpipe = VFP11_LS;
7247     }
7248   /* Single-register transfer. Note L==0.  */
7249   else if ((insn & 0x0f100e10) == 0x0e000a10)
7250     {
7251       unsigned int opcode = (insn >> 21) & 7;
7252       unsigned int fn = bfd_arm_vfp11_regno (insn, is_double, 16, 7);
7253 
7254       switch (opcode)
7255 	{
7256 	case 0: /* fmsr/fmdlr.  */
7257 	case 1: /* fmdhr.  */
7258 	  /* Mark fmdhr and fmdlr as writing to the whole of the DP
7259 	     destination register.  I don't know if this is exactly right,
7260 	     but it is the conservative choice.  */
7261 	  bfd_arm_vfp11_write_mask (destmask, fn);
7262 	  break;
7263 
7264 	case 7: /* fmxr.  */
7265 	  break;
7266 	}
7267 
7268       vpipe = VFP11_LS;
7269     }
7270 
7271   return vpipe;
7272 }
7273 
7274 
7275 static int elf32_arm_compare_mapping (const void * a, const void * b);
7276 
7277 
7278 /* Look for potentially-troublesome code sequences which might trigger the
7279    VFP11 denormal/antidependency erratum.  See, e.g., the ARM1136 errata sheet
7280    (available from ARM) for details of the erratum.  A short version is
7281    described in ld.texinfo.  */
7282 
7283 bfd_boolean
7284 bfd_elf32_arm_vfp11_erratum_scan (bfd *abfd, struct bfd_link_info *link_info)
7285 {
7286   asection *sec;
7287   bfd_byte *contents = NULL;
7288   int state = 0;
7289   int regs[3], numregs = 0;
7290   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7291   int use_vector = (globals->vfp11_fix == BFD_ARM_VFP11_FIX_VECTOR);
7292 
7293   if (globals == NULL)
7294     return FALSE;
7295 
7296   /* We use a simple FSM to match troublesome VFP11 instruction sequences.
7297      The states transition as follows:
7298 
7299        0 -> 1 (vector) or 0 -> 2 (scalar)
7300 	   A VFP FMAC-pipeline instruction has been seen. Fill
7301 	   regs[0]..regs[numregs-1] with its input operands. Remember this
7302 	   instruction in 'first_fmac'.
7303 
7304        1 -> 2
7305 	   Any instruction, except for a VFP instruction which overwrites
7306 	   regs[*].
7307 
7308        1 -> 3 [ -> 0 ]  or
7309        2 -> 3 [ -> 0 ]
7310 	   A VFP instruction has been seen which overwrites any of regs[*].
7311 	   We must make a veneer!  Reset state to 0 before examining next
7312 	   instruction.
7313 
7314        2 -> 0
7315 	   If we fail to match anything in state 2, reset to state 0 and reset
7316 	   the instruction pointer to the instruction after 'first_fmac'.
7317 
7318      If the VFP11 vector mode is in use, there must be at least two unrelated
7319      instructions between anti-dependent VFP11 instructions to properly avoid
7320      triggering the erratum, hence the use of the extra state 1.  */
7321 
7322   /* If we are only performing a partial link do not bother
7323      to construct any glue.  */
7324   if (bfd_link_relocatable (link_info))
7325     return TRUE;
7326 
7327   /* Skip if this bfd does not correspond to an ELF image.  */
7328   if (! is_arm_elf (abfd))
7329     return TRUE;
7330 
7331   /* We should have chosen a fix type by the time we get here.  */
7332   BFD_ASSERT (globals->vfp11_fix != BFD_ARM_VFP11_FIX_DEFAULT);
7333 
7334   if (globals->vfp11_fix == BFD_ARM_VFP11_FIX_NONE)
7335     return TRUE;
7336 
7337   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
7338   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7339     return TRUE;
7340 
7341   for (sec = abfd->sections; sec != NULL; sec = sec->next)
7342     {
7343       unsigned int i, span, first_fmac = 0, veneer_of_insn = 0;
7344       struct _arm_elf_section_data *sec_data;
7345 
7346       /* If we don't have executable progbits, we're not interested in this
7347 	 section.  Also skip if section is to be excluded.  */
7348       if (elf_section_type (sec) != SHT_PROGBITS
7349 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7350 	  || (sec->flags & SEC_EXCLUDE) != 0
7351 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7352 	  || sec->output_section == bfd_abs_section_ptr
7353 	  || strcmp (sec->name, VFP11_ERRATUM_VENEER_SECTION_NAME) == 0)
7354 	continue;
7355 
7356       sec_data = elf32_arm_section_data (sec);
7357 
7358       if (sec_data->mapcount == 0)
7359 	continue;
7360 
7361       if (elf_section_data (sec)->this_hdr.contents != NULL)
7362 	contents = elf_section_data (sec)->this_hdr.contents;
7363       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7364 	goto error_return;
7365 
7366       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7367 	     elf32_arm_compare_mapping);
7368 
7369       for (span = 0; span < sec_data->mapcount; span++)
7370 	{
7371 	  unsigned int span_start = sec_data->map[span].vma;
7372 	  unsigned int span_end = (span == sec_data->mapcount - 1)
7373 				  ? sec->size : sec_data->map[span + 1].vma;
7374 	  char span_type = sec_data->map[span].type;
7375 
7376 	  /* FIXME: Only ARM mode is supported at present.  We may need to
7377 	     support Thumb-2 mode also at some point.  */
7378 	  if (span_type != 'a')
7379 	    continue;
7380 
7381 	  for (i = span_start; i < span_end;)
7382 	    {
7383 	      unsigned int next_i = i + 4;
7384 	      unsigned int insn = bfd_big_endian (abfd)
7385 		? (contents[i] << 24)
7386 		  | (contents[i + 1] << 16)
7387 		  | (contents[i + 2] << 8)
7388 		  | contents[i + 3]
7389 		: (contents[i + 3] << 24)
7390 		  | (contents[i + 2] << 16)
7391 		  | (contents[i + 1] << 8)
7392 		  | contents[i];
7393 	      unsigned int writemask = 0;
7394 	      enum bfd_arm_vfp11_pipe vpipe;
7395 
7396 	      switch (state)
7397 		{
7398 		case 0:
7399 		  vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask, regs,
7400 						    &numregs);
7401 		  /* I'm assuming the VFP11 erratum can trigger with denorm
7402 		     operands on either the FMAC or the DS pipeline. This might
7403 		     lead to slightly overenthusiastic veneer insertion.  */
7404 		  if (vpipe == VFP11_FMAC || vpipe == VFP11_DS)
7405 		    {
7406 		      state = use_vector ? 1 : 2;
7407 		      first_fmac = i;
7408 		      veneer_of_insn = insn;
7409 		    }
7410 		  break;
7411 
7412 		case 1:
7413 		  {
7414 		    int other_regs[3], other_numregs;
7415 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7416 						      other_regs,
7417 						      &other_numregs);
7418 		    if (vpipe != VFP11_BAD
7419 			&& bfd_arm_vfp11_antidependency (writemask, regs,
7420 							 numregs))
7421 		      state = 3;
7422 		    else
7423 		      state = 2;
7424 		  }
7425 		  break;
7426 
7427 		case 2:
7428 		  {
7429 		    int other_regs[3], other_numregs;
7430 		    vpipe = bfd_arm_vfp11_insn_decode (insn, &writemask,
7431 						      other_regs,
7432 						      &other_numregs);
7433 		    if (vpipe != VFP11_BAD
7434 			&& bfd_arm_vfp11_antidependency (writemask, regs,
7435 							 numregs))
7436 		      state = 3;
7437 		    else
7438 		      {
7439 			state = 0;
7440 			next_i = first_fmac + 4;
7441 		      }
7442 		  }
7443 		  break;
7444 
7445 		case 3:
7446 		  abort ();  /* Should be unreachable.  */
7447 		}
7448 
7449 	      if (state == 3)
7450 		{
7451 		  elf32_vfp11_erratum_list *newerr =(elf32_vfp11_erratum_list *)
7452 		      bfd_zmalloc (sizeof (elf32_vfp11_erratum_list));
7453 
7454 		  elf32_arm_section_data (sec)->erratumcount += 1;
7455 
7456 		  newerr->u.b.vfp_insn = veneer_of_insn;
7457 
7458 		  switch (span_type)
7459 		    {
7460 		    case 'a':
7461 		      newerr->type = VFP11_ERRATUM_BRANCH_TO_ARM_VENEER;
7462 		      break;
7463 
7464 		    default:
7465 		      abort ();
7466 		    }
7467 
7468 		  record_vfp11_erratum_veneer (link_info, newerr, abfd, sec,
7469 					       first_fmac);
7470 
7471 		  newerr->vma = -1;
7472 
7473 		  newerr->next = sec_data->erratumlist;
7474 		  sec_data->erratumlist = newerr;
7475 
7476 		  state = 0;
7477 		}
7478 
7479 	      i = next_i;
7480 	    }
7481 	}
7482 
7483       if (contents != NULL
7484 	  && elf_section_data (sec)->this_hdr.contents != contents)
7485 	free (contents);
7486       contents = NULL;
7487     }
7488 
7489   return TRUE;
7490 
7491 error_return:
7492   if (contents != NULL
7493       && elf_section_data (sec)->this_hdr.contents != contents)
7494     free (contents);
7495 
7496   return FALSE;
7497 }
7498 
7499 /* Find virtual-memory addresses for VFP11 erratum veneers and return locations
7500    after sections have been laid out, using specially-named symbols.  */
7501 
7502 void
7503 bfd_elf32_arm_vfp11_fix_veneer_locations (bfd *abfd,
7504 					  struct bfd_link_info *link_info)
7505 {
7506   asection *sec;
7507   struct elf32_arm_link_hash_table *globals;
7508   char *tmp_name;
7509 
7510   if (bfd_link_relocatable (link_info))
7511     return;
7512 
7513   /* Skip if this bfd does not correspond to an ELF image.  */
7514   if (! is_arm_elf (abfd))
7515     return;
7516 
7517   globals = elf32_arm_hash_table (link_info);
7518   if (globals == NULL)
7519     return;
7520 
7521   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7522 				  (VFP11_ERRATUM_VENEER_ENTRY_NAME) + 10);
7523 
7524   for (sec = abfd->sections; sec != NULL; sec = sec->next)
7525     {
7526       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7527       elf32_vfp11_erratum_list *errnode = sec_data->erratumlist;
7528 
7529       for (; errnode != NULL; errnode = errnode->next)
7530 	{
7531 	  struct elf_link_hash_entry *myh;
7532 	  bfd_vma vma;
7533 
7534 	  switch (errnode->type)
7535 	    {
7536 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
7537 	    case VFP11_ERRATUM_BRANCH_TO_THUMB_VENEER:
7538 	      /* Find veneer symbol.  */
7539 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME,
7540 		       errnode->u.b.veneer->u.v.id);
7541 
7542 	      myh = elf_link_hash_lookup
7543 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7544 
7545 	      if (myh == NULL)
7546 		(*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7547 					 "`%s'"), abfd, tmp_name);
7548 
7549 	      vma = myh->root.u.def.section->output_section->vma
7550 		    + myh->root.u.def.section->output_offset
7551 		    + myh->root.u.def.value;
7552 
7553 	      errnode->u.b.veneer->vma = vma;
7554 	      break;
7555 
7556 	    case VFP11_ERRATUM_ARM_VENEER:
7557 	    case VFP11_ERRATUM_THUMB_VENEER:
7558 	      /* Find return location.  */
7559 	      sprintf (tmp_name, VFP11_ERRATUM_VENEER_ENTRY_NAME "_r",
7560 		       errnode->u.v.id);
7561 
7562 	      myh = elf_link_hash_lookup
7563 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7564 
7565 	      if (myh == NULL)
7566 		(*_bfd_error_handler) (_("%B: unable to find VFP11 veneer "
7567 					 "`%s'"), abfd, tmp_name);
7568 
7569 	      vma = myh->root.u.def.section->output_section->vma
7570 		    + myh->root.u.def.section->output_offset
7571 		    + myh->root.u.def.value;
7572 
7573 	      errnode->u.v.branch->vma = vma;
7574 	      break;
7575 
7576 	    default:
7577 	      abort ();
7578 	    }
7579 	}
7580     }
7581 
7582   free (tmp_name);
7583 }
7584 
7585 /* Find virtual-memory addresses for STM32L4XX erratum veneers and
7586    return locations after sections have been laid out, using
7587    specially-named symbols.  */
7588 
7589 void
7590 bfd_elf32_arm_stm32l4xx_fix_veneer_locations (bfd *abfd,
7591 					      struct bfd_link_info *link_info)
7592 {
7593   asection *sec;
7594   struct elf32_arm_link_hash_table *globals;
7595   char *tmp_name;
7596 
7597   if (bfd_link_relocatable (link_info))
7598     return;
7599 
7600   /* Skip if this bfd does not correspond to an ELF image.  */
7601   if (! is_arm_elf (abfd))
7602     return;
7603 
7604   globals = elf32_arm_hash_table (link_info);
7605   if (globals == NULL)
7606     return;
7607 
7608   tmp_name = (char *) bfd_malloc ((bfd_size_type) strlen
7609 				  (STM32L4XX_ERRATUM_VENEER_ENTRY_NAME) + 10);
7610 
7611   for (sec = abfd->sections; sec != NULL; sec = sec->next)
7612     {
7613       struct _arm_elf_section_data *sec_data = elf32_arm_section_data (sec);
7614       elf32_stm32l4xx_erratum_list *errnode = sec_data->stm32l4xx_erratumlist;
7615 
7616       for (; errnode != NULL; errnode = errnode->next)
7617 	{
7618 	  struct elf_link_hash_entry *myh;
7619 	  bfd_vma vma;
7620 
7621 	  switch (errnode->type)
7622 	    {
7623 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
7624 	      /* Find veneer symbol.  */
7625 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME,
7626 		       errnode->u.b.veneer->u.v.id);
7627 
7628 	      myh = elf_link_hash_lookup
7629 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7630 
7631 	      if (myh == NULL)
7632 		(*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7633 					 "`%s'"), abfd, tmp_name);
7634 
7635 	      vma = myh->root.u.def.section->output_section->vma
7636 		+ myh->root.u.def.section->output_offset
7637 		+ myh->root.u.def.value;
7638 
7639 	      errnode->u.b.veneer->vma = vma;
7640 	      break;
7641 
7642 	    case STM32L4XX_ERRATUM_VENEER:
7643 	      /* Find return location.  */
7644 	      sprintf (tmp_name, STM32L4XX_ERRATUM_VENEER_ENTRY_NAME "_r",
7645 		       errnode->u.v.id);
7646 
7647 	      myh = elf_link_hash_lookup
7648 		(&(globals)->root, tmp_name, FALSE, FALSE, TRUE);
7649 
7650 	      if (myh == NULL)
7651 		(*_bfd_error_handler) (_("%B: unable to find STM32L4XX veneer "
7652 					 "`%s'"), abfd, tmp_name);
7653 
7654 	      vma = myh->root.u.def.section->output_section->vma
7655 		+ myh->root.u.def.section->output_offset
7656 		+ myh->root.u.def.value;
7657 
7658 	      errnode->u.v.branch->vma = vma;
7659 	      break;
7660 
7661 	    default:
7662 	      abort ();
7663 	    }
7664 	}
7665     }
7666 
7667   free (tmp_name);
7668 }
7669 
7670 static inline bfd_boolean
7671 is_thumb2_ldmia (const insn32 insn)
7672 {
7673   /* Encoding T2: LDM<c>.W <Rn>{!},<registers>
7674      1110 - 1000 - 10W1 - rrrr - PM (0) l - llll - llll - llll.  */
7675   return (insn & 0xffd02000) == 0xe8900000;
7676 }
7677 
7678 static inline bfd_boolean
7679 is_thumb2_ldmdb (const insn32 insn)
7680 {
7681   /* Encoding T1: LDMDB<c> <Rn>{!},<registers>
7682      1110 - 1001 - 00W1 - rrrr - PM (0) l - llll - llll - llll.  */
7683   return (insn & 0xffd02000) == 0xe9100000;
7684 }
7685 
7686 static inline bfd_boolean
7687 is_thumb2_vldm (const insn32 insn)
7688 {
7689   /* A6.5 Extension register load or store instruction
7690      A7.7.229
7691      We look for SP 32-bit and DP 64-bit registers.
7692      Encoding T1 VLDM{mode}<c> <Rn>{!}, <list>
7693      <list> is consecutive 64-bit registers
7694      1110 - 110P - UDW1 - rrrr - vvvv - 1011 - iiii - iiii
7695      Encoding T2 VLDM{mode}<c> <Rn>{!}, <list>
7696      <list> is consecutive 32-bit registers
7697      1110 - 110P - UDW1 - rrrr - vvvv - 1010 - iiii - iiii
7698      if P==0 && U==1 && W==1 && Rn=1101 VPOP
7699      if PUW=010 || PUW=011 || PUW=101 VLDM.  */
7700   return
7701     (((insn & 0xfe100f00) == 0xec100b00) ||
7702      ((insn & 0xfe100f00) == 0xec100a00))
7703     && /* (IA without !).  */
7704     (((((insn << 7) >> 28) & 0xd) == 0x4)
7705      /* (IA with !), includes VPOP (when reg number is SP).  */
7706      || ((((insn << 7) >> 28) & 0xd) == 0x5)
7707      /* (DB with !).  */
7708      || ((((insn << 7) >> 28) & 0xd) == 0x9));
7709 }
7710 
7711 /* STM STM32L4XX erratum : This function assumes that it receives an LDM or
7712    VLDM opcode and:
7713  - computes the number and the mode of memory accesses
7714  - decides if the replacement should be done:
7715    . replaces only if > 8-word accesses
7716    . or (testing purposes only) replaces all accesses.  */
7717 
7718 static bfd_boolean
7719 stm32l4xx_need_create_replacing_stub (const insn32 insn,
7720 				      bfd_arm_stm32l4xx_fix stm32l4xx_fix)
7721 {
7722   int nb_words = 0;
7723 
7724   /* The field encoding the register list is the same for both LDMIA
7725      and LDMDB encodings.  */
7726   if (is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn))
7727     nb_words = popcount (insn & 0x0000ffff);
7728   else if (is_thumb2_vldm (insn))
7729    nb_words = (insn & 0xff);
7730 
7731   /* DEFAULT mode accounts for the real bug condition situation,
7732      ALL mode inserts stubs for each LDM/VLDM instruction (testing).  */
7733   return
7734     (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_DEFAULT) ? nb_words > 8 :
7735     (stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_ALL) ? TRUE : FALSE;
7736 }
7737 
7738 /* Look for potentially-troublesome code sequences which might trigger
7739    the STM STM32L4XX erratum.  */
7740 
7741 bfd_boolean
7742 bfd_elf32_arm_stm32l4xx_erratum_scan (bfd *abfd,
7743 				      struct bfd_link_info *link_info)
7744 {
7745   asection *sec;
7746   bfd_byte *contents = NULL;
7747   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
7748 
7749   if (globals == NULL)
7750     return FALSE;
7751 
7752   /* If we are only performing a partial link do not bother
7753      to construct any glue.  */
7754   if (bfd_link_relocatable (link_info))
7755     return TRUE;
7756 
7757   /* Skip if this bfd does not correspond to an ELF image.  */
7758   if (! is_arm_elf (abfd))
7759     return TRUE;
7760 
7761   if (globals->stm32l4xx_fix == BFD_ARM_STM32L4XX_FIX_NONE)
7762     return TRUE;
7763 
7764   /* Skip this BFD if it corresponds to an executable or dynamic object.  */
7765   if ((abfd->flags & (EXEC_P | DYNAMIC)) != 0)
7766     return TRUE;
7767 
7768   for (sec = abfd->sections; sec != NULL; sec = sec->next)
7769     {
7770       unsigned int i, span;
7771       struct _arm_elf_section_data *sec_data;
7772 
7773       /* If we don't have executable progbits, we're not interested in this
7774 	 section.  Also skip if section is to be excluded.  */
7775       if (elf_section_type (sec) != SHT_PROGBITS
7776 	  || (elf_section_flags (sec) & SHF_EXECINSTR) == 0
7777 	  || (sec->flags & SEC_EXCLUDE) != 0
7778 	  || sec->sec_info_type == SEC_INFO_TYPE_JUST_SYMS
7779 	  || sec->output_section == bfd_abs_section_ptr
7780 	  || strcmp (sec->name, STM32L4XX_ERRATUM_VENEER_SECTION_NAME) == 0)
7781 	continue;
7782 
7783       sec_data = elf32_arm_section_data (sec);
7784 
7785       if (sec_data->mapcount == 0)
7786 	continue;
7787 
7788       if (elf_section_data (sec)->this_hdr.contents != NULL)
7789 	contents = elf_section_data (sec)->this_hdr.contents;
7790       else if (! bfd_malloc_and_get_section (abfd, sec, &contents))
7791 	goto error_return;
7792 
7793       qsort (sec_data->map, sec_data->mapcount, sizeof (elf32_arm_section_map),
7794 	     elf32_arm_compare_mapping);
7795 
7796       for (span = 0; span < sec_data->mapcount; span++)
7797 	{
7798 	  unsigned int span_start = sec_data->map[span].vma;
7799 	  unsigned int span_end = (span == sec_data->mapcount - 1)
7800 	    ? sec->size : sec_data->map[span + 1].vma;
7801 	  char span_type = sec_data->map[span].type;
7802 	  int itblock_current_pos = 0;
7803 
7804 	  /* Only Thumb2 mode need be supported with this CM4 specific
7805 	     code, we should not encounter any arm mode eg span_type
7806 	     != 'a'.  */
7807 	  if (span_type != 't')
7808 	    continue;
7809 
7810 	  for (i = span_start; i < span_end;)
7811 	    {
7812 	      unsigned int insn = bfd_get_16 (abfd, &contents[i]);
7813 	      bfd_boolean insn_32bit = FALSE;
7814 	      bfd_boolean is_ldm = FALSE;
7815 	      bfd_boolean is_vldm = FALSE;
7816 	      bfd_boolean is_not_last_in_it_block = FALSE;
7817 
7818 	      /* The first 16-bits of all 32-bit thumb2 instructions start
7819 		 with opcode[15..13]=0b111 and the encoded op1 can be anything
7820 		 except opcode[12..11]!=0b00.
7821 		 See 32-bit Thumb instruction encoding.  */
7822 	      if ((insn & 0xe000) == 0xe000 && (insn & 0x1800) != 0x0000)
7823 		insn_32bit = TRUE;
7824 
7825 	      /* Compute the predicate that tells if the instruction
7826 		 is concerned by the IT block
7827 		 - Creates an error if there is a ldm that is not
7828 		   last in the IT block thus cannot be replaced
7829 		 - Otherwise we can create a branch at the end of the
7830 		   IT block, it will be controlled naturally by IT
7831 		   with the proper pseudo-predicate
7832 		 - So the only interesting predicate is the one that
7833 		   tells that we are not on the last item of an IT
7834 		   block.  */
7835 	      if (itblock_current_pos != 0)
7836 		  is_not_last_in_it_block = !!--itblock_current_pos;
7837 
7838 	      if (insn_32bit)
7839 		{
7840 		  /* Load the rest of the insn (in manual-friendly order).  */
7841 		  insn = (insn << 16) | bfd_get_16 (abfd, &contents[i + 2]);
7842 		  is_ldm = is_thumb2_ldmia (insn) || is_thumb2_ldmdb (insn);
7843 		  is_vldm = is_thumb2_vldm (insn);
7844 
7845 		  /* Veneers are created for (v)ldm depending on
7846 		     option flags and memory accesses conditions; but
7847 		     if the instruction is not the last instruction of
7848 		     an IT block, we cannot create a jump there, so we
7849 		     bail out.  */
7850 		    if ((is_ldm || is_vldm) &&
7851 			stm32l4xx_need_create_replacing_stub
7852 			(insn, globals->stm32l4xx_fix))
7853 		      {
7854 			if (is_not_last_in_it_block)
7855 			  {
7856 			    (*_bfd_error_handler)
7857 			      /* Note - overlong line used here to allow for translation.  */
7858 			      (_("\
7859 %B(%A+0x%lx): error: multiple load detected in non-last IT block instruction : STM32L4XX veneer cannot be generated.\n"
7860 				 "Use gcc option -mrestrict-it to generate only one instruction per IT block.\n"),
7861 			       abfd, sec, (long)i);
7862 			  }
7863 			else
7864 			  {
7865 			    elf32_stm32l4xx_erratum_list *newerr =
7866 			      (elf32_stm32l4xx_erratum_list *)
7867 			      bfd_zmalloc
7868 			      (sizeof (elf32_stm32l4xx_erratum_list));
7869 
7870 			    elf32_arm_section_data (sec)
7871 			      ->stm32l4xx_erratumcount += 1;
7872 			    newerr->u.b.insn = insn;
7873 			    /* We create only thumb branches.  */
7874 			    newerr->type =
7875 			      STM32L4XX_ERRATUM_BRANCH_TO_VENEER;
7876 			    record_stm32l4xx_erratum_veneer
7877 			      (link_info, newerr, abfd, sec,
7878 			       i,
7879 			       is_ldm ?
7880 			       STM32L4XX_ERRATUM_LDM_VENEER_SIZE:
7881 			       STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
7882 			    newerr->vma = -1;
7883 			    newerr->next = sec_data->stm32l4xx_erratumlist;
7884 			    sec_data->stm32l4xx_erratumlist = newerr;
7885 			  }
7886 		      }
7887 		}
7888 	      else
7889 		{
7890 		  /* A7.7.37 IT p208
7891 		     IT blocks are only encoded in T1
7892 		     Encoding T1: IT{x{y{z}}} <firstcond>
7893 		     1 0 1 1 - 1 1 1 1 - firstcond - mask
7894 		     if mask = '0000' then see 'related encodings'
7895 		     We don't deal with UNPREDICTABLE, just ignore these.
7896 		     There can be no nested IT blocks so an IT block
7897 		     is naturally a new one for which it is worth
7898 		     computing its size.  */
7899 		  bfd_boolean is_newitblock = ((insn & 0xff00) == 0xbf00) &&
7900 		    ((insn & 0x000f) != 0x0000);
7901 		  /* If we have a new IT block we compute its size.  */
7902 		  if (is_newitblock)
7903 		    {
7904 		      /* Compute the number of instructions controlled
7905 			 by the IT block, it will be used to decide
7906 			 whether we are inside an IT block or not.  */
7907 		      unsigned int mask = insn & 0x000f;
7908 		      itblock_current_pos = 4 - ctz (mask);
7909 		    }
7910 		}
7911 
7912 	      i += insn_32bit ? 4 : 2;
7913 	    }
7914 	}
7915 
7916       if (contents != NULL
7917 	  && elf_section_data (sec)->this_hdr.contents != contents)
7918 	free (contents);
7919       contents = NULL;
7920     }
7921 
7922   return TRUE;
7923 
7924 error_return:
7925   if (contents != NULL
7926       && elf_section_data (sec)->this_hdr.contents != contents)
7927     free (contents);
7928 
7929   return FALSE;
7930 }
7931 
7932 /* Set target relocation values needed during linking.  */
7933 
7934 void
7935 bfd_elf32_arm_set_target_relocs (struct bfd *output_bfd,
7936 				 struct bfd_link_info *link_info,
7937 				 int target1_is_rel,
7938 				 char * target2_type,
7939 				 int fix_v4bx,
7940 				 int use_blx,
7941 				 bfd_arm_vfp11_fix vfp11_fix,
7942 				 bfd_arm_stm32l4xx_fix stm32l4xx_fix,
7943 				 int no_enum_warn, int no_wchar_warn,
7944 				 int pic_veneer, int fix_cortex_a8,
7945 				 int fix_arm1176)
7946 {
7947   struct elf32_arm_link_hash_table *globals;
7948 
7949   globals = elf32_arm_hash_table (link_info);
7950   if (globals == NULL)
7951     return;
7952 
7953   globals->target1_is_rel = target1_is_rel;
7954   if (strcmp (target2_type, "rel") == 0)
7955     globals->target2_reloc = R_ARM_REL32;
7956   else if (strcmp (target2_type, "abs") == 0)
7957     globals->target2_reloc = R_ARM_ABS32;
7958   else if (strcmp (target2_type, "got-rel") == 0)
7959     globals->target2_reloc = R_ARM_GOT_PREL;
7960   else
7961     {
7962       _bfd_error_handler (_("Invalid TARGET2 relocation type '%s'."),
7963 			  target2_type);
7964     }
7965   globals->fix_v4bx = fix_v4bx;
7966   globals->use_blx |= use_blx;
7967   globals->vfp11_fix = vfp11_fix;
7968   globals->stm32l4xx_fix = stm32l4xx_fix;
7969   globals->pic_veneer = pic_veneer;
7970   globals->fix_cortex_a8 = fix_cortex_a8;
7971   globals->fix_arm1176 = fix_arm1176;
7972 
7973   BFD_ASSERT (is_arm_elf (output_bfd));
7974   elf_arm_tdata (output_bfd)->no_enum_size_warning = no_enum_warn;
7975   elf_arm_tdata (output_bfd)->no_wchar_size_warning = no_wchar_warn;
7976 }
7977 
7978 /* Replace the target offset of a Thumb bl or b.w instruction.  */
7979 
7980 static void
7981 insert_thumb_branch (bfd *abfd, long int offset, bfd_byte *insn)
7982 {
7983   bfd_vma upper;
7984   bfd_vma lower;
7985   int reloc_sign;
7986 
7987   BFD_ASSERT ((offset & 1) == 0);
7988 
7989   upper = bfd_get_16 (abfd, insn);
7990   lower = bfd_get_16 (abfd, insn + 2);
7991   reloc_sign = (offset < 0) ? 1 : 0;
7992   upper = (upper & ~(bfd_vma) 0x7ff)
7993 	  | ((offset >> 12) & 0x3ff)
7994 	  | (reloc_sign << 10);
7995   lower = (lower & ~(bfd_vma) 0x2fff)
7996 	  | (((!((offset >> 23) & 1)) ^ reloc_sign) << 13)
7997 	  | (((!((offset >> 22) & 1)) ^ reloc_sign) << 11)
7998 	  | ((offset >> 1) & 0x7ff);
7999   bfd_put_16 (abfd, upper, insn);
8000   bfd_put_16 (abfd, lower, insn + 2);
8001 }
8002 
8003 /* Thumb code calling an ARM function.  */
8004 
8005 static int
8006 elf32_thumb_to_arm_stub (struct bfd_link_info * info,
8007 			 const char *           name,
8008 			 bfd *                  input_bfd,
8009 			 bfd *                  output_bfd,
8010 			 asection *             input_section,
8011 			 bfd_byte *             hit_data,
8012 			 asection *             sym_sec,
8013 			 bfd_vma                offset,
8014 			 bfd_signed_vma         addend,
8015 			 bfd_vma                val,
8016 			 char **error_message)
8017 {
8018   asection * s = 0;
8019   bfd_vma my_offset;
8020   long int ret_offset;
8021   struct elf_link_hash_entry * myh;
8022   struct elf32_arm_link_hash_table * globals;
8023 
8024   myh = find_thumb_glue (info, name, error_message);
8025   if (myh == NULL)
8026     return FALSE;
8027 
8028   globals = elf32_arm_hash_table (info);
8029   BFD_ASSERT (globals != NULL);
8030   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8031 
8032   my_offset = myh->root.u.def.value;
8033 
8034   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8035 			      THUMB2ARM_GLUE_SECTION_NAME);
8036 
8037   BFD_ASSERT (s != NULL);
8038   BFD_ASSERT (s->contents != NULL);
8039   BFD_ASSERT (s->output_section != NULL);
8040 
8041   if ((my_offset & 0x01) == 0x01)
8042     {
8043       if (sym_sec != NULL
8044 	  && sym_sec->owner != NULL
8045 	  && !INTERWORK_FLAG (sym_sec->owner))
8046 	{
8047 	  (*_bfd_error_handler)
8048 	    (_("%B(%s): warning: interworking not enabled.\n"
8049 	       "  first occurrence: %B: Thumb call to ARM"),
8050 	     sym_sec->owner, input_bfd, name);
8051 
8052 	  return FALSE;
8053 	}
8054 
8055       --my_offset;
8056       myh->root.u.def.value = my_offset;
8057 
8058       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a1_bx_pc_insn,
8059 		      s->contents + my_offset);
8060 
8061       put_thumb_insn (globals, output_bfd, (bfd_vma) t2a2_noop_insn,
8062 		      s->contents + my_offset + 2);
8063 
8064       ret_offset =
8065 	/* Address of destination of the stub.  */
8066 	((bfd_signed_vma) val)
8067 	- ((bfd_signed_vma)
8068 	   /* Offset from the start of the current section
8069 	      to the start of the stubs.  */
8070 	   (s->output_offset
8071 	    /* Offset of the start of this stub from the start of the stubs.  */
8072 	    + my_offset
8073 	    /* Address of the start of the current section.  */
8074 	    + s->output_section->vma)
8075 	   /* The branch instruction is 4 bytes into the stub.  */
8076 	   + 4
8077 	   /* ARM branches work from the pc of the instruction + 8.  */
8078 	   + 8);
8079 
8080       put_arm_insn (globals, output_bfd,
8081 		    (bfd_vma) t2a3_b_insn | ((ret_offset >> 2) & 0x00FFFFFF),
8082 		    s->contents + my_offset + 4);
8083     }
8084 
8085   BFD_ASSERT (my_offset <= globals->thumb_glue_size);
8086 
8087   /* Now go back and fix up the original BL insn to point to here.  */
8088   ret_offset =
8089     /* Address of where the stub is located.  */
8090     (s->output_section->vma + s->output_offset + my_offset)
8091      /* Address of where the BL is located.  */
8092     - (input_section->output_section->vma + input_section->output_offset
8093        + offset)
8094     /* Addend in the relocation.  */
8095     - addend
8096     /* Biassing for PC-relative addressing.  */
8097     - 8;
8098 
8099   insert_thumb_branch (input_bfd, ret_offset, hit_data - input_section->vma);
8100 
8101   return TRUE;
8102 }
8103 
8104 /* Populate an Arm to Thumb stub.  Returns the stub symbol.  */
8105 
8106 static struct elf_link_hash_entry *
8107 elf32_arm_create_thumb_stub (struct bfd_link_info * info,
8108 			     const char *           name,
8109 			     bfd *                  input_bfd,
8110 			     bfd *                  output_bfd,
8111 			     asection *             sym_sec,
8112 			     bfd_vma                val,
8113 			     asection *             s,
8114 			     char **                error_message)
8115 {
8116   bfd_vma my_offset;
8117   long int ret_offset;
8118   struct elf_link_hash_entry * myh;
8119   struct elf32_arm_link_hash_table * globals;
8120 
8121   myh = find_arm_glue (info, name, error_message);
8122   if (myh == NULL)
8123     return NULL;
8124 
8125   globals = elf32_arm_hash_table (info);
8126   BFD_ASSERT (globals != NULL);
8127   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8128 
8129   my_offset = myh->root.u.def.value;
8130 
8131   if ((my_offset & 0x01) == 0x01)
8132     {
8133       if (sym_sec != NULL
8134 	  && sym_sec->owner != NULL
8135 	  && !INTERWORK_FLAG (sym_sec->owner))
8136 	{
8137 	  (*_bfd_error_handler)
8138 	    (_("%B(%s): warning: interworking not enabled.\n"
8139 	       "  first occurrence: %B: arm call to thumb"),
8140 	     sym_sec->owner, input_bfd, name);
8141 	}
8142 
8143       --my_offset;
8144       myh->root.u.def.value = my_offset;
8145 
8146       if (bfd_link_pic (info)
8147 	  || globals->root.is_relocatable_executable
8148 	  || globals->pic_veneer)
8149 	{
8150 	  /* For relocatable objects we can't use absolute addresses,
8151 	     so construct the address from a relative offset.  */
8152 	  /* TODO: If the offset is small it's probably worth
8153 	     constructing the address with adds.  */
8154 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1p_ldr_insn,
8155 			s->contents + my_offset);
8156 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2p_add_pc_insn,
8157 			s->contents + my_offset + 4);
8158 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t3p_bx_r12_insn,
8159 			s->contents + my_offset + 8);
8160 	  /* Adjust the offset by 4 for the position of the add,
8161 	     and 8 for the pipeline offset.  */
8162 	  ret_offset = (val - (s->output_offset
8163 			       + s->output_section->vma
8164 			       + my_offset + 12))
8165 		       | 1;
8166 	  bfd_put_32 (output_bfd, ret_offset,
8167 		      s->contents + my_offset + 12);
8168 	}
8169       else if (globals->use_blx)
8170 	{
8171 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1v5_ldr_insn,
8172 			s->contents + my_offset);
8173 
8174 	  /* It's a thumb address.  Add the low order bit.  */
8175 	  bfd_put_32 (output_bfd, val | a2t2v5_func_addr_insn,
8176 		      s->contents + my_offset + 4);
8177 	}
8178       else
8179 	{
8180 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t1_ldr_insn,
8181 			s->contents + my_offset);
8182 
8183 	  put_arm_insn (globals, output_bfd, (bfd_vma) a2t2_bx_r12_insn,
8184 			s->contents + my_offset + 4);
8185 
8186 	  /* It's a thumb address.  Add the low order bit.  */
8187 	  bfd_put_32 (output_bfd, val | a2t3_func_addr_insn,
8188 		      s->contents + my_offset + 8);
8189 
8190 	  my_offset += 12;
8191 	}
8192     }
8193 
8194   BFD_ASSERT (my_offset <= globals->arm_glue_size);
8195 
8196   return myh;
8197 }
8198 
8199 /* Arm code calling a Thumb function.  */
8200 
8201 static int
8202 elf32_arm_to_thumb_stub (struct bfd_link_info * info,
8203 			 const char *           name,
8204 			 bfd *                  input_bfd,
8205 			 bfd *                  output_bfd,
8206 			 asection *             input_section,
8207 			 bfd_byte *             hit_data,
8208 			 asection *             sym_sec,
8209 			 bfd_vma                offset,
8210 			 bfd_signed_vma         addend,
8211 			 bfd_vma                val,
8212 			 char **error_message)
8213 {
8214   unsigned long int tmp;
8215   bfd_vma my_offset;
8216   asection * s;
8217   long int ret_offset;
8218   struct elf_link_hash_entry * myh;
8219   struct elf32_arm_link_hash_table * globals;
8220 
8221   globals = elf32_arm_hash_table (info);
8222   BFD_ASSERT (globals != NULL);
8223   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8224 
8225   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8226 			      ARM2THUMB_GLUE_SECTION_NAME);
8227   BFD_ASSERT (s != NULL);
8228   BFD_ASSERT (s->contents != NULL);
8229   BFD_ASSERT (s->output_section != NULL);
8230 
8231   myh = elf32_arm_create_thumb_stub (info, name, input_bfd, output_bfd,
8232 				     sym_sec, val, s, error_message);
8233   if (!myh)
8234     return FALSE;
8235 
8236   my_offset = myh->root.u.def.value;
8237   tmp = bfd_get_32 (input_bfd, hit_data);
8238   tmp = tmp & 0xFF000000;
8239 
8240   /* Somehow these are both 4 too far, so subtract 8.  */
8241   ret_offset = (s->output_offset
8242 		+ my_offset
8243 		+ s->output_section->vma
8244 		- (input_section->output_offset
8245 		   + input_section->output_section->vma
8246 		   + offset + addend)
8247 		- 8);
8248 
8249   tmp = tmp | ((ret_offset >> 2) & 0x00FFFFFF);
8250 
8251   bfd_put_32 (output_bfd, (bfd_vma) tmp, hit_data - input_section->vma);
8252 
8253   return TRUE;
8254 }
8255 
8256 /* Populate Arm stub for an exported Thumb function.  */
8257 
8258 static bfd_boolean
8259 elf32_arm_to_thumb_export_stub (struct elf_link_hash_entry *h, void * inf)
8260 {
8261   struct bfd_link_info * info = (struct bfd_link_info *) inf;
8262   asection * s;
8263   struct elf_link_hash_entry * myh;
8264   struct elf32_arm_link_hash_entry *eh;
8265   struct elf32_arm_link_hash_table * globals;
8266   asection *sec;
8267   bfd_vma val;
8268   char *error_message;
8269 
8270   eh = elf32_arm_hash_entry (h);
8271   /* Allocate stubs for exported Thumb functions on v4t.  */
8272   if (eh->export_glue == NULL)
8273     return TRUE;
8274 
8275   globals = elf32_arm_hash_table (info);
8276   BFD_ASSERT (globals != NULL);
8277   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8278 
8279   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8280 			      ARM2THUMB_GLUE_SECTION_NAME);
8281   BFD_ASSERT (s != NULL);
8282   BFD_ASSERT (s->contents != NULL);
8283   BFD_ASSERT (s->output_section != NULL);
8284 
8285   sec = eh->export_glue->root.u.def.section;
8286 
8287   BFD_ASSERT (sec->output_section != NULL);
8288 
8289   val = eh->export_glue->root.u.def.value + sec->output_offset
8290 	+ sec->output_section->vma;
8291 
8292   myh = elf32_arm_create_thumb_stub (info, h->root.root.string,
8293 				     h->root.u.def.section->owner,
8294 				     globals->obfd, sec, val, s,
8295 				     &error_message);
8296   BFD_ASSERT (myh);
8297   return TRUE;
8298 }
8299 
8300 /* Populate ARMv4 BX veneers.  Returns the absolute adress of the veneer.  */
8301 
8302 static bfd_vma
8303 elf32_arm_bx_glue (struct bfd_link_info * info, int reg)
8304 {
8305   bfd_byte *p;
8306   bfd_vma glue_addr;
8307   asection *s;
8308   struct elf32_arm_link_hash_table *globals;
8309 
8310   globals = elf32_arm_hash_table (info);
8311   BFD_ASSERT (globals != NULL);
8312   BFD_ASSERT (globals->bfd_of_glue_owner != NULL);
8313 
8314   s = bfd_get_linker_section (globals->bfd_of_glue_owner,
8315 			      ARM_BX_GLUE_SECTION_NAME);
8316   BFD_ASSERT (s != NULL);
8317   BFD_ASSERT (s->contents != NULL);
8318   BFD_ASSERT (s->output_section != NULL);
8319 
8320   BFD_ASSERT (globals->bx_glue_offset[reg] & 2);
8321 
8322   glue_addr = globals->bx_glue_offset[reg] & ~(bfd_vma)3;
8323 
8324   if ((globals->bx_glue_offset[reg] & 1) == 0)
8325     {
8326       p = s->contents + glue_addr;
8327       bfd_put_32 (globals->obfd, armbx1_tst_insn + (reg << 16), p);
8328       bfd_put_32 (globals->obfd, armbx2_moveq_insn + reg, p + 4);
8329       bfd_put_32 (globals->obfd, armbx3_bx_insn + reg, p + 8);
8330       globals->bx_glue_offset[reg] |= 1;
8331     }
8332 
8333   return glue_addr + s->output_section->vma + s->output_offset;
8334 }
8335 
8336 /* Generate Arm stubs for exported Thumb symbols.  */
8337 static void
8338 elf32_arm_begin_write_processing (bfd *abfd ATTRIBUTE_UNUSED,
8339 				  struct bfd_link_info *link_info)
8340 {
8341   struct elf32_arm_link_hash_table * globals;
8342 
8343   if (link_info == NULL)
8344     /* Ignore this if we are not called by the ELF backend linker.  */
8345     return;
8346 
8347   globals = elf32_arm_hash_table (link_info);
8348   if (globals == NULL)
8349     return;
8350 
8351   /* If blx is available then exported Thumb symbols are OK and there is
8352      nothing to do.  */
8353   if (globals->use_blx)
8354     return;
8355 
8356   elf_link_hash_traverse (&globals->root, elf32_arm_to_thumb_export_stub,
8357 			  link_info);
8358 }
8359 
8360 /* Reserve space for COUNT dynamic relocations in relocation selection
8361    SRELOC.  */
8362 
8363 static void
8364 elf32_arm_allocate_dynrelocs (struct bfd_link_info *info, asection *sreloc,
8365 			      bfd_size_type count)
8366 {
8367   struct elf32_arm_link_hash_table *htab;
8368 
8369   htab = elf32_arm_hash_table (info);
8370   BFD_ASSERT (htab->root.dynamic_sections_created);
8371   if (sreloc == NULL)
8372     abort ();
8373   sreloc->size += RELOC_SIZE (htab) * count;
8374 }
8375 
8376 /* Reserve space for COUNT R_ARM_IRELATIVE relocations.  If the link is
8377    dynamic, the relocations should go in SRELOC, otherwise they should
8378    go in the special .rel.iplt section.  */
8379 
8380 static void
8381 elf32_arm_allocate_irelocs (struct bfd_link_info *info, asection *sreloc,
8382 			    bfd_size_type count)
8383 {
8384   struct elf32_arm_link_hash_table *htab;
8385 
8386   htab = elf32_arm_hash_table (info);
8387   if (!htab->root.dynamic_sections_created)
8388     htab->root.irelplt->size += RELOC_SIZE (htab) * count;
8389   else
8390     {
8391       BFD_ASSERT (sreloc != NULL);
8392       sreloc->size += RELOC_SIZE (htab) * count;
8393     }
8394 }
8395 
8396 /* Add relocation REL to the end of relocation section SRELOC.  */
8397 
8398 static void
8399 elf32_arm_add_dynreloc (bfd *output_bfd, struct bfd_link_info *info,
8400 			asection *sreloc, Elf_Internal_Rela *rel)
8401 {
8402   bfd_byte *loc;
8403   struct elf32_arm_link_hash_table *htab;
8404 
8405   htab = elf32_arm_hash_table (info);
8406   if (!htab->root.dynamic_sections_created
8407       && ELF32_R_TYPE (rel->r_info) == R_ARM_IRELATIVE)
8408     sreloc = htab->root.irelplt;
8409   if (sreloc == NULL)
8410     abort ();
8411   loc = sreloc->contents;
8412   loc += sreloc->reloc_count++ * RELOC_SIZE (htab);
8413   if (sreloc->reloc_count * RELOC_SIZE (htab) > sreloc->size)
8414     abort ();
8415   SWAP_RELOC_OUT (htab) (output_bfd, rel, loc);
8416 }
8417 
8418 /* Allocate room for a PLT entry described by ROOT_PLT and ARM_PLT.
8419    IS_IPLT_ENTRY says whether the entry belongs to .iplt rather than
8420    to .plt.  */
8421 
8422 static void
8423 elf32_arm_allocate_plt_entry (struct bfd_link_info *info,
8424 			      bfd_boolean is_iplt_entry,
8425 			      union gotplt_union *root_plt,
8426 			      struct arm_plt_info *arm_plt)
8427 {
8428   struct elf32_arm_link_hash_table *htab;
8429   asection *splt;
8430   asection *sgotplt;
8431 
8432   htab = elf32_arm_hash_table (info);
8433 
8434   if (is_iplt_entry)
8435     {
8436       splt = htab->root.iplt;
8437       sgotplt = htab->root.igotplt;
8438 
8439       /* NaCl uses a special first entry in .iplt too.  */
8440       if (htab->nacl_p && splt->size == 0)
8441 	splt->size += htab->plt_header_size;
8442 
8443       /* Allocate room for an R_ARM_IRELATIVE relocation in .rel.iplt.  */
8444       elf32_arm_allocate_irelocs (info, htab->root.irelplt, 1);
8445     }
8446   else
8447     {
8448       splt = htab->root.splt;
8449       sgotplt = htab->root.sgotplt;
8450 
8451       /* Allocate room for an R_JUMP_SLOT relocation in .rel.plt.  */
8452       elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
8453 
8454       /* If this is the first .plt entry, make room for the special
8455 	 first entry.  */
8456       if (splt->size == 0)
8457 	splt->size += htab->plt_header_size;
8458 
8459       htab->next_tls_desc_index++;
8460     }
8461 
8462   /* Allocate the PLT entry itself, including any leading Thumb stub.  */
8463   if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
8464     splt->size += PLT_THUMB_STUB_SIZE;
8465   root_plt->offset = splt->size;
8466   splt->size += htab->plt_entry_size;
8467 
8468   if (!htab->symbian_p)
8469     {
8470       /* We also need to make an entry in the .got.plt section, which
8471 	 will be placed in the .got section by the linker script.  */
8472       if (is_iplt_entry)
8473 	arm_plt->got_offset = sgotplt->size;
8474       else
8475 	arm_plt->got_offset = sgotplt->size - 8 * htab->num_tls_desc;
8476       sgotplt->size += 4;
8477     }
8478 }
8479 
8480 static bfd_vma
8481 arm_movw_immediate (bfd_vma value)
8482 {
8483   return (value & 0x00000fff) | ((value & 0x0000f000) << 4);
8484 }
8485 
8486 static bfd_vma
8487 arm_movt_immediate (bfd_vma value)
8488 {
8489   return ((value & 0x0fff0000) >> 16) | ((value & 0xf0000000) >> 12);
8490 }
8491 
8492 /* Fill in a PLT entry and its associated GOT slot.  If DYNINDX == -1,
8493    the entry lives in .iplt and resolves to (*SYM_VALUE)().
8494    Otherwise, DYNINDX is the index of the symbol in the dynamic
8495    symbol table and SYM_VALUE is undefined.
8496 
8497    ROOT_PLT points to the offset of the PLT entry from the start of its
8498    section (.iplt or .plt).  ARM_PLT points to the symbol's ARM-specific
8499    bookkeeping information.
8500 
8501    Returns FALSE if there was a problem.  */
8502 
8503 static bfd_boolean
8504 elf32_arm_populate_plt_entry (bfd *output_bfd, struct bfd_link_info *info,
8505 			      union gotplt_union *root_plt,
8506 			      struct arm_plt_info *arm_plt,
8507 			      int dynindx, bfd_vma sym_value)
8508 {
8509   struct elf32_arm_link_hash_table *htab;
8510   asection *sgot;
8511   asection *splt;
8512   asection *srel;
8513   bfd_byte *loc;
8514   bfd_vma plt_index;
8515   Elf_Internal_Rela rel;
8516   bfd_vma plt_header_size;
8517   bfd_vma got_header_size;
8518 
8519   htab = elf32_arm_hash_table (info);
8520 
8521   /* Pick the appropriate sections and sizes.  */
8522   if (dynindx == -1)
8523     {
8524       splt = htab->root.iplt;
8525       sgot = htab->root.igotplt;
8526       srel = htab->root.irelplt;
8527 
8528       /* There are no reserved entries in .igot.plt, and no special
8529 	 first entry in .iplt.  */
8530       got_header_size = 0;
8531       plt_header_size = 0;
8532     }
8533   else
8534     {
8535       splt = htab->root.splt;
8536       sgot = htab->root.sgotplt;
8537       srel = htab->root.srelplt;
8538 
8539       got_header_size = get_elf_backend_data (output_bfd)->got_header_size;
8540       plt_header_size = htab->plt_header_size;
8541     }
8542   BFD_ASSERT (splt != NULL && srel != NULL);
8543 
8544   /* Fill in the entry in the procedure linkage table.  */
8545   if (htab->symbian_p)
8546     {
8547       BFD_ASSERT (dynindx >= 0);
8548       put_arm_insn (htab, output_bfd,
8549 		    elf32_arm_symbian_plt_entry[0],
8550 		    splt->contents + root_plt->offset);
8551       bfd_put_32 (output_bfd,
8552 		  elf32_arm_symbian_plt_entry[1],
8553 		  splt->contents + root_plt->offset + 4);
8554 
8555       /* Fill in the entry in the .rel.plt section.  */
8556       rel.r_offset = (splt->output_section->vma
8557 		      + splt->output_offset
8558 		      + root_plt->offset + 4);
8559       rel.r_info = ELF32_R_INFO (dynindx, R_ARM_GLOB_DAT);
8560 
8561       /* Get the index in the procedure linkage table which
8562 	 corresponds to this symbol.  This is the index of this symbol
8563 	 in all the symbols for which we are making plt entries.  The
8564 	 first entry in the procedure linkage table is reserved.  */
8565       plt_index = ((root_plt->offset - plt_header_size)
8566 		   / htab->plt_entry_size);
8567     }
8568   else
8569     {
8570       bfd_vma got_offset, got_address, plt_address;
8571       bfd_vma got_displacement, initial_got_entry;
8572       bfd_byte * ptr;
8573 
8574       BFD_ASSERT (sgot != NULL);
8575 
8576       /* Get the offset into the .(i)got.plt table of the entry that
8577 	 corresponds to this function.  */
8578       got_offset = (arm_plt->got_offset & -2);
8579 
8580       /* Get the index in the procedure linkage table which
8581 	 corresponds to this symbol.  This is the index of this symbol
8582 	 in all the symbols for which we are making plt entries.
8583 	 After the reserved .got.plt entries, all symbols appear in
8584 	 the same order as in .plt.  */
8585       plt_index = (got_offset - got_header_size) / 4;
8586 
8587       /* Calculate the address of the GOT entry.  */
8588       got_address = (sgot->output_section->vma
8589 		     + sgot->output_offset
8590 		     + got_offset);
8591 
8592       /* ...and the address of the PLT entry.  */
8593       plt_address = (splt->output_section->vma
8594 		     + splt->output_offset
8595 		     + root_plt->offset);
8596 
8597       ptr = splt->contents + root_plt->offset;
8598       if (htab->vxworks_p && bfd_link_pic (info))
8599 	{
8600 	  unsigned int i;
8601 	  bfd_vma val;
8602 
8603 	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
8604 	    {
8605 	      val = elf32_arm_vxworks_shared_plt_entry[i];
8606 	      if (i == 2)
8607 		val |= got_address - sgot->output_section->vma;
8608 	      if (i == 5)
8609 		val |= plt_index * RELOC_SIZE (htab);
8610 	      if (i == 2 || i == 5)
8611 		bfd_put_32 (output_bfd, val, ptr);
8612 	      else
8613 		put_arm_insn (htab, output_bfd, val, ptr);
8614 	    }
8615 	}
8616       else if (htab->vxworks_p)
8617 	{
8618 	  unsigned int i;
8619 	  bfd_vma val;
8620 
8621 	  for (i = 0; i != htab->plt_entry_size / 4; i++, ptr += 4)
8622 	    {
8623 	      val = elf32_arm_vxworks_exec_plt_entry[i];
8624 	      if (i == 2)
8625 		val |= got_address;
8626 	      if (i == 4)
8627 		val |= 0xffffff & -((root_plt->offset + i * 4 + 8) >> 2);
8628 	      if (i == 5)
8629 		val |= plt_index * RELOC_SIZE (htab);
8630 	      if (i == 2 || i == 5)
8631 		bfd_put_32 (output_bfd, val, ptr);
8632 	      else
8633 		put_arm_insn (htab, output_bfd, val, ptr);
8634 	    }
8635 
8636 	  loc = (htab->srelplt2->contents
8637 		 + (plt_index * 2 + 1) * RELOC_SIZE (htab));
8638 
8639 	  /* Create the .rela.plt.unloaded R_ARM_ABS32 relocation
8640 	     referencing the GOT for this PLT entry.  */
8641 	  rel.r_offset = plt_address + 8;
8642 	  rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
8643 	  rel.r_addend = got_offset;
8644 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8645 	  loc += RELOC_SIZE (htab);
8646 
8647 	  /* Create the R_ARM_ABS32 relocation referencing the
8648 	     beginning of the PLT for this GOT entry.  */
8649 	  rel.r_offset = got_address;
8650 	  rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
8651 	  rel.r_addend = 0;
8652 	  SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8653 	}
8654       else if (htab->nacl_p)
8655 	{
8656 	  /* Calculate the displacement between the PLT slot and the
8657 	     common tail that's part of the special initial PLT slot.  */
8658 	  int32_t tail_displacement
8659 	    = ((splt->output_section->vma + splt->output_offset
8660 		+ ARM_NACL_PLT_TAIL_OFFSET)
8661 	       - (plt_address + htab->plt_entry_size + 4));
8662 	  BFD_ASSERT ((tail_displacement & 3) == 0);
8663 	  tail_displacement >>= 2;
8664 
8665 	  BFD_ASSERT ((tail_displacement & 0xff000000) == 0
8666 		      || (-tail_displacement & 0xff000000) == 0);
8667 
8668 	  /* Calculate the displacement between the PLT slot and the entry
8669 	     in the GOT.  The offset accounts for the value produced by
8670 	     adding to pc in the penultimate instruction of the PLT stub.  */
8671 	  got_displacement = (got_address
8672 			      - (plt_address + htab->plt_entry_size));
8673 
8674 	  /* NaCl does not support interworking at all.  */
8675 	  BFD_ASSERT (!elf32_arm_plt_needs_thumb_stub_p (info, arm_plt));
8676 
8677 	  put_arm_insn (htab, output_bfd,
8678 			elf32_arm_nacl_plt_entry[0]
8679 			| arm_movw_immediate (got_displacement),
8680 			ptr + 0);
8681 	  put_arm_insn (htab, output_bfd,
8682 			elf32_arm_nacl_plt_entry[1]
8683 			| arm_movt_immediate (got_displacement),
8684 			ptr + 4);
8685 	  put_arm_insn (htab, output_bfd,
8686 			elf32_arm_nacl_plt_entry[2],
8687 			ptr + 8);
8688 	  put_arm_insn (htab, output_bfd,
8689 			elf32_arm_nacl_plt_entry[3]
8690 			| (tail_displacement & 0x00ffffff),
8691 			ptr + 12);
8692 	}
8693       else if (using_thumb_only (htab))
8694 	{
8695 	  /* PR ld/16017: Generate thumb only PLT entries.  */
8696 	  if (!using_thumb2 (htab))
8697 	    {
8698 	      /* FIXME: We ought to be able to generate thumb-1 PLT
8699 		 instructions...  */
8700 	      _bfd_error_handler (_("%B: Warning: thumb-1 mode PLT generation not currently supported"),
8701 				  output_bfd);
8702 	      return FALSE;
8703 	    }
8704 
8705 	  /* Calculate the displacement between the PLT slot and the entry in
8706 	     the GOT.  The 12-byte offset accounts for the value produced by
8707 	     adding to pc in the 3rd instruction of the PLT stub.  */
8708 	  got_displacement = got_address - (plt_address + 12);
8709 
8710 	  /* As we are using 32 bit instructions we have to use 'put_arm_insn'
8711 	     instead of 'put_thumb_insn'.  */
8712 	  put_arm_insn (htab, output_bfd,
8713 			elf32_thumb2_plt_entry[0]
8714 			| ((got_displacement & 0x000000ff) << 16)
8715 			| ((got_displacement & 0x00000700) << 20)
8716 			| ((got_displacement & 0x00000800) >>  1)
8717 			| ((got_displacement & 0x0000f000) >> 12),
8718 			ptr + 0);
8719 	  put_arm_insn (htab, output_bfd,
8720 			elf32_thumb2_plt_entry[1]
8721 			| ((got_displacement & 0x00ff0000)      )
8722 			| ((got_displacement & 0x07000000) <<  4)
8723 			| ((got_displacement & 0x08000000) >> 17)
8724 			| ((got_displacement & 0xf0000000) >> 28),
8725 			ptr + 4);
8726 	  put_arm_insn (htab, output_bfd,
8727 			elf32_thumb2_plt_entry[2],
8728 			ptr + 8);
8729 	  put_arm_insn (htab, output_bfd,
8730 			elf32_thumb2_plt_entry[3],
8731 			ptr + 12);
8732 	}
8733       else
8734 	{
8735 	  /* Calculate the displacement between the PLT slot and the
8736 	     entry in the GOT.  The eight-byte offset accounts for the
8737 	     value produced by adding to pc in the first instruction
8738 	     of the PLT stub.  */
8739 	  got_displacement = got_address - (plt_address + 8);
8740 
8741 	  if (elf32_arm_plt_needs_thumb_stub_p (info, arm_plt))
8742 	    {
8743 	      put_thumb_insn (htab, output_bfd,
8744 			      elf32_arm_plt_thumb_stub[0], ptr - 4);
8745 	      put_thumb_insn (htab, output_bfd,
8746 			      elf32_arm_plt_thumb_stub[1], ptr - 2);
8747 	    }
8748 
8749 	  if (!elf32_arm_use_long_plt_entry)
8750 	    {
8751 	      BFD_ASSERT ((got_displacement & 0xf0000000) == 0);
8752 
8753 	      put_arm_insn (htab, output_bfd,
8754 			    elf32_arm_plt_entry_short[0]
8755 			    | ((got_displacement & 0x0ff00000) >> 20),
8756 			    ptr + 0);
8757 	      put_arm_insn (htab, output_bfd,
8758 			    elf32_arm_plt_entry_short[1]
8759 			    | ((got_displacement & 0x000ff000) >> 12),
8760 			    ptr+ 4);
8761 	      put_arm_insn (htab, output_bfd,
8762 			    elf32_arm_plt_entry_short[2]
8763 			    | (got_displacement & 0x00000fff),
8764 			    ptr + 8);
8765 #ifdef FOUR_WORD_PLT
8766 	      bfd_put_32 (output_bfd, elf32_arm_plt_entry_short[3], ptr + 12);
8767 #endif
8768 	    }
8769 	  else
8770 	    {
8771 	      put_arm_insn (htab, output_bfd,
8772 			    elf32_arm_plt_entry_long[0]
8773 			    | ((got_displacement & 0xf0000000) >> 28),
8774 			    ptr + 0);
8775 	      put_arm_insn (htab, output_bfd,
8776 			    elf32_arm_plt_entry_long[1]
8777 			    | ((got_displacement & 0x0ff00000) >> 20),
8778 			    ptr + 4);
8779 	      put_arm_insn (htab, output_bfd,
8780 			    elf32_arm_plt_entry_long[2]
8781 			    | ((got_displacement & 0x000ff000) >> 12),
8782 			    ptr+ 8);
8783 	      put_arm_insn (htab, output_bfd,
8784 			    elf32_arm_plt_entry_long[3]
8785 			    | (got_displacement & 0x00000fff),
8786 			    ptr + 12);
8787 	    }
8788 	}
8789 
8790       /* Fill in the entry in the .rel(a).(i)plt section.  */
8791       rel.r_offset = got_address;
8792       rel.r_addend = 0;
8793       if (dynindx == -1)
8794 	{
8795 	  /* .igot.plt entries use IRELATIVE relocations against SYM_VALUE.
8796 	     The dynamic linker or static executable then calls SYM_VALUE
8797 	     to determine the correct run-time value of the .igot.plt entry.  */
8798 	  rel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
8799 	  initial_got_entry = sym_value;
8800 	}
8801       else
8802 	{
8803 	  rel.r_info = ELF32_R_INFO (dynindx, R_ARM_JUMP_SLOT);
8804 	  initial_got_entry = (splt->output_section->vma
8805 			       + splt->output_offset);
8806 	}
8807 
8808       /* Fill in the entry in the global offset table.  */
8809       bfd_put_32 (output_bfd, initial_got_entry,
8810 		  sgot->contents + got_offset);
8811     }
8812 
8813   if (dynindx == -1)
8814     elf32_arm_add_dynreloc (output_bfd, info, srel, &rel);
8815   else
8816     {
8817       loc = srel->contents + plt_index * RELOC_SIZE (htab);
8818       SWAP_RELOC_OUT (htab) (output_bfd, &rel, loc);
8819     }
8820 
8821   return TRUE;
8822 }
8823 
8824 /* Some relocations map to different relocations depending on the
8825    target.  Return the real relocation.  */
8826 
8827 static int
8828 arm_real_reloc_type (struct elf32_arm_link_hash_table * globals,
8829 		     int r_type)
8830 {
8831   switch (r_type)
8832     {
8833     case R_ARM_TARGET1:
8834       if (globals->target1_is_rel)
8835 	return R_ARM_REL32;
8836       else
8837 	return R_ARM_ABS32;
8838 
8839     case R_ARM_TARGET2:
8840       return globals->target2_reloc;
8841 
8842     default:
8843       return r_type;
8844     }
8845 }
8846 
8847 /* Return the base VMA address which should be subtracted from real addresses
8848    when resolving @dtpoff relocation.
8849    This is PT_TLS segment p_vaddr.  */
8850 
8851 static bfd_vma
8852 dtpoff_base (struct bfd_link_info *info)
8853 {
8854   /* If tls_sec is NULL, we should have signalled an error already.  */
8855   if (elf_hash_table (info)->tls_sec == NULL)
8856     return 0;
8857   return elf_hash_table (info)->tls_sec->vma;
8858 }
8859 
8860 /* Return the relocation value for @tpoff relocation
8861    if STT_TLS virtual address is ADDRESS.  */
8862 
8863 static bfd_vma
8864 tpoff (struct bfd_link_info *info, bfd_vma address)
8865 {
8866   struct elf_link_hash_table *htab = elf_hash_table (info);
8867   bfd_vma base;
8868 
8869   /* If tls_sec is NULL, we should have signalled an error already.  */
8870   if (htab->tls_sec == NULL)
8871     return 0;
8872   base = align_power ((bfd_vma) TCB_SIZE, htab->tls_sec->alignment_power);
8873   return address - htab->tls_sec->vma + base;
8874 }
8875 
8876 /* Perform an R_ARM_ABS12 relocation on the field pointed to by DATA.
8877    VALUE is the relocation value.  */
8878 
8879 static bfd_reloc_status_type
8880 elf32_arm_abs12_reloc (bfd *abfd, void *data, bfd_vma value)
8881 {
8882   if (value > 0xfff)
8883     return bfd_reloc_overflow;
8884 
8885   value |= bfd_get_32 (abfd, data) & 0xfffff000;
8886   bfd_put_32 (abfd, value, data);
8887   return bfd_reloc_ok;
8888 }
8889 
8890 /* Handle TLS relaxations.  Relaxing is possible for symbols that use
8891    R_ARM_GOTDESC, R_ARM_{,THM_}TLS_CALL or
8892    R_ARM_{,THM_}TLS_DESCSEQ relocations, during a static link.
8893 
8894    Return bfd_reloc_ok if we're done, bfd_reloc_continue if the caller
8895    is to then call final_link_relocate.  Return other values in the
8896    case of error.
8897 
8898    FIXME:When --emit-relocs is in effect, we'll emit relocs describing
8899    the pre-relaxed code.  It would be nice if the relocs were updated
8900    to match the optimization.   */
8901 
8902 static bfd_reloc_status_type
8903 elf32_arm_tls_relax (struct elf32_arm_link_hash_table *globals,
8904 		     bfd *input_bfd, asection *input_sec, bfd_byte *contents,
8905 		     Elf_Internal_Rela *rel, unsigned long is_local)
8906 {
8907   unsigned long insn;
8908 
8909   switch (ELF32_R_TYPE (rel->r_info))
8910     {
8911     default:
8912       return bfd_reloc_notsupported;
8913 
8914     case R_ARM_TLS_GOTDESC:
8915       if (is_local)
8916 	insn = 0;
8917       else
8918 	{
8919 	  insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
8920 	  if (insn & 1)
8921 	    insn -= 5; /* THUMB */
8922 	  else
8923 	    insn -= 8; /* ARM */
8924 	}
8925       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
8926       return bfd_reloc_continue;
8927 
8928     case R_ARM_THM_TLS_DESCSEQ:
8929       /* Thumb insn.  */
8930       insn = bfd_get_16 (input_bfd, contents + rel->r_offset);
8931       if ((insn & 0xff78) == 0x4478)	  /* add rx, pc */
8932 	{
8933 	  if (is_local)
8934 	    /* nop */
8935 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8936 	}
8937       else if ((insn & 0xffc0) == 0x6840)  /* ldr rx,[ry,#4] */
8938 	{
8939 	  if (is_local)
8940 	    /* nop */
8941 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8942 	  else
8943 	    /* ldr rx,[ry] */
8944 	    bfd_put_16 (input_bfd, insn & 0xf83f, contents + rel->r_offset);
8945 	}
8946       else if ((insn & 0xff87) == 0x4780)  /* blx rx */
8947 	{
8948 	  if (is_local)
8949 	    /* nop */
8950 	    bfd_put_16 (input_bfd, 0x46c0, contents + rel->r_offset);
8951 	  else
8952 	    /* mov r0, rx */
8953 	    bfd_put_16 (input_bfd, 0x4600 | (insn & 0x78),
8954 			contents + rel->r_offset);
8955 	}
8956       else
8957 	{
8958 	  if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
8959 	    /* It's a 32 bit instruction, fetch the rest of it for
8960 	       error generation.  */
8961 	    insn = (insn << 16)
8962 	      | bfd_get_16 (input_bfd, contents + rel->r_offset + 2);
8963 	  (*_bfd_error_handler)
8964 	    (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' in TLS trampoline"),
8965 	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
8966 	  return bfd_reloc_notsupported;
8967 	}
8968       break;
8969 
8970     case R_ARM_TLS_DESCSEQ:
8971       /* arm insn.  */
8972       insn = bfd_get_32 (input_bfd, contents + rel->r_offset);
8973       if ((insn & 0xffff0ff0) == 0xe08f0000) /* add rx,pc,ry */
8974 	{
8975 	  if (is_local)
8976 	    /* mov rx, ry */
8977 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xffff),
8978 			contents + rel->r_offset);
8979 	}
8980       else if ((insn & 0xfff00fff) == 0xe5900004) /* ldr rx,[ry,#4]*/
8981 	{
8982 	  if (is_local)
8983 	    /* nop */
8984 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
8985 	  else
8986 	    /* ldr rx,[ry] */
8987 	    bfd_put_32 (input_bfd, insn & 0xfffff000,
8988 			contents + rel->r_offset);
8989 	}
8990       else if ((insn & 0xfffffff0) == 0xe12fff30) /* blx rx */
8991 	{
8992 	  if (is_local)
8993 	    /* nop */
8994 	    bfd_put_32 (input_bfd, 0xe1a00000, contents + rel->r_offset);
8995 	  else
8996 	    /* mov r0, rx */
8997 	    bfd_put_32 (input_bfd, 0xe1a00000 | (insn & 0xf),
8998 			contents + rel->r_offset);
8999 	}
9000       else
9001 	{
9002 	  (*_bfd_error_handler)
9003 	    (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' in TLS trampoline"),
9004 	     input_bfd, input_sec, (unsigned long)rel->r_offset, insn);
9005 	  return bfd_reloc_notsupported;
9006 	}
9007       break;
9008 
9009     case R_ARM_TLS_CALL:
9010       /* GD->IE relaxation, turn the instruction into 'nop' or
9011 	 'ldr r0, [pc,r0]'  */
9012       insn = is_local ? 0xe1a00000 : 0xe79f0000;
9013       bfd_put_32 (input_bfd, insn, contents + rel->r_offset);
9014       break;
9015 
9016     case R_ARM_THM_TLS_CALL:
9017       /* GD->IE relaxation.  */
9018       if (!is_local)
9019 	/* add r0,pc; ldr r0, [r0]  */
9020 	insn = 0x44786800;
9021       else if (using_thumb2 (globals))
9022 	/* nop.w */
9023 	insn = 0xf3af8000;
9024       else
9025 	/* nop; nop */
9026 	insn = 0xbf00bf00;
9027 
9028       bfd_put_16 (input_bfd, insn >> 16, contents + rel->r_offset);
9029       bfd_put_16 (input_bfd, insn & 0xffff, contents + rel->r_offset + 2);
9030       break;
9031     }
9032   return bfd_reloc_ok;
9033 }
9034 
9035 /* For a given value of n, calculate the value of G_n as required to
9036    deal with group relocations.  We return it in the form of an
9037    encoded constant-and-rotation, together with the final residual.  If n is
9038    specified as less than zero, then final_residual is filled with the
9039    input value and no further action is performed.  */
9040 
9041 static bfd_vma
9042 calculate_group_reloc_mask (bfd_vma value, int n, bfd_vma *final_residual)
9043 {
9044   int current_n;
9045   bfd_vma g_n;
9046   bfd_vma encoded_g_n = 0;
9047   bfd_vma residual = value; /* Also known as Y_n.  */
9048 
9049   for (current_n = 0; current_n <= n; current_n++)
9050     {
9051       int shift;
9052 
9053       /* Calculate which part of the value to mask.  */
9054       if (residual == 0)
9055 	shift = 0;
9056       else
9057 	{
9058 	  int msb;
9059 
9060 	  /* Determine the most significant bit in the residual and
9061 	     align the resulting value to a 2-bit boundary.  */
9062 	  for (msb = 30; msb >= 0; msb -= 2)
9063 	    if (residual & (3 << msb))
9064 	      break;
9065 
9066 	  /* The desired shift is now (msb - 6), or zero, whichever
9067 	     is the greater.  */
9068 	  shift = msb - 6;
9069 	  if (shift < 0)
9070 	    shift = 0;
9071 	}
9072 
9073       /* Calculate g_n in 32-bit as well as encoded constant+rotation form.  */
9074       g_n = residual & (0xff << shift);
9075       encoded_g_n = (g_n >> shift)
9076 		    | ((g_n <= 0xff ? 0 : (32 - shift) / 2) << 8);
9077 
9078       /* Calculate the residual for the next time around.  */
9079       residual &= ~g_n;
9080     }
9081 
9082   *final_residual = residual;
9083 
9084   return encoded_g_n;
9085 }
9086 
9087 /* Given an ARM instruction, determine whether it is an ADD or a SUB.
9088    Returns 1 if it is an ADD, -1 if it is a SUB, and 0 otherwise.  */
9089 
9090 static int
9091 identify_add_or_sub (bfd_vma insn)
9092 {
9093   int opcode = insn & 0x1e00000;
9094 
9095   if (opcode == 1 << 23) /* ADD */
9096     return 1;
9097 
9098   if (opcode == 1 << 22) /* SUB */
9099     return -1;
9100 
9101   return 0;
9102 }
9103 
9104 /* Perform a relocation as part of a final link.  */
9105 
9106 static bfd_reloc_status_type
9107 elf32_arm_final_link_relocate (reloc_howto_type *           howto,
9108 			       bfd *                        input_bfd,
9109 			       bfd *                        output_bfd,
9110 			       asection *                   input_section,
9111 			       bfd_byte *                   contents,
9112 			       Elf_Internal_Rela *          rel,
9113 			       bfd_vma                      value,
9114 			       struct bfd_link_info *       info,
9115 			       asection *                   sym_sec,
9116 			       const char *                 sym_name,
9117 			       unsigned char                st_type,
9118 			       enum arm_st_branch_type      branch_type,
9119 			       struct elf_link_hash_entry * h,
9120 			       bfd_boolean *                unresolved_reloc_p,
9121 			       char **                      error_message)
9122 {
9123   unsigned long                 r_type = howto->type;
9124   unsigned long                 r_symndx;
9125   bfd_byte *                    hit_data = contents + rel->r_offset;
9126   bfd_vma *                     local_got_offsets;
9127   bfd_vma *                     local_tlsdesc_gotents;
9128   asection *                    sgot;
9129   asection *                    splt;
9130   asection *                    sreloc = NULL;
9131   asection *                    srelgot;
9132   bfd_vma                       addend;
9133   bfd_signed_vma                signed_addend;
9134   unsigned char                 dynreloc_st_type;
9135   bfd_vma                       dynreloc_value;
9136   struct elf32_arm_link_hash_table * globals;
9137   struct elf32_arm_link_hash_entry *eh;
9138   union gotplt_union           *root_plt;
9139   struct arm_plt_info          *arm_plt;
9140   bfd_vma                       plt_offset;
9141   bfd_vma                       gotplt_offset;
9142   bfd_boolean                   has_iplt_entry;
9143 
9144   globals = elf32_arm_hash_table (info);
9145   if (globals == NULL)
9146     return bfd_reloc_notsupported;
9147 
9148   BFD_ASSERT (is_arm_elf (input_bfd));
9149 
9150   /* Some relocation types map to different relocations depending on the
9151      target.  We pick the right one here.  */
9152   r_type = arm_real_reloc_type (globals, r_type);
9153 
9154   /* It is possible to have linker relaxations on some TLS access
9155      models.  Update our information here.  */
9156   r_type = elf32_arm_tls_transition (info, r_type, h);
9157 
9158   if (r_type != howto->type)
9159     howto = elf32_arm_howto_from_type (r_type);
9160 
9161   eh = (struct elf32_arm_link_hash_entry *) h;
9162   sgot = globals->root.sgot;
9163   local_got_offsets = elf_local_got_offsets (input_bfd);
9164   local_tlsdesc_gotents = elf32_arm_local_tlsdesc_gotent (input_bfd);
9165 
9166   if (globals->root.dynamic_sections_created)
9167     srelgot = globals->root.srelgot;
9168   else
9169     srelgot = NULL;
9170 
9171   r_symndx = ELF32_R_SYM (rel->r_info);
9172 
9173   if (globals->use_rel)
9174     {
9175       addend = bfd_get_32 (input_bfd, hit_data) & howto->src_mask;
9176 
9177       if (addend & ((howto->src_mask + 1) >> 1))
9178 	{
9179 	  signed_addend = -1;
9180 	  signed_addend &= ~ howto->src_mask;
9181 	  signed_addend |= addend;
9182 	}
9183       else
9184 	signed_addend = addend;
9185     }
9186   else
9187     addend = signed_addend = rel->r_addend;
9188 
9189   /* ST_BRANCH_TO_ARM is nonsense to thumb-only targets when we
9190      are resolving a function call relocation.  */
9191   if (using_thumb_only (globals)
9192       && (r_type == R_ARM_THM_CALL
9193 	  || r_type == R_ARM_THM_JUMP24)
9194       && branch_type == ST_BRANCH_TO_ARM)
9195     branch_type = ST_BRANCH_TO_THUMB;
9196 
9197   /* Record the symbol information that should be used in dynamic
9198      relocations.  */
9199   dynreloc_st_type = st_type;
9200   dynreloc_value = value;
9201   if (branch_type == ST_BRANCH_TO_THUMB)
9202     dynreloc_value |= 1;
9203 
9204   /* Find out whether the symbol has a PLT.  Set ST_VALUE, BRANCH_TYPE and
9205      VALUE appropriately for relocations that we resolve at link time.  */
9206   has_iplt_entry = FALSE;
9207   if (elf32_arm_get_plt_info (input_bfd, eh, r_symndx, &root_plt, &arm_plt)
9208       && root_plt->offset != (bfd_vma) -1)
9209     {
9210       plt_offset = root_plt->offset;
9211       gotplt_offset = arm_plt->got_offset;
9212 
9213       if (h == NULL || eh->is_iplt)
9214 	{
9215 	  has_iplt_entry = TRUE;
9216 	  splt = globals->root.iplt;
9217 
9218 	  /* Populate .iplt entries here, because not all of them will
9219 	     be seen by finish_dynamic_symbol.  The lower bit is set if
9220 	     we have already populated the entry.  */
9221 	  if (plt_offset & 1)
9222 	    plt_offset--;
9223 	  else
9224 	    {
9225 	      if (elf32_arm_populate_plt_entry (output_bfd, info, root_plt, arm_plt,
9226 						-1, dynreloc_value))
9227 		root_plt->offset |= 1;
9228 	      else
9229 		return bfd_reloc_notsupported;
9230 	    }
9231 
9232 	  /* Static relocations always resolve to the .iplt entry.  */
9233 	  st_type = STT_FUNC;
9234 	  value = (splt->output_section->vma
9235 		   + splt->output_offset
9236 		   + plt_offset);
9237 	  branch_type = ST_BRANCH_TO_ARM;
9238 
9239 	  /* If there are non-call relocations that resolve to the .iplt
9240 	     entry, then all dynamic ones must too.  */
9241 	  if (arm_plt->noncall_refcount != 0)
9242 	    {
9243 	      dynreloc_st_type = st_type;
9244 	      dynreloc_value = value;
9245 	    }
9246 	}
9247       else
9248 	/* We populate the .plt entry in finish_dynamic_symbol.  */
9249 	splt = globals->root.splt;
9250     }
9251   else
9252     {
9253       splt = NULL;
9254       plt_offset = (bfd_vma) -1;
9255       gotplt_offset = (bfd_vma) -1;
9256     }
9257 
9258   switch (r_type)
9259     {
9260     case R_ARM_NONE:
9261       /* We don't need to find a value for this symbol.  It's just a
9262 	 marker.  */
9263       *unresolved_reloc_p = FALSE;
9264       return bfd_reloc_ok;
9265 
9266     case R_ARM_ABS12:
9267       if (!globals->vxworks_p)
9268 	return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9269 
9270     case R_ARM_PC24:
9271     case R_ARM_ABS32:
9272     case R_ARM_ABS32_NOI:
9273     case R_ARM_REL32:
9274     case R_ARM_REL32_NOI:
9275     case R_ARM_CALL:
9276     case R_ARM_JUMP24:
9277     case R_ARM_XPC25:
9278     case R_ARM_PREL31:
9279     case R_ARM_PLT32:
9280       /* Handle relocations which should use the PLT entry.  ABS32/REL32
9281 	 will use the symbol's value, which may point to a PLT entry, but we
9282 	 don't need to handle that here.  If we created a PLT entry, all
9283 	 branches in this object should go to it, except if the PLT is too
9284 	 far away, in which case a long branch stub should be inserted.  */
9285       if ((r_type != R_ARM_ABS32 && r_type != R_ARM_REL32
9286 	   && r_type != R_ARM_ABS32_NOI && r_type != R_ARM_REL32_NOI
9287 	   && r_type != R_ARM_CALL
9288 	   && r_type != R_ARM_JUMP24
9289 	   && r_type != R_ARM_PLT32)
9290 	  && plt_offset != (bfd_vma) -1)
9291 	{
9292 	  /* If we've created a .plt section, and assigned a PLT entry
9293 	     to this function, it must either be a STT_GNU_IFUNC reference
9294 	     or not be known to bind locally.  In other cases, we should
9295 	     have cleared the PLT entry by now.  */
9296 	  BFD_ASSERT (has_iplt_entry || !SYMBOL_CALLS_LOCAL (info, h));
9297 
9298 	  value = (splt->output_section->vma
9299 		   + splt->output_offset
9300 		   + plt_offset);
9301 	  *unresolved_reloc_p = FALSE;
9302 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
9303 					   contents, rel->r_offset, value,
9304 					   rel->r_addend);
9305 	}
9306 
9307       /* When generating a shared object or relocatable executable, these
9308 	 relocations are copied into the output file to be resolved at
9309 	 run time.  */
9310       if ((bfd_link_pic (info)
9311 	   || globals->root.is_relocatable_executable)
9312 	  && (input_section->flags & SEC_ALLOC)
9313 	  && !(globals->vxworks_p
9314 	       && strcmp (input_section->output_section->name,
9315 			  ".tls_vars") == 0)
9316 	  && ((r_type != R_ARM_REL32 && r_type != R_ARM_REL32_NOI)
9317 	      || !SYMBOL_CALLS_LOCAL (info, h))
9318 	  && !(input_bfd == globals->stub_bfd
9319 	       && strstr (input_section->name, STUB_SUFFIX))
9320 	  && (h == NULL
9321 	      || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
9322 	      || h->root.type != bfd_link_hash_undefweak)
9323 	  && r_type != R_ARM_PC24
9324 	  && r_type != R_ARM_CALL
9325 	  && r_type != R_ARM_JUMP24
9326 	  && r_type != R_ARM_PREL31
9327 	  && r_type != R_ARM_PLT32)
9328 	{
9329 	  Elf_Internal_Rela outrel;
9330 	  bfd_boolean skip, relocate;
9331 
9332 	  if ((r_type == R_ARM_REL32 || r_type == R_ARM_REL32_NOI)
9333 	      && !h->def_regular)
9334 	    {
9335 	      char *v = _("shared object");
9336 
9337 	      if (bfd_link_executable (info))
9338 		v = _("PIE executable");
9339 
9340 	      (*_bfd_error_handler)
9341 		(_("%B: relocation %s against external or undefined symbol `%s'"
9342 		   " can not be used when making a %s; recompile with -fPIC"), input_bfd,
9343 		 elf32_arm_howto_table_1[r_type].name, h->root.root.string, v);
9344 	      return bfd_reloc_notsupported;
9345 	    }
9346 
9347 	  *unresolved_reloc_p = FALSE;
9348 
9349 	  if (sreloc == NULL && globals->root.dynamic_sections_created)
9350 	    {
9351 	      sreloc = _bfd_elf_get_dynamic_reloc_section (input_bfd, input_section,
9352 							   ! globals->use_rel);
9353 
9354 	      if (sreloc == NULL)
9355 		return bfd_reloc_notsupported;
9356 	    }
9357 
9358 	  skip = FALSE;
9359 	  relocate = FALSE;
9360 
9361 	  outrel.r_addend = addend;
9362 	  outrel.r_offset =
9363 	    _bfd_elf_section_offset (output_bfd, info, input_section,
9364 				     rel->r_offset);
9365 	  if (outrel.r_offset == (bfd_vma) -1)
9366 	    skip = TRUE;
9367 	  else if (outrel.r_offset == (bfd_vma) -2)
9368 	    skip = TRUE, relocate = TRUE;
9369 	  outrel.r_offset += (input_section->output_section->vma
9370 			      + input_section->output_offset);
9371 
9372 	  if (skip)
9373 	    memset (&outrel, 0, sizeof outrel);
9374 	  else if (h != NULL
9375 		   && h->dynindx != -1
9376 		   && (!bfd_link_pic (info)
9377 		       || !SYMBOLIC_BIND (info, h)
9378 		       || !h->def_regular))
9379 	    outrel.r_info = ELF32_R_INFO (h->dynindx, r_type);
9380 	  else
9381 	    {
9382 	      int symbol;
9383 
9384 	      /* This symbol is local, or marked to become local.  */
9385 	      BFD_ASSERT (r_type == R_ARM_ABS32 || r_type == R_ARM_ABS32_NOI);
9386 	      if (globals->symbian_p)
9387 		{
9388 		  asection *osec;
9389 
9390 		  /* On Symbian OS, the data segment and text segement
9391 		     can be relocated independently.  Therefore, we
9392 		     must indicate the segment to which this
9393 		     relocation is relative.  The BPABI allows us to
9394 		     use any symbol in the right segment; we just use
9395 		     the section symbol as it is convenient.  (We
9396 		     cannot use the symbol given by "h" directly as it
9397 		     will not appear in the dynamic symbol table.)
9398 
9399 		     Note that the dynamic linker ignores the section
9400 		     symbol value, so we don't subtract osec->vma
9401 		     from the emitted reloc addend.  */
9402 		  if (sym_sec)
9403 		    osec = sym_sec->output_section;
9404 		  else
9405 		    osec = input_section->output_section;
9406 		  symbol = elf_section_data (osec)->dynindx;
9407 		  if (symbol == 0)
9408 		    {
9409 		      struct elf_link_hash_table *htab = elf_hash_table (info);
9410 
9411 		      if ((osec->flags & SEC_READONLY) == 0
9412 			  && htab->data_index_section != NULL)
9413 			osec = htab->data_index_section;
9414 		      else
9415 			osec = htab->text_index_section;
9416 		      symbol = elf_section_data (osec)->dynindx;
9417 		    }
9418 		  BFD_ASSERT (symbol != 0);
9419 		}
9420 	      else
9421 		/* On SVR4-ish systems, the dynamic loader cannot
9422 		   relocate the text and data segments independently,
9423 		   so the symbol does not matter.  */
9424 		symbol = 0;
9425 	      if (dynreloc_st_type == STT_GNU_IFUNC)
9426 		/* We have an STT_GNU_IFUNC symbol that doesn't resolve
9427 		   to the .iplt entry.  Instead, every non-call reference
9428 		   must use an R_ARM_IRELATIVE relocation to obtain the
9429 		   correct run-time address.  */
9430 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_IRELATIVE);
9431 	      else
9432 		outrel.r_info = ELF32_R_INFO (symbol, R_ARM_RELATIVE);
9433 	      if (globals->use_rel)
9434 		relocate = TRUE;
9435 	      else
9436 		outrel.r_addend += dynreloc_value;
9437 	    }
9438 
9439 	  elf32_arm_add_dynreloc (output_bfd, info, sreloc, &outrel);
9440 
9441 	  /* If this reloc is against an external symbol, we do not want to
9442 	     fiddle with the addend.  Otherwise, we need to include the symbol
9443 	     value so that it becomes an addend for the dynamic reloc.  */
9444 	  if (! relocate)
9445 	    return bfd_reloc_ok;
9446 
9447 	  return _bfd_final_link_relocate (howto, input_bfd, input_section,
9448 					   contents, rel->r_offset,
9449 					   dynreloc_value, (bfd_vma) 0);
9450 	}
9451       else switch (r_type)
9452 	{
9453 	case R_ARM_ABS12:
9454 	  return elf32_arm_abs12_reloc (input_bfd, hit_data, value + addend);
9455 
9456 	case R_ARM_XPC25:	  /* Arm BLX instruction.  */
9457 	case R_ARM_CALL:
9458 	case R_ARM_JUMP24:
9459 	case R_ARM_PC24:	  /* Arm B/BL instruction.  */
9460 	case R_ARM_PLT32:
9461 	  {
9462 	  struct elf32_arm_stub_hash_entry *stub_entry = NULL;
9463 
9464 	  if (r_type == R_ARM_XPC25)
9465 	    {
9466 	      /* Check for Arm calling Arm function.  */
9467 	      /* FIXME: Should we translate the instruction into a BL
9468 		 instruction instead ?  */
9469 	      if (branch_type != ST_BRANCH_TO_THUMB)
9470 		(*_bfd_error_handler)
9471 		  (_("\%B: Warning: Arm BLX instruction targets Arm function '%s'."),
9472 		   input_bfd,
9473 		   h ? h->root.root.string : "(local)");
9474 	    }
9475 	  else if (r_type == R_ARM_PC24)
9476 	    {
9477 	      /* Check for Arm calling Thumb function.  */
9478 	      if (branch_type == ST_BRANCH_TO_THUMB)
9479 		{
9480 		  if (elf32_arm_to_thumb_stub (info, sym_name, input_bfd,
9481 					       output_bfd, input_section,
9482 					       hit_data, sym_sec, rel->r_offset,
9483 					       signed_addend, value,
9484 					       error_message))
9485 		    return bfd_reloc_ok;
9486 		  else
9487 		    return bfd_reloc_dangerous;
9488 		}
9489 	    }
9490 
9491 	  /* Check if a stub has to be inserted because the
9492 	     destination is too far or we are changing mode.  */
9493 	  if (   r_type == R_ARM_CALL
9494 	      || r_type == R_ARM_JUMP24
9495 	      || r_type == R_ARM_PLT32)
9496 	    {
9497 	      enum elf32_arm_stub_type stub_type = arm_stub_none;
9498 	      struct elf32_arm_link_hash_entry *hash;
9499 
9500 	      hash = (struct elf32_arm_link_hash_entry *) h;
9501 	      stub_type = arm_type_of_stub (info, input_section, rel,
9502 					    st_type, &branch_type,
9503 					    hash, value, sym_sec,
9504 					    input_bfd, sym_name);
9505 
9506 	      if (stub_type != arm_stub_none)
9507 		{
9508 		  /* The target is out of reach, so redirect the
9509 		     branch to the local stub for this function.  */
9510 		  stub_entry = elf32_arm_get_stub_entry (input_section,
9511 							 sym_sec, h,
9512 							 rel, globals,
9513 							 stub_type);
9514 		  {
9515 		    if (stub_entry != NULL)
9516 		      value = (stub_entry->stub_offset
9517 			       + stub_entry->stub_sec->output_offset
9518 			       + stub_entry->stub_sec->output_section->vma);
9519 
9520 		    if (plt_offset != (bfd_vma) -1)
9521 		      *unresolved_reloc_p = FALSE;
9522 		  }
9523 		}
9524 	      else
9525 		{
9526 		  /* If the call goes through a PLT entry, make sure to
9527 		     check distance to the right destination address.  */
9528 		  if (plt_offset != (bfd_vma) -1)
9529 		    {
9530 		      value = (splt->output_section->vma
9531 			       + splt->output_offset
9532 			       + plt_offset);
9533 		      *unresolved_reloc_p = FALSE;
9534 		      /* The PLT entry is in ARM mode, regardless of the
9535 			 target function.  */
9536 		      branch_type = ST_BRANCH_TO_ARM;
9537 		    }
9538 		}
9539 	    }
9540 
9541 	  /* The ARM ELF ABI says that this reloc is computed as: S - P + A
9542 	     where:
9543 	      S is the address of the symbol in the relocation.
9544 	      P is address of the instruction being relocated.
9545 	      A is the addend (extracted from the instruction) in bytes.
9546 
9547 	     S is held in 'value'.
9548 	     P is the base address of the section containing the
9549 	       instruction plus the offset of the reloc into that
9550 	       section, ie:
9551 		 (input_section->output_section->vma +
9552 		  input_section->output_offset +
9553 		  rel->r_offset).
9554 	     A is the addend, converted into bytes, ie:
9555 		 (signed_addend * 4)
9556 
9557 	     Note: None of these operations have knowledge of the pipeline
9558 	     size of the processor, thus it is up to the assembler to
9559 	     encode this information into the addend.  */
9560 	  value -= (input_section->output_section->vma
9561 		    + input_section->output_offset);
9562 	  value -= rel->r_offset;
9563 	  if (globals->use_rel)
9564 	    value += (signed_addend << howto->size);
9565 	  else
9566 	    /* RELA addends do not have to be adjusted by howto->size.  */
9567 	    value += signed_addend;
9568 
9569 	  signed_addend = value;
9570 	  signed_addend >>= howto->rightshift;
9571 
9572 	  /* A branch to an undefined weak symbol is turned into a jump to
9573 	     the next instruction unless a PLT entry will be created.
9574 	     Do the same for local undefined symbols (but not for STN_UNDEF).
9575 	     The jump to the next instruction is optimized as a NOP depending
9576 	     on the architecture.  */
9577 	  if (h ? (h->root.type == bfd_link_hash_undefweak
9578 		   && plt_offset == (bfd_vma) -1)
9579 	      : r_symndx != STN_UNDEF && bfd_is_und_section (sym_sec))
9580 	    {
9581 	      value = (bfd_get_32 (input_bfd, hit_data) & 0xf0000000);
9582 
9583 	      if (arch_has_arm_nop (globals))
9584 		value |= 0x0320f000;
9585 	      else
9586 		value |= 0x01a00000; /* Using pre-UAL nop: mov r0, r0.  */
9587 	    }
9588 	  else
9589 	    {
9590 	      /* Perform a signed range check.  */
9591 	      if (   signed_addend >   ((bfd_signed_vma)  (howto->dst_mask >> 1))
9592 		  || signed_addend < - ((bfd_signed_vma) ((howto->dst_mask + 1) >> 1)))
9593 		return bfd_reloc_overflow;
9594 
9595 	      addend = (value & 2);
9596 
9597 	      value = (signed_addend & howto->dst_mask)
9598 		| (bfd_get_32 (input_bfd, hit_data) & (~ howto->dst_mask));
9599 
9600 	      if (r_type == R_ARM_CALL)
9601 		{
9602 		  /* Set the H bit in the BLX instruction.  */
9603 		  if (branch_type == ST_BRANCH_TO_THUMB)
9604 		    {
9605 		      if (addend)
9606 			value |= (1 << 24);
9607 		      else
9608 			value &= ~(bfd_vma)(1 << 24);
9609 		    }
9610 
9611 		  /* Select the correct instruction (BL or BLX).  */
9612 		  /* Only if we are not handling a BL to a stub. In this
9613 		     case, mode switching is performed by the stub.  */
9614 		  if (branch_type == ST_BRANCH_TO_THUMB && !stub_entry)
9615 		    value |= (1 << 28);
9616 		  else if (stub_entry || branch_type != ST_BRANCH_UNKNOWN)
9617 		    {
9618 		      value &= ~(bfd_vma)(1 << 28);
9619 		      value |= (1 << 24);
9620 		    }
9621 		}
9622 	    }
9623 	  }
9624 	  break;
9625 
9626 	case R_ARM_ABS32:
9627 	  value += addend;
9628 	  if (branch_type == ST_BRANCH_TO_THUMB)
9629 	    value |= 1;
9630 	  break;
9631 
9632 	case R_ARM_ABS32_NOI:
9633 	  value += addend;
9634 	  break;
9635 
9636 	case R_ARM_REL32:
9637 	  value += addend;
9638 	  if (branch_type == ST_BRANCH_TO_THUMB)
9639 	    value |= 1;
9640 	  value -= (input_section->output_section->vma
9641 		    + input_section->output_offset + rel->r_offset);
9642 	  break;
9643 
9644 	case R_ARM_REL32_NOI:
9645 	  value += addend;
9646 	  value -= (input_section->output_section->vma
9647 		    + input_section->output_offset + rel->r_offset);
9648 	  break;
9649 
9650 	case R_ARM_PREL31:
9651 	  value -= (input_section->output_section->vma
9652 		    + input_section->output_offset + rel->r_offset);
9653 	  value += signed_addend;
9654 	  if (! h || h->root.type != bfd_link_hash_undefweak)
9655 	    {
9656 	      /* Check for overflow.  */
9657 	      if ((value ^ (value >> 1)) & (1 << 30))
9658 		return bfd_reloc_overflow;
9659 	    }
9660 	  value &= 0x7fffffff;
9661 	  value |= (bfd_get_32 (input_bfd, hit_data) & 0x80000000);
9662 	  if (branch_type == ST_BRANCH_TO_THUMB)
9663 	    value |= 1;
9664 	  break;
9665 	}
9666 
9667       bfd_put_32 (input_bfd, value, hit_data);
9668       return bfd_reloc_ok;
9669 
9670     case R_ARM_ABS8:
9671       /* PR 16202: Refectch the addend using the correct size.  */
9672       if (globals->use_rel)
9673 	addend = bfd_get_8 (input_bfd, hit_data);
9674       value += addend;
9675 
9676       /* There is no way to tell whether the user intended to use a signed or
9677 	 unsigned addend.  When checking for overflow we accept either,
9678 	 as specified by the AAELF.  */
9679       if ((long) value > 0xff || (long) value < -0x80)
9680 	return bfd_reloc_overflow;
9681 
9682       bfd_put_8 (input_bfd, value, hit_data);
9683       return bfd_reloc_ok;
9684 
9685     case R_ARM_ABS16:
9686       /* PR 16202: Refectch the addend using the correct size.  */
9687       if (globals->use_rel)
9688 	addend = bfd_get_16 (input_bfd, hit_data);
9689       value += addend;
9690 
9691       /* See comment for R_ARM_ABS8.  */
9692       if ((long) value > 0xffff || (long) value < -0x8000)
9693 	return bfd_reloc_overflow;
9694 
9695       bfd_put_16 (input_bfd, value, hit_data);
9696       return bfd_reloc_ok;
9697 
9698     case R_ARM_THM_ABS5:
9699       /* Support ldr and str instructions for the thumb.  */
9700       if (globals->use_rel)
9701 	{
9702 	  /* Need to refetch addend.  */
9703 	  addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
9704 	  /* ??? Need to determine shift amount from operand size.  */
9705 	  addend >>= howto->rightshift;
9706 	}
9707       value += addend;
9708 
9709       /* ??? Isn't value unsigned?  */
9710       if ((long) value > 0x1f || (long) value < -0x10)
9711 	return bfd_reloc_overflow;
9712 
9713       /* ??? Value needs to be properly shifted into place first.  */
9714       value |= bfd_get_16 (input_bfd, hit_data) & 0xf83f;
9715       bfd_put_16 (input_bfd, value, hit_data);
9716       return bfd_reloc_ok;
9717 
9718     case R_ARM_THM_ALU_PREL_11_0:
9719       /* Corresponds to: addw.w reg, pc, #offset (and similarly for subw).  */
9720       {
9721 	bfd_vma insn;
9722 	bfd_signed_vma relocation;
9723 
9724 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9725 	     | bfd_get_16 (input_bfd, hit_data + 2);
9726 
9727 	if (globals->use_rel)
9728 	  {
9729 	    signed_addend = (insn & 0xff) | ((insn & 0x7000) >> 4)
9730 			  | ((insn & (1 << 26)) >> 15);
9731 	    if (insn & 0xf00000)
9732 	      signed_addend = -signed_addend;
9733 	  }
9734 
9735 	relocation = value + signed_addend;
9736 	relocation -= Pa (input_section->output_section->vma
9737 			  + input_section->output_offset
9738 			  + rel->r_offset);
9739 
9740 	value = relocation;
9741 
9742 	if (value >= 0x1000)
9743 	  return bfd_reloc_overflow;
9744 
9745 	insn = (insn & 0xfb0f8f00) | (value & 0xff)
9746 	     | ((value & 0x700) << 4)
9747 	     | ((value & 0x800) << 15);
9748 	if (relocation < 0)
9749 	  insn |= 0xa00000;
9750 
9751 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
9752 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9753 
9754 	return bfd_reloc_ok;
9755       }
9756 
9757     case R_ARM_THM_PC8:
9758       /* PR 10073:  This reloc is not generated by the GNU toolchain,
9759 	 but it is supported for compatibility with third party libraries
9760 	 generated by other compilers, specifically the ARM/IAR.  */
9761       {
9762 	bfd_vma insn;
9763 	bfd_signed_vma relocation;
9764 
9765 	insn = bfd_get_16 (input_bfd, hit_data);
9766 
9767 	if (globals->use_rel)
9768 	  addend = ((((insn & 0x00ff) << 2) + 4) & 0x3ff) -4;
9769 
9770 	relocation = value + addend;
9771 	relocation -= Pa (input_section->output_section->vma
9772 			  + input_section->output_offset
9773 			  + rel->r_offset);
9774 
9775 	value = relocation;
9776 
9777 	/* We do not check for overflow of this reloc.  Although strictly
9778 	   speaking this is incorrect, it appears to be necessary in order
9779 	   to work with IAR generated relocs.  Since GCC and GAS do not
9780 	   generate R_ARM_THM_PC8 relocs, the lack of a check should not be
9781 	   a problem for them.  */
9782 	value &= 0x3fc;
9783 
9784 	insn = (insn & 0xff00) | (value >> 2);
9785 
9786 	bfd_put_16 (input_bfd, insn, hit_data);
9787 
9788 	return bfd_reloc_ok;
9789       }
9790 
9791     case R_ARM_THM_PC12:
9792       /* Corresponds to: ldr.w reg, [pc, #offset].  */
9793       {
9794 	bfd_vma insn;
9795 	bfd_signed_vma relocation;
9796 
9797 	insn = (bfd_get_16 (input_bfd, hit_data) << 16)
9798 	     | bfd_get_16 (input_bfd, hit_data + 2);
9799 
9800 	if (globals->use_rel)
9801 	  {
9802 	    signed_addend = insn & 0xfff;
9803 	    if (!(insn & (1 << 23)))
9804 	      signed_addend = -signed_addend;
9805 	  }
9806 
9807 	relocation = value + signed_addend;
9808 	relocation -= Pa (input_section->output_section->vma
9809 			  + input_section->output_offset
9810 			  + rel->r_offset);
9811 
9812 	value = relocation;
9813 
9814 	if (value >= 0x1000)
9815 	  return bfd_reloc_overflow;
9816 
9817 	insn = (insn & 0xff7ff000) | value;
9818 	if (relocation >= 0)
9819 	  insn |= (1 << 23);
9820 
9821 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
9822 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
9823 
9824 	return bfd_reloc_ok;
9825       }
9826 
9827     case R_ARM_THM_XPC22:
9828     case R_ARM_THM_CALL:
9829     case R_ARM_THM_JUMP24:
9830       /* Thumb BL (branch long instruction).  */
9831       {
9832 	bfd_vma relocation;
9833 	bfd_vma reloc_sign;
9834 	bfd_boolean overflow = FALSE;
9835 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
9836 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
9837 	bfd_signed_vma reloc_signed_max;
9838 	bfd_signed_vma reloc_signed_min;
9839 	bfd_vma check;
9840 	bfd_signed_vma signed_check;
9841 	int bitsize;
9842 	const int thumb2 = using_thumb2 (globals);
9843 
9844 	/* A branch to an undefined weak symbol is turned into a jump to
9845 	   the next instruction unless a PLT entry will be created.
9846 	   The jump to the next instruction is optimized as a NOP.W for
9847 	   Thumb-2 enabled architectures.  */
9848 	if (h && h->root.type == bfd_link_hash_undefweak
9849 	    && plt_offset == (bfd_vma) -1)
9850 	  {
9851 	    if (thumb2)
9852 	      {
9853 		bfd_put_16 (input_bfd, 0xf3af, hit_data);
9854 		bfd_put_16 (input_bfd, 0x8000, hit_data + 2);
9855 	      }
9856 	    else
9857 	      {
9858 		bfd_put_16 (input_bfd, 0xe000, hit_data);
9859 		bfd_put_16 (input_bfd, 0xbf00, hit_data + 2);
9860 	      }
9861 	    return bfd_reloc_ok;
9862 	  }
9863 
9864 	/* Fetch the addend.  We use the Thumb-2 encoding (backwards compatible
9865 	   with Thumb-1) involving the J1 and J2 bits.  */
9866 	if (globals->use_rel)
9867 	  {
9868 	    bfd_vma s = (upper_insn & (1 << 10)) >> 10;
9869 	    bfd_vma upper = upper_insn & 0x3ff;
9870 	    bfd_vma lower = lower_insn & 0x7ff;
9871 	    bfd_vma j1 = (lower_insn & (1 << 13)) >> 13;
9872 	    bfd_vma j2 = (lower_insn & (1 << 11)) >> 11;
9873 	    bfd_vma i1 = j1 ^ s ? 0 : 1;
9874 	    bfd_vma i2 = j2 ^ s ? 0 : 1;
9875 
9876 	    addend = (i1 << 23) | (i2 << 22) | (upper << 12) | (lower << 1);
9877 	    /* Sign extend.  */
9878 	    addend = (addend | ((s ? 0 : 1) << 24)) - (1 << 24);
9879 
9880 	    signed_addend = addend;
9881 	  }
9882 
9883 	if (r_type == R_ARM_THM_XPC22)
9884 	  {
9885 	    /* Check for Thumb to Thumb call.  */
9886 	    /* FIXME: Should we translate the instruction into a BL
9887 	       instruction instead ?  */
9888 	    if (branch_type == ST_BRANCH_TO_THUMB)
9889 	      (*_bfd_error_handler)
9890 		(_("%B: Warning: Thumb BLX instruction targets thumb function '%s'."),
9891 		 input_bfd,
9892 		 h ? h->root.root.string : "(local)");
9893 	  }
9894 	else
9895 	  {
9896 	    /* If it is not a call to Thumb, assume call to Arm.
9897 	       If it is a call relative to a section name, then it is not a
9898 	       function call at all, but rather a long jump.  Calls through
9899 	       the PLT do not require stubs.  */
9900 	    if (branch_type == ST_BRANCH_TO_ARM && plt_offset == (bfd_vma) -1)
9901 	      {
9902 		if (globals->use_blx && r_type == R_ARM_THM_CALL)
9903 		  {
9904 		    /* Convert BL to BLX.  */
9905 		    lower_insn = (lower_insn & ~0x1000) | 0x0800;
9906 		  }
9907 		else if ((   r_type != R_ARM_THM_CALL)
9908 			 && (r_type != R_ARM_THM_JUMP24))
9909 		  {
9910 		    if (elf32_thumb_to_arm_stub
9911 			(info, sym_name, input_bfd, output_bfd, input_section,
9912 			 hit_data, sym_sec, rel->r_offset, signed_addend, value,
9913 			 error_message))
9914 		      return bfd_reloc_ok;
9915 		    else
9916 		      return bfd_reloc_dangerous;
9917 		  }
9918 	      }
9919 	    else if (branch_type == ST_BRANCH_TO_THUMB
9920 		     && globals->use_blx
9921 		     && r_type == R_ARM_THM_CALL)
9922 	      {
9923 		/* Make sure this is a BL.  */
9924 		lower_insn |= 0x1800;
9925 	      }
9926 	  }
9927 
9928 	enum elf32_arm_stub_type stub_type = arm_stub_none;
9929 	if (r_type == R_ARM_THM_CALL || r_type == R_ARM_THM_JUMP24)
9930 	  {
9931 	    /* Check if a stub has to be inserted because the destination
9932 	       is too far.  */
9933 	    struct elf32_arm_stub_hash_entry *stub_entry;
9934 	    struct elf32_arm_link_hash_entry *hash;
9935 
9936 	    hash = (struct elf32_arm_link_hash_entry *) h;
9937 
9938 	    stub_type = arm_type_of_stub (info, input_section, rel,
9939 					  st_type, &branch_type,
9940 					  hash, value, sym_sec,
9941 					  input_bfd, sym_name);
9942 
9943 	    if (stub_type != arm_stub_none)
9944 	      {
9945 		/* The target is out of reach or we are changing modes, so
9946 		   redirect the branch to the local stub for this
9947 		   function.  */
9948 		stub_entry = elf32_arm_get_stub_entry (input_section,
9949 						       sym_sec, h,
9950 						       rel, globals,
9951 						       stub_type);
9952 		if (stub_entry != NULL)
9953 		  {
9954 		    value = (stub_entry->stub_offset
9955 			     + stub_entry->stub_sec->output_offset
9956 			     + stub_entry->stub_sec->output_section->vma);
9957 
9958 		    if (plt_offset != (bfd_vma) -1)
9959 		      *unresolved_reloc_p = FALSE;
9960 		  }
9961 
9962 		/* If this call becomes a call to Arm, force BLX.  */
9963 		if (globals->use_blx && (r_type == R_ARM_THM_CALL))
9964 		  {
9965 		    if ((stub_entry
9966 			 && !arm_stub_is_thumb (stub_entry->stub_type))
9967 			|| branch_type != ST_BRANCH_TO_THUMB)
9968 		      lower_insn = (lower_insn & ~0x1000) | 0x0800;
9969 		  }
9970 	      }
9971 	  }
9972 
9973 	/* Handle calls via the PLT.  */
9974 	if (stub_type == arm_stub_none && plt_offset != (bfd_vma) -1)
9975 	  {
9976 	    value = (splt->output_section->vma
9977 		     + splt->output_offset
9978 		     + plt_offset);
9979 
9980 	    if (globals->use_blx
9981 		&& r_type == R_ARM_THM_CALL
9982 		&& ! using_thumb_only (globals))
9983 	      {
9984 		/* If the Thumb BLX instruction is available, convert
9985 		   the BL to a BLX instruction to call the ARM-mode
9986 		   PLT entry.  */
9987 		lower_insn = (lower_insn & ~0x1000) | 0x0800;
9988 		branch_type = ST_BRANCH_TO_ARM;
9989 	      }
9990 	    else
9991 	      {
9992 		if (! using_thumb_only (globals))
9993 		  /* Target the Thumb stub before the ARM PLT entry.  */
9994 		  value -= PLT_THUMB_STUB_SIZE;
9995 		branch_type = ST_BRANCH_TO_THUMB;
9996 	      }
9997 	    *unresolved_reloc_p = FALSE;
9998 	  }
9999 
10000 	relocation = value + signed_addend;
10001 
10002 	relocation -= (input_section->output_section->vma
10003 		       + input_section->output_offset
10004 		       + rel->r_offset);
10005 
10006 	check = relocation >> howto->rightshift;
10007 
10008 	/* If this is a signed value, the rightshift just dropped
10009 	   leading 1 bits (assuming twos complement).  */
10010 	if ((bfd_signed_vma) relocation >= 0)
10011 	  signed_check = check;
10012 	else
10013 	  signed_check = check | ~((bfd_vma) -1 >> howto->rightshift);
10014 
10015 	/* Calculate the permissable maximum and minimum values for
10016 	   this relocation according to whether we're relocating for
10017 	   Thumb-2 or not.  */
10018 	bitsize = howto->bitsize;
10019 	if (!thumb2)
10020 	  bitsize -= 2;
10021 	reloc_signed_max = (1 << (bitsize - 1)) - 1;
10022 	reloc_signed_min = ~reloc_signed_max;
10023 
10024 	/* Assumes two's complement.  */
10025 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10026 	  overflow = TRUE;
10027 
10028 	if ((lower_insn & 0x5000) == 0x4000)
10029 	  /* For a BLX instruction, make sure that the relocation is rounded up
10030 	     to a word boundary.  This follows the semantics of the instruction
10031 	     which specifies that bit 1 of the target address will come from bit
10032 	     1 of the base address.  */
10033 	  relocation = (relocation + 2) & ~ 3;
10034 
10035 	/* Put RELOCATION back into the insn.  Assumes two's complement.
10036 	   We use the Thumb-2 encoding, which is safe even if dealing with
10037 	   a Thumb-1 instruction by virtue of our overflow check above.  */
10038 	reloc_sign = (signed_check < 0) ? 1 : 0;
10039 	upper_insn = (upper_insn & ~(bfd_vma) 0x7ff)
10040 		     | ((relocation >> 12) & 0x3ff)
10041 		     | (reloc_sign << 10);
10042 	lower_insn = (lower_insn & ~(bfd_vma) 0x2fff)
10043 		     | (((!((relocation >> 23) & 1)) ^ reloc_sign) << 13)
10044 		     | (((!((relocation >> 22) & 1)) ^ reloc_sign) << 11)
10045 		     | ((relocation >> 1) & 0x7ff);
10046 
10047 	/* Put the relocated value back in the object file:  */
10048 	bfd_put_16 (input_bfd, upper_insn, hit_data);
10049 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10050 
10051 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10052       }
10053       break;
10054 
10055     case R_ARM_THM_JUMP19:
10056       /* Thumb32 conditional branch instruction.  */
10057       {
10058 	bfd_vma relocation;
10059 	bfd_boolean overflow = FALSE;
10060 	bfd_vma upper_insn = bfd_get_16 (input_bfd, hit_data);
10061 	bfd_vma lower_insn = bfd_get_16 (input_bfd, hit_data + 2);
10062 	bfd_signed_vma reloc_signed_max = 0xffffe;
10063 	bfd_signed_vma reloc_signed_min = -0x100000;
10064 	bfd_signed_vma signed_check;
10065         enum elf32_arm_stub_type stub_type = arm_stub_none;
10066 	struct elf32_arm_stub_hash_entry *stub_entry;
10067 	struct elf32_arm_link_hash_entry *hash;
10068 
10069 	/* Need to refetch the addend, reconstruct the top three bits,
10070 	   and squish the two 11 bit pieces together.  */
10071 	if (globals->use_rel)
10072 	  {
10073 	    bfd_vma S     = (upper_insn & 0x0400) >> 10;
10074 	    bfd_vma upper = (upper_insn & 0x003f);
10075 	    bfd_vma J1    = (lower_insn & 0x2000) >> 13;
10076 	    bfd_vma J2    = (lower_insn & 0x0800) >> 11;
10077 	    bfd_vma lower = (lower_insn & 0x07ff);
10078 
10079 	    upper |= J1 << 6;
10080 	    upper |= J2 << 7;
10081 	    upper |= (!S) << 8;
10082 	    upper -= 0x0100; /* Sign extend.  */
10083 
10084 	    addend = (upper << 12) | (lower << 1);
10085 	    signed_addend = addend;
10086 	  }
10087 
10088 	/* Handle calls via the PLT.  */
10089 	if (plt_offset != (bfd_vma) -1)
10090 	  {
10091 	    value = (splt->output_section->vma
10092 		     + splt->output_offset
10093 		     + plt_offset);
10094 	    /* Target the Thumb stub before the ARM PLT entry.  */
10095 	    value -= PLT_THUMB_STUB_SIZE;
10096 	    *unresolved_reloc_p = FALSE;
10097 	  }
10098 
10099 	hash = (struct elf32_arm_link_hash_entry *)h;
10100 
10101 	stub_type = arm_type_of_stub (info, input_section, rel,
10102 		                      st_type, &branch_type,
10103 		                      hash, value, sym_sec,
10104 		                      input_bfd, sym_name);
10105 	if (stub_type != arm_stub_none)
10106 	  {
10107 	    stub_entry = elf32_arm_get_stub_entry (input_section,
10108 				                   sym_sec, h,
10109 				                   rel, globals,
10110 				                   stub_type);
10111 	    if (stub_entry != NULL)
10112 	      {
10113 	        value = (stub_entry->stub_offset
10114                         + stub_entry->stub_sec->output_offset
10115                         + stub_entry->stub_sec->output_section->vma);
10116 	      }
10117 	  }
10118 
10119 	relocation = value + signed_addend;
10120 	relocation -= (input_section->output_section->vma
10121 		       + input_section->output_offset
10122 		       + rel->r_offset);
10123 	signed_check = (bfd_signed_vma) relocation;
10124 
10125 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10126 	  overflow = TRUE;
10127 
10128 	/* Put RELOCATION back into the insn.  */
10129 	{
10130 	  bfd_vma S  = (relocation & 0x00100000) >> 20;
10131 	  bfd_vma J2 = (relocation & 0x00080000) >> 19;
10132 	  bfd_vma J1 = (relocation & 0x00040000) >> 18;
10133 	  bfd_vma hi = (relocation & 0x0003f000) >> 12;
10134 	  bfd_vma lo = (relocation & 0x00000ffe) >>  1;
10135 
10136 	  upper_insn = (upper_insn & 0xfbc0) | (S << 10) | hi;
10137 	  lower_insn = (lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) | lo;
10138 	}
10139 
10140 	/* Put the relocated value back in the object file:  */
10141 	bfd_put_16 (input_bfd, upper_insn, hit_data);
10142 	bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10143 
10144 	return (overflow ? bfd_reloc_overflow : bfd_reloc_ok);
10145       }
10146 
10147     case R_ARM_THM_JUMP11:
10148     case R_ARM_THM_JUMP8:
10149     case R_ARM_THM_JUMP6:
10150       /* Thumb B (branch) instruction).  */
10151       {
10152 	bfd_signed_vma relocation;
10153 	bfd_signed_vma reloc_signed_max = (1 << (howto->bitsize - 1)) - 1;
10154 	bfd_signed_vma reloc_signed_min = ~ reloc_signed_max;
10155 	bfd_signed_vma signed_check;
10156 
10157 	/* CZB cannot jump backward.  */
10158 	if (r_type == R_ARM_THM_JUMP6)
10159 	  reloc_signed_min = 0;
10160 
10161 	if (globals->use_rel)
10162 	  {
10163 	    /* Need to refetch addend.  */
10164 	    addend = bfd_get_16 (input_bfd, hit_data) & howto->src_mask;
10165 	    if (addend & ((howto->src_mask + 1) >> 1))
10166 	      {
10167 		signed_addend = -1;
10168 		signed_addend &= ~ howto->src_mask;
10169 		signed_addend |= addend;
10170 	      }
10171 	    else
10172 	      signed_addend = addend;
10173 	    /* The value in the insn has been right shifted.  We need to
10174 	       undo this, so that we can perform the address calculation
10175 	       in terms of bytes.  */
10176 	    signed_addend <<= howto->rightshift;
10177 	  }
10178 	relocation = value + signed_addend;
10179 
10180 	relocation -= (input_section->output_section->vma
10181 		       + input_section->output_offset
10182 		       + rel->r_offset);
10183 
10184 	relocation >>= howto->rightshift;
10185 	signed_check = relocation;
10186 
10187 	if (r_type == R_ARM_THM_JUMP6)
10188 	  relocation = ((relocation & 0x0020) << 4) | ((relocation & 0x001f) << 3);
10189 	else
10190 	  relocation &= howto->dst_mask;
10191 	relocation |= (bfd_get_16 (input_bfd, hit_data) & (~ howto->dst_mask));
10192 
10193 	bfd_put_16 (input_bfd, relocation, hit_data);
10194 
10195 	/* Assumes two's complement.  */
10196 	if (signed_check > reloc_signed_max || signed_check < reloc_signed_min)
10197 	  return bfd_reloc_overflow;
10198 
10199 	return bfd_reloc_ok;
10200       }
10201 
10202     case R_ARM_ALU_PCREL7_0:
10203     case R_ARM_ALU_PCREL15_8:
10204     case R_ARM_ALU_PCREL23_15:
10205       {
10206 	bfd_vma insn;
10207 	bfd_vma relocation;
10208 
10209 	insn = bfd_get_32 (input_bfd, hit_data);
10210 	if (globals->use_rel)
10211 	  {
10212 	    /* Extract the addend.  */
10213 	    addend = (insn & 0xff) << ((insn & 0xf00) >> 7);
10214 	    signed_addend = addend;
10215 	  }
10216 	relocation = value + signed_addend;
10217 
10218 	relocation -= (input_section->output_section->vma
10219 		       + input_section->output_offset
10220 		       + rel->r_offset);
10221 	insn = (insn & ~0xfff)
10222 	       | ((howto->bitpos << 7) & 0xf00)
10223 	       | ((relocation >> howto->bitpos) & 0xff);
10224 	bfd_put_32 (input_bfd, value, hit_data);
10225       }
10226       return bfd_reloc_ok;
10227 
10228     case R_ARM_GNU_VTINHERIT:
10229     case R_ARM_GNU_VTENTRY:
10230       return bfd_reloc_ok;
10231 
10232     case R_ARM_GOTOFF32:
10233       /* Relocation is relative to the start of the
10234 	 global offset table.  */
10235 
10236       BFD_ASSERT (sgot != NULL);
10237       if (sgot == NULL)
10238 	return bfd_reloc_notsupported;
10239 
10240       /* If we are addressing a Thumb function, we need to adjust the
10241 	 address by one, so that attempts to call the function pointer will
10242 	 correctly interpret it as Thumb code.  */
10243       if (branch_type == ST_BRANCH_TO_THUMB)
10244 	value += 1;
10245 
10246       /* Note that sgot->output_offset is not involved in this
10247 	 calculation.  We always want the start of .got.  If we
10248 	 define _GLOBAL_OFFSET_TABLE in a different way, as is
10249 	 permitted by the ABI, we might have to change this
10250 	 calculation.  */
10251       value -= sgot->output_section->vma;
10252       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10253 				       contents, rel->r_offset, value,
10254 				       rel->r_addend);
10255 
10256     case R_ARM_GOTPC:
10257       /* Use global offset table as symbol value.  */
10258       BFD_ASSERT (sgot != NULL);
10259 
10260       if (sgot == NULL)
10261 	return bfd_reloc_notsupported;
10262 
10263       *unresolved_reloc_p = FALSE;
10264       value = sgot->output_section->vma;
10265       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10266 				       contents, rel->r_offset, value,
10267 				       rel->r_addend);
10268 
10269     case R_ARM_GOT32:
10270     case R_ARM_GOT_PREL:
10271       /* Relocation is to the entry for this symbol in the
10272 	 global offset table.  */
10273       if (sgot == NULL)
10274 	return bfd_reloc_notsupported;
10275 
10276       if (dynreloc_st_type == STT_GNU_IFUNC
10277 	  && plt_offset != (bfd_vma) -1
10278 	  && (h == NULL || SYMBOL_REFERENCES_LOCAL (info, h)))
10279 	{
10280 	  /* We have a relocation against a locally-binding STT_GNU_IFUNC
10281 	     symbol, and the relocation resolves directly to the runtime
10282 	     target rather than to the .iplt entry.  This means that any
10283 	     .got entry would be the same value as the .igot.plt entry,
10284 	     so there's no point creating both.  */
10285 	  sgot = globals->root.igotplt;
10286 	  value = sgot->output_offset + gotplt_offset;
10287 	}
10288       else if (h != NULL)
10289 	{
10290 	  bfd_vma off;
10291 
10292 	  off = h->got.offset;
10293 	  BFD_ASSERT (off != (bfd_vma) -1);
10294 	  if ((off & 1) != 0)
10295 	    {
10296 	      /* We have already processsed one GOT relocation against
10297 		 this symbol.  */
10298 	      off &= ~1;
10299 	      if (globals->root.dynamic_sections_created
10300 		  && !SYMBOL_REFERENCES_LOCAL (info, h))
10301 		*unresolved_reloc_p = FALSE;
10302 	    }
10303 	  else
10304 	    {
10305 	      Elf_Internal_Rela outrel;
10306 
10307 	      if (h->dynindx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
10308 		{
10309 		  /* If the symbol doesn't resolve locally in a static
10310 		     object, we have an undefined reference.  If the
10311 		     symbol doesn't resolve locally in a dynamic object,
10312 		     it should be resolved by the dynamic linker.  */
10313 		  if (globals->root.dynamic_sections_created)
10314 		    {
10315 		      outrel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_GLOB_DAT);
10316 		      *unresolved_reloc_p = FALSE;
10317 		    }
10318 		  else
10319 		    outrel.r_info = 0;
10320 		  outrel.r_addend = 0;
10321 		}
10322 	      else
10323 		{
10324 		  if (dynreloc_st_type == STT_GNU_IFUNC)
10325 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10326 		  else if (bfd_link_pic (info) &&
10327 			   (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10328 			    || h->root.type != bfd_link_hash_undefweak))
10329 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10330 		  else
10331 		    outrel.r_info = 0;
10332 		  outrel.r_addend = dynreloc_value;
10333 		}
10334 
10335 	      /* The GOT entry is initialized to zero by default.
10336 		 See if we should install a different value.  */
10337 	      if (outrel.r_addend != 0
10338 		  && (outrel.r_info == 0 || globals->use_rel))
10339 		{
10340 		  bfd_put_32 (output_bfd, outrel.r_addend,
10341 			      sgot->contents + off);
10342 		  outrel.r_addend = 0;
10343 		}
10344 
10345 	      if (outrel.r_info != 0)
10346 		{
10347 		  outrel.r_offset = (sgot->output_section->vma
10348 				     + sgot->output_offset
10349 				     + off);
10350 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10351 		}
10352 	      h->got.offset |= 1;
10353 	    }
10354 	  value = sgot->output_offset + off;
10355 	}
10356       else
10357 	{
10358 	  bfd_vma off;
10359 
10360 	  BFD_ASSERT (local_got_offsets != NULL &&
10361 		      local_got_offsets[r_symndx] != (bfd_vma) -1);
10362 
10363 	  off = local_got_offsets[r_symndx];
10364 
10365 	  /* The offset must always be a multiple of 4.  We use the
10366 	     least significant bit to record whether we have already
10367 	     generated the necessary reloc.  */
10368 	  if ((off & 1) != 0)
10369 	    off &= ~1;
10370 	  else
10371 	    {
10372 	      if (globals->use_rel)
10373 		bfd_put_32 (output_bfd, dynreloc_value, sgot->contents + off);
10374 
10375 	      if (bfd_link_pic (info) || dynreloc_st_type == STT_GNU_IFUNC)
10376 		{
10377 		  Elf_Internal_Rela outrel;
10378 
10379 		  outrel.r_addend = addend + dynreloc_value;
10380 		  outrel.r_offset = (sgot->output_section->vma
10381 				     + sgot->output_offset
10382 				     + off);
10383 		  if (dynreloc_st_type == STT_GNU_IFUNC)
10384 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_IRELATIVE);
10385 		  else
10386 		    outrel.r_info = ELF32_R_INFO (0, R_ARM_RELATIVE);
10387 		  elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10388 		}
10389 
10390 	      local_got_offsets[r_symndx] |= 1;
10391 	    }
10392 
10393 	  value = sgot->output_offset + off;
10394 	}
10395       if (r_type != R_ARM_GOT32)
10396 	value += sgot->output_section->vma;
10397 
10398       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10399 				       contents, rel->r_offset, value,
10400 				       rel->r_addend);
10401 
10402     case R_ARM_TLS_LDO32:
10403       value = value - dtpoff_base (info);
10404 
10405       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10406 				       contents, rel->r_offset, value,
10407 				       rel->r_addend);
10408 
10409     case R_ARM_TLS_LDM32:
10410       {
10411 	bfd_vma off;
10412 
10413 	if (sgot == NULL)
10414 	  abort ();
10415 
10416 	off = globals->tls_ldm_got.offset;
10417 
10418 	if ((off & 1) != 0)
10419 	  off &= ~1;
10420 	else
10421 	  {
10422 	    /* If we don't know the module number, create a relocation
10423 	       for it.  */
10424 	    if (bfd_link_pic (info))
10425 	      {
10426 		Elf_Internal_Rela outrel;
10427 
10428 		if (srelgot == NULL)
10429 		  abort ();
10430 
10431 		outrel.r_addend = 0;
10432 		outrel.r_offset = (sgot->output_section->vma
10433 				   + sgot->output_offset + off);
10434 		outrel.r_info = ELF32_R_INFO (0, R_ARM_TLS_DTPMOD32);
10435 
10436 		if (globals->use_rel)
10437 		  bfd_put_32 (output_bfd, outrel.r_addend,
10438 			      sgot->contents + off);
10439 
10440 		elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10441 	      }
10442 	    else
10443 	      bfd_put_32 (output_bfd, 1, sgot->contents + off);
10444 
10445 	    globals->tls_ldm_got.offset |= 1;
10446 	  }
10447 
10448 	value = sgot->output_section->vma + sgot->output_offset + off
10449 	  - (input_section->output_section->vma + input_section->output_offset + rel->r_offset);
10450 
10451 	return _bfd_final_link_relocate (howto, input_bfd, input_section,
10452 					 contents, rel->r_offset, value,
10453 					 rel->r_addend);
10454       }
10455 
10456     case R_ARM_TLS_CALL:
10457     case R_ARM_THM_TLS_CALL:
10458     case R_ARM_TLS_GD32:
10459     case R_ARM_TLS_IE32:
10460     case R_ARM_TLS_GOTDESC:
10461     case R_ARM_TLS_DESCSEQ:
10462     case R_ARM_THM_TLS_DESCSEQ:
10463       {
10464 	bfd_vma off, offplt;
10465 	int indx = 0;
10466 	char tls_type;
10467 
10468 	BFD_ASSERT (sgot != NULL);
10469 
10470 	if (h != NULL)
10471 	  {
10472 	    bfd_boolean dyn;
10473 	    dyn = globals->root.dynamic_sections_created;
10474 	    if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
10475 						 bfd_link_pic (info),
10476 						 h)
10477 		&& (!bfd_link_pic (info)
10478 		    || !SYMBOL_REFERENCES_LOCAL (info, h)))
10479 	      {
10480 		*unresolved_reloc_p = FALSE;
10481 		indx = h->dynindx;
10482 	      }
10483 	    off = h->got.offset;
10484 	    offplt = elf32_arm_hash_entry (h)->tlsdesc_got;
10485 	    tls_type = ((struct elf32_arm_link_hash_entry *) h)->tls_type;
10486 	  }
10487 	else
10488 	  {
10489 	    BFD_ASSERT (local_got_offsets != NULL);
10490 	    off = local_got_offsets[r_symndx];
10491 	    offplt = local_tlsdesc_gotents[r_symndx];
10492 	    tls_type = elf32_arm_local_got_tls_type (input_bfd)[r_symndx];
10493 	  }
10494 
10495 	/* Linker relaxations happens from one of the
10496 	   R_ARM_{GOTDESC,CALL,DESCSEQ} relocations to IE or LE.  */
10497 	if (ELF32_R_TYPE(rel->r_info) != r_type)
10498 	  tls_type = GOT_TLS_IE;
10499 
10500 	BFD_ASSERT (tls_type != GOT_UNKNOWN);
10501 
10502 	if ((off & 1) != 0)
10503 	  off &= ~1;
10504 	else
10505 	  {
10506 	    bfd_boolean need_relocs = FALSE;
10507 	    Elf_Internal_Rela outrel;
10508 	    int cur_off = off;
10509 
10510 	    /* The GOT entries have not been initialized yet.  Do it
10511 	       now, and emit any relocations.  If both an IE GOT and a
10512 	       GD GOT are necessary, we emit the GD first.  */
10513 
10514 	    if ((bfd_link_pic (info) || indx != 0)
10515 		&& (h == NULL
10516 		    || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
10517 		    || h->root.type != bfd_link_hash_undefweak))
10518 	      {
10519 		need_relocs = TRUE;
10520 		BFD_ASSERT (srelgot != NULL);
10521 	      }
10522 
10523 	    if (tls_type & GOT_TLS_GDESC)
10524 	      {
10525 		bfd_byte *loc;
10526 
10527 		/* We should have relaxed, unless this is an undefined
10528 		   weak symbol.  */
10529 		BFD_ASSERT ((h && (h->root.type == bfd_link_hash_undefweak))
10530 			    || bfd_link_pic (info));
10531 		BFD_ASSERT (globals->sgotplt_jump_table_size + offplt + 8
10532 			    <= globals->root.sgotplt->size);
10533 
10534 		outrel.r_addend = 0;
10535 		outrel.r_offset = (globals->root.sgotplt->output_section->vma
10536 				   + globals->root.sgotplt->output_offset
10537 				   + offplt
10538 				   + globals->sgotplt_jump_table_size);
10539 
10540 		outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DESC);
10541 		sreloc = globals->root.srelplt;
10542 		loc = sreloc->contents;
10543 		loc += globals->next_tls_desc_index++ * RELOC_SIZE (globals);
10544 		BFD_ASSERT (loc + RELOC_SIZE (globals)
10545 			   <= sreloc->contents + sreloc->size);
10546 
10547 		SWAP_RELOC_OUT (globals) (output_bfd, &outrel, loc);
10548 
10549 		/* For globals, the first word in the relocation gets
10550 		   the relocation index and the top bit set, or zero,
10551 		   if we're binding now.  For locals, it gets the
10552 		   symbol's offset in the tls section.  */
10553 		bfd_put_32 (output_bfd,
10554 			    !h ? value - elf_hash_table (info)->tls_sec->vma
10555 			    : info->flags & DF_BIND_NOW ? 0
10556 			    : 0x80000000 | ELF32_R_SYM (outrel.r_info),
10557 			    globals->root.sgotplt->contents + offplt
10558 			    + globals->sgotplt_jump_table_size);
10559 
10560 		/* Second word in the relocation is always zero.  */
10561 		bfd_put_32 (output_bfd, 0,
10562 			    globals->root.sgotplt->contents + offplt
10563 			    + globals->sgotplt_jump_table_size + 4);
10564 	      }
10565 	    if (tls_type & GOT_TLS_GD)
10566 	      {
10567 		if (need_relocs)
10568 		  {
10569 		    outrel.r_addend = 0;
10570 		    outrel.r_offset = (sgot->output_section->vma
10571 				       + sgot->output_offset
10572 				       + cur_off);
10573 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_DTPMOD32);
10574 
10575 		    if (globals->use_rel)
10576 		      bfd_put_32 (output_bfd, outrel.r_addend,
10577 				  sgot->contents + cur_off);
10578 
10579 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10580 
10581 		    if (indx == 0)
10582 		      bfd_put_32 (output_bfd, value - dtpoff_base (info),
10583 				  sgot->contents + cur_off + 4);
10584 		    else
10585 		      {
10586 			outrel.r_addend = 0;
10587 			outrel.r_info = ELF32_R_INFO (indx,
10588 						      R_ARM_TLS_DTPOFF32);
10589 			outrel.r_offset += 4;
10590 
10591 			if (globals->use_rel)
10592 			  bfd_put_32 (output_bfd, outrel.r_addend,
10593 				      sgot->contents + cur_off + 4);
10594 
10595 			elf32_arm_add_dynreloc (output_bfd, info,
10596 						srelgot, &outrel);
10597 		      }
10598 		  }
10599 		else
10600 		  {
10601 		    /* If we are not emitting relocations for a
10602 		       general dynamic reference, then we must be in a
10603 		       static link or an executable link with the
10604 		       symbol binding locally.  Mark it as belonging
10605 		       to module 1, the executable.  */
10606 		    bfd_put_32 (output_bfd, 1,
10607 				sgot->contents + cur_off);
10608 		    bfd_put_32 (output_bfd, value - dtpoff_base (info),
10609 				sgot->contents + cur_off + 4);
10610 		  }
10611 
10612 		cur_off += 8;
10613 	      }
10614 
10615 	    if (tls_type & GOT_TLS_IE)
10616 	      {
10617 		if (need_relocs)
10618 		  {
10619 		    if (indx == 0)
10620 		      outrel.r_addend = value - dtpoff_base (info);
10621 		    else
10622 		      outrel.r_addend = 0;
10623 		    outrel.r_offset = (sgot->output_section->vma
10624 				       + sgot->output_offset
10625 				       + cur_off);
10626 		    outrel.r_info = ELF32_R_INFO (indx, R_ARM_TLS_TPOFF32);
10627 
10628 		    if (globals->use_rel)
10629 		      bfd_put_32 (output_bfd, outrel.r_addend,
10630 				  sgot->contents + cur_off);
10631 
10632 		    elf32_arm_add_dynreloc (output_bfd, info, srelgot, &outrel);
10633 		  }
10634 		else
10635 		  bfd_put_32 (output_bfd, tpoff (info, value),
10636 			      sgot->contents + cur_off);
10637 		cur_off += 4;
10638 	      }
10639 
10640 	    if (h != NULL)
10641 	      h->got.offset |= 1;
10642 	    else
10643 	      local_got_offsets[r_symndx] |= 1;
10644 	  }
10645 
10646 	if ((tls_type & GOT_TLS_GD) && r_type != R_ARM_TLS_GD32)
10647 	  off += 8;
10648 	else if (tls_type & GOT_TLS_GDESC)
10649 	  off = offplt;
10650 
10651 	if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL
10652 	    || ELF32_R_TYPE(rel->r_info) == R_ARM_THM_TLS_CALL)
10653 	  {
10654 	    bfd_signed_vma offset;
10655 	    /* TLS stubs are arm mode.  The original symbol is a
10656 	       data object, so branch_type is bogus.  */
10657 	    branch_type = ST_BRANCH_TO_ARM;
10658 	    enum elf32_arm_stub_type stub_type
10659 	      = arm_type_of_stub (info, input_section, rel,
10660 				  st_type, &branch_type,
10661 				  (struct elf32_arm_link_hash_entry *)h,
10662 				  globals->tls_trampoline, globals->root.splt,
10663 				  input_bfd, sym_name);
10664 
10665 	    if (stub_type != arm_stub_none)
10666 	      {
10667 		struct elf32_arm_stub_hash_entry *stub_entry
10668 		  = elf32_arm_get_stub_entry
10669 		  (input_section, globals->root.splt, 0, rel,
10670 		   globals, stub_type);
10671 		offset = (stub_entry->stub_offset
10672 			  + stub_entry->stub_sec->output_offset
10673 			  + stub_entry->stub_sec->output_section->vma);
10674 	      }
10675 	    else
10676 	      offset = (globals->root.splt->output_section->vma
10677 			+ globals->root.splt->output_offset
10678 			+ globals->tls_trampoline);
10679 
10680 	    if (ELF32_R_TYPE(rel->r_info) == R_ARM_TLS_CALL)
10681 	      {
10682 		unsigned long inst;
10683 
10684 		offset -= (input_section->output_section->vma
10685 			   + input_section->output_offset
10686 			   + rel->r_offset + 8);
10687 
10688 		inst = offset >> 2;
10689 		inst &= 0x00ffffff;
10690 		value = inst | (globals->use_blx ? 0xfa000000 : 0xeb000000);
10691 	      }
10692 	    else
10693 	      {
10694 		/* Thumb blx encodes the offset in a complicated
10695 		   fashion.  */
10696 		unsigned upper_insn, lower_insn;
10697 		unsigned neg;
10698 
10699 		offset -= (input_section->output_section->vma
10700 			   + input_section->output_offset
10701 			   + rel->r_offset + 4);
10702 
10703 		if (stub_type != arm_stub_none
10704 		    && arm_stub_is_thumb (stub_type))
10705 		  {
10706 		    lower_insn = 0xd000;
10707 		  }
10708 		else
10709 		  {
10710 		    lower_insn = 0xc000;
10711 		    /* Round up the offset to a word boundary.  */
10712 		    offset = (offset + 2) & ~2;
10713 		  }
10714 
10715 		neg = offset < 0;
10716 		upper_insn = (0xf000
10717 			      | ((offset >> 12) & 0x3ff)
10718 			      | (neg << 10));
10719 		lower_insn |= (((!((offset >> 23) & 1)) ^ neg) << 13)
10720 			      | (((!((offset >> 22) & 1)) ^ neg) << 11)
10721 			      | ((offset >> 1) & 0x7ff);
10722 		bfd_put_16 (input_bfd, upper_insn, hit_data);
10723 		bfd_put_16 (input_bfd, lower_insn, hit_data + 2);
10724 		return bfd_reloc_ok;
10725 	      }
10726 	  }
10727 	/* These relocations needs special care, as besides the fact
10728 	   they point somewhere in .gotplt, the addend must be
10729 	   adjusted accordingly depending on the type of instruction
10730 	   we refer to.  */
10731 	else if ((r_type == R_ARM_TLS_GOTDESC) && (tls_type & GOT_TLS_GDESC))
10732 	  {
10733 	    unsigned long data, insn;
10734 	    unsigned thumb;
10735 
10736 	    data = bfd_get_32 (input_bfd, hit_data);
10737 	    thumb = data & 1;
10738 	    data &= ~1u;
10739 
10740 	    if (thumb)
10741 	      {
10742 		insn = bfd_get_16 (input_bfd, contents + rel->r_offset - data);
10743 		if ((insn & 0xf000) == 0xf000 || (insn & 0xf800) == 0xe800)
10744 		  insn = (insn << 16)
10745 		    | bfd_get_16 (input_bfd,
10746 				  contents + rel->r_offset - data + 2);
10747 		if ((insn & 0xf800c000) == 0xf000c000)
10748 		  /* bl/blx */
10749 		  value = -6;
10750 		else if ((insn & 0xffffff00) == 0x4400)
10751 		  /* add */
10752 		  value = -5;
10753 		else
10754 		  {
10755 		    (*_bfd_error_handler)
10756 		      (_("%B(%A+0x%lx):unexpected Thumb instruction '0x%x' referenced by TLS_GOTDESC"),
10757 		       input_bfd, input_section,
10758 		       (unsigned long)rel->r_offset, insn);
10759 		    return bfd_reloc_notsupported;
10760 		  }
10761 	      }
10762 	    else
10763 	      {
10764 		insn = bfd_get_32 (input_bfd, contents + rel->r_offset - data);
10765 
10766 		switch (insn >> 24)
10767 		  {
10768 		  case 0xeb:  /* bl */
10769 		  case 0xfa:  /* blx */
10770 		    value = -4;
10771 		    break;
10772 
10773 		  case 0xe0:	/* add */
10774 		    value = -8;
10775 		    break;
10776 
10777 		  default:
10778 		    (*_bfd_error_handler)
10779 		      (_("%B(%A+0x%lx):unexpected ARM instruction '0x%x' referenced by TLS_GOTDESC"),
10780 		       input_bfd, input_section,
10781 		       (unsigned long)rel->r_offset, insn);
10782 		    return bfd_reloc_notsupported;
10783 		  }
10784 	      }
10785 
10786 	    value += ((globals->root.sgotplt->output_section->vma
10787 		       + globals->root.sgotplt->output_offset + off)
10788 		      - (input_section->output_section->vma
10789 			 + input_section->output_offset
10790 			 + rel->r_offset)
10791 		      + globals->sgotplt_jump_table_size);
10792 	  }
10793 	else
10794 	  value = ((globals->root.sgot->output_section->vma
10795 		    + globals->root.sgot->output_offset + off)
10796 		   - (input_section->output_section->vma
10797 		      + input_section->output_offset + rel->r_offset));
10798 
10799 	return _bfd_final_link_relocate (howto, input_bfd, input_section,
10800 					 contents, rel->r_offset, value,
10801 					 rel->r_addend);
10802       }
10803 
10804     case R_ARM_TLS_LE32:
10805       if (bfd_link_dll (info))
10806 	{
10807 	  (*_bfd_error_handler)
10808 	    (_("%B(%A+0x%lx): R_ARM_TLS_LE32 relocation not permitted in shared object"),
10809 	     input_bfd, input_section,
10810 	     (long) rel->r_offset, howto->name);
10811 	  return bfd_reloc_notsupported;
10812 	}
10813       else
10814 	value = tpoff (info, value);
10815 
10816       return _bfd_final_link_relocate (howto, input_bfd, input_section,
10817 				       contents, rel->r_offset, value,
10818 				       rel->r_addend);
10819 
10820     case R_ARM_V4BX:
10821       if (globals->fix_v4bx)
10822 	{
10823 	  bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10824 
10825 	  /* Ensure that we have a BX instruction.  */
10826 	  BFD_ASSERT ((insn & 0x0ffffff0) == 0x012fff10);
10827 
10828 	  if (globals->fix_v4bx == 2 && (insn & 0xf) != 0xf)
10829 	    {
10830 	      /* Branch to veneer.  */
10831 	      bfd_vma glue_addr;
10832 	      glue_addr = elf32_arm_bx_glue (info, insn & 0xf);
10833 	      glue_addr -= input_section->output_section->vma
10834 			   + input_section->output_offset
10835 			   + rel->r_offset + 8;
10836 	      insn = (insn & 0xf0000000) | 0x0a000000
10837 		     | ((glue_addr >> 2) & 0x00ffffff);
10838 	    }
10839 	  else
10840 	    {
10841 	      /* Preserve Rm (lowest four bits) and the condition code
10842 		 (highest four bits). Other bits encode MOV PC,Rm.  */
10843 	      insn = (insn & 0xf000000f) | 0x01a0f000;
10844 	    }
10845 
10846 	  bfd_put_32 (input_bfd, insn, hit_data);
10847 	}
10848       return bfd_reloc_ok;
10849 
10850     case R_ARM_MOVW_ABS_NC:
10851     case R_ARM_MOVT_ABS:
10852     case R_ARM_MOVW_PREL_NC:
10853     case R_ARM_MOVT_PREL:
10854     /* Until we properly support segment-base-relative addressing then
10855        we assume the segment base to be zero, as for the group relocations.
10856        Thus R_ARM_MOVW_BREL_NC has the same semantics as R_ARM_MOVW_ABS_NC
10857        and R_ARM_MOVT_BREL has the same semantics as R_ARM_MOVT_ABS.  */
10858     case R_ARM_MOVW_BREL_NC:
10859     case R_ARM_MOVW_BREL:
10860     case R_ARM_MOVT_BREL:
10861       {
10862 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10863 
10864 	if (globals->use_rel)
10865 	  {
10866 	    addend = ((insn >> 4) & 0xf000) | (insn & 0xfff);
10867 	    signed_addend = (addend ^ 0x8000) - 0x8000;
10868 	  }
10869 
10870 	value += signed_addend;
10871 
10872 	if (r_type == R_ARM_MOVW_PREL_NC || r_type == R_ARM_MOVT_PREL)
10873 	  value -= (input_section->output_section->vma
10874 		    + input_section->output_offset + rel->r_offset);
10875 
10876 	if (r_type == R_ARM_MOVW_BREL && value >= 0x10000)
10877 	  return bfd_reloc_overflow;
10878 
10879 	if (branch_type == ST_BRANCH_TO_THUMB)
10880 	  value |= 1;
10881 
10882 	if (r_type == R_ARM_MOVT_ABS || r_type == R_ARM_MOVT_PREL
10883 	    || r_type == R_ARM_MOVT_BREL)
10884 	  value >>= 16;
10885 
10886 	insn &= 0xfff0f000;
10887 	insn |= value & 0xfff;
10888 	insn |= (value & 0xf000) << 4;
10889 	bfd_put_32 (input_bfd, insn, hit_data);
10890       }
10891       return bfd_reloc_ok;
10892 
10893     case R_ARM_THM_MOVW_ABS_NC:
10894     case R_ARM_THM_MOVT_ABS:
10895     case R_ARM_THM_MOVW_PREL_NC:
10896     case R_ARM_THM_MOVT_PREL:
10897     /* Until we properly support segment-base-relative addressing then
10898        we assume the segment base to be zero, as for the above relocations.
10899        Thus R_ARM_THM_MOVW_BREL_NC has the same semantics as
10900        R_ARM_THM_MOVW_ABS_NC and R_ARM_THM_MOVT_BREL has the same semantics
10901        as R_ARM_THM_MOVT_ABS.  */
10902     case R_ARM_THM_MOVW_BREL_NC:
10903     case R_ARM_THM_MOVW_BREL:
10904     case R_ARM_THM_MOVT_BREL:
10905       {
10906 	bfd_vma insn;
10907 
10908 	insn = bfd_get_16 (input_bfd, hit_data) << 16;
10909 	insn |= bfd_get_16 (input_bfd, hit_data + 2);
10910 
10911 	if (globals->use_rel)
10912 	  {
10913 	    addend = ((insn >> 4)  & 0xf000)
10914 		   | ((insn >> 15) & 0x0800)
10915 		   | ((insn >> 4)  & 0x0700)
10916 		   | (insn         & 0x00ff);
10917 	    signed_addend = (addend ^ 0x8000) - 0x8000;
10918 	  }
10919 
10920 	value += signed_addend;
10921 
10922 	if (r_type == R_ARM_THM_MOVW_PREL_NC || r_type == R_ARM_THM_MOVT_PREL)
10923 	  value -= (input_section->output_section->vma
10924 		    + input_section->output_offset + rel->r_offset);
10925 
10926 	if (r_type == R_ARM_THM_MOVW_BREL && value >= 0x10000)
10927 	  return bfd_reloc_overflow;
10928 
10929 	if (branch_type == ST_BRANCH_TO_THUMB)
10930 	  value |= 1;
10931 
10932 	if (r_type == R_ARM_THM_MOVT_ABS || r_type == R_ARM_THM_MOVT_PREL
10933 	    || r_type == R_ARM_THM_MOVT_BREL)
10934 	  value >>= 16;
10935 
10936 	insn &= 0xfbf08f00;
10937 	insn |= (value & 0xf000) << 4;
10938 	insn |= (value & 0x0800) << 15;
10939 	insn |= (value & 0x0700) << 4;
10940 	insn |= (value & 0x00ff);
10941 
10942 	bfd_put_16 (input_bfd, insn >> 16, hit_data);
10943 	bfd_put_16 (input_bfd, insn & 0xffff, hit_data + 2);
10944       }
10945       return bfd_reloc_ok;
10946 
10947     case R_ARM_ALU_PC_G0_NC:
10948     case R_ARM_ALU_PC_G1_NC:
10949     case R_ARM_ALU_PC_G0:
10950     case R_ARM_ALU_PC_G1:
10951     case R_ARM_ALU_PC_G2:
10952     case R_ARM_ALU_SB_G0_NC:
10953     case R_ARM_ALU_SB_G1_NC:
10954     case R_ARM_ALU_SB_G0:
10955     case R_ARM_ALU_SB_G1:
10956     case R_ARM_ALU_SB_G2:
10957       {
10958 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
10959 	bfd_vma pc = input_section->output_section->vma
10960 		     + input_section->output_offset + rel->r_offset;
10961 	/* sb is the origin of the *segment* containing the symbol.  */
10962 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
10963 	bfd_vma residual;
10964 	bfd_vma g_n;
10965 	bfd_signed_vma signed_value;
10966 	int group = 0;
10967 
10968 	/* Determine which group of bits to select.  */
10969 	switch (r_type)
10970 	  {
10971 	  case R_ARM_ALU_PC_G0_NC:
10972 	  case R_ARM_ALU_PC_G0:
10973 	  case R_ARM_ALU_SB_G0_NC:
10974 	  case R_ARM_ALU_SB_G0:
10975 	    group = 0;
10976 	    break;
10977 
10978 	  case R_ARM_ALU_PC_G1_NC:
10979 	  case R_ARM_ALU_PC_G1:
10980 	  case R_ARM_ALU_SB_G1_NC:
10981 	  case R_ARM_ALU_SB_G1:
10982 	    group = 1;
10983 	    break;
10984 
10985 	  case R_ARM_ALU_PC_G2:
10986 	  case R_ARM_ALU_SB_G2:
10987 	    group = 2;
10988 	    break;
10989 
10990 	  default:
10991 	    abort ();
10992 	  }
10993 
10994 	/* If REL, extract the addend from the insn.  If RELA, it will
10995 	   have already been fetched for us.  */
10996 	if (globals->use_rel)
10997 	  {
10998 	    int negative;
10999 	    bfd_vma constant = insn & 0xff;
11000 	    bfd_vma rotation = (insn & 0xf00) >> 8;
11001 
11002 	    if (rotation == 0)
11003 	      signed_addend = constant;
11004 	    else
11005 	      {
11006 		/* Compensate for the fact that in the instruction, the
11007 		   rotation is stored in multiples of 2 bits.  */
11008 		rotation *= 2;
11009 
11010 		/* Rotate "constant" right by "rotation" bits.  */
11011 		signed_addend = (constant >> rotation) |
11012 				(constant << (8 * sizeof (bfd_vma) - rotation));
11013 	      }
11014 
11015 	    /* Determine if the instruction is an ADD or a SUB.
11016 	       (For REL, this determines the sign of the addend.)  */
11017 	    negative = identify_add_or_sub (insn);
11018 	    if (negative == 0)
11019 	      {
11020 		(*_bfd_error_handler)
11021 		  (_("%B(%A+0x%lx): Only ADD or SUB instructions are allowed for ALU group relocations"),
11022 		  input_bfd, input_section,
11023 		  (long) rel->r_offset, howto->name);
11024 		return bfd_reloc_overflow;
11025 	      }
11026 
11027 	    signed_addend *= negative;
11028 	  }
11029 
11030 	/* Compute the value (X) to go in the place.  */
11031 	if (r_type == R_ARM_ALU_PC_G0_NC
11032 	    || r_type == R_ARM_ALU_PC_G1_NC
11033 	    || r_type == R_ARM_ALU_PC_G0
11034 	    || r_type == R_ARM_ALU_PC_G1
11035 	    || r_type == R_ARM_ALU_PC_G2)
11036 	  /* PC relative.  */
11037 	  signed_value = value - pc + signed_addend;
11038 	else
11039 	  /* Section base relative.  */
11040 	  signed_value = value - sb + signed_addend;
11041 
11042 	/* If the target symbol is a Thumb function, then set the
11043 	   Thumb bit in the address.  */
11044 	if (branch_type == ST_BRANCH_TO_THUMB)
11045 	  signed_value |= 1;
11046 
11047 	/* Calculate the value of the relevant G_n, in encoded
11048 	   constant-with-rotation format.  */
11049 	g_n = calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11050 					  group, &residual);
11051 
11052 	/* Check for overflow if required.  */
11053 	if ((r_type == R_ARM_ALU_PC_G0
11054 	     || r_type == R_ARM_ALU_PC_G1
11055 	     || r_type == R_ARM_ALU_PC_G2
11056 	     || r_type == R_ARM_ALU_SB_G0
11057 	     || r_type == R_ARM_ALU_SB_G1
11058 	     || r_type == R_ARM_ALU_SB_G2) && residual != 0)
11059 	  {
11060 	    (*_bfd_error_handler)
11061 	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11062 	      input_bfd, input_section,
11063 	       (long) rel->r_offset, signed_value < 0 ? - signed_value : signed_value,
11064 	       howto->name);
11065 	    return bfd_reloc_overflow;
11066 	  }
11067 
11068 	/* Mask out the value and the ADD/SUB part of the opcode; take care
11069 	   not to destroy the S bit.  */
11070 	insn &= 0xff1ff000;
11071 
11072 	/* Set the opcode according to whether the value to go in the
11073 	   place is negative.  */
11074 	if (signed_value < 0)
11075 	  insn |= 1 << 22;
11076 	else
11077 	  insn |= 1 << 23;
11078 
11079 	/* Encode the offset.  */
11080 	insn |= g_n;
11081 
11082 	bfd_put_32 (input_bfd, insn, hit_data);
11083       }
11084       return bfd_reloc_ok;
11085 
11086     case R_ARM_LDR_PC_G0:
11087     case R_ARM_LDR_PC_G1:
11088     case R_ARM_LDR_PC_G2:
11089     case R_ARM_LDR_SB_G0:
11090     case R_ARM_LDR_SB_G1:
11091     case R_ARM_LDR_SB_G2:
11092       {
11093 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11094 	bfd_vma pc = input_section->output_section->vma
11095 		     + input_section->output_offset + rel->r_offset;
11096 	/* sb is the origin of the *segment* containing the symbol.  */
11097 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11098 	bfd_vma residual;
11099 	bfd_signed_vma signed_value;
11100 	int group = 0;
11101 
11102 	/* Determine which groups of bits to calculate.  */
11103 	switch (r_type)
11104 	  {
11105 	  case R_ARM_LDR_PC_G0:
11106 	  case R_ARM_LDR_SB_G0:
11107 	    group = 0;
11108 	    break;
11109 
11110 	  case R_ARM_LDR_PC_G1:
11111 	  case R_ARM_LDR_SB_G1:
11112 	    group = 1;
11113 	    break;
11114 
11115 	  case R_ARM_LDR_PC_G2:
11116 	  case R_ARM_LDR_SB_G2:
11117 	    group = 2;
11118 	    break;
11119 
11120 	  default:
11121 	    abort ();
11122 	  }
11123 
11124 	/* If REL, extract the addend from the insn.  If RELA, it will
11125 	   have already been fetched for us.  */
11126 	if (globals->use_rel)
11127 	  {
11128 	    int negative = (insn & (1 << 23)) ? 1 : -1;
11129 	    signed_addend = negative * (insn & 0xfff);
11130 	  }
11131 
11132 	/* Compute the value (X) to go in the place.  */
11133 	if (r_type == R_ARM_LDR_PC_G0
11134 	    || r_type == R_ARM_LDR_PC_G1
11135 	    || r_type == R_ARM_LDR_PC_G2)
11136 	  /* PC relative.  */
11137 	  signed_value = value - pc + signed_addend;
11138 	else
11139 	  /* Section base relative.  */
11140 	  signed_value = value - sb + signed_addend;
11141 
11142 	/* Calculate the value of the relevant G_{n-1} to obtain
11143 	   the residual at that stage.  */
11144 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11145 				    group - 1, &residual);
11146 
11147 	/* Check for overflow.  */
11148 	if (residual >= 0x1000)
11149 	  {
11150 	    (*_bfd_error_handler)
11151 	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11152 	       input_bfd, input_section,
11153 	       (long) rel->r_offset, labs (signed_value), howto->name);
11154 	    return bfd_reloc_overflow;
11155 	  }
11156 
11157 	/* Mask out the value and U bit.  */
11158 	insn &= 0xff7ff000;
11159 
11160 	/* Set the U bit if the value to go in the place is non-negative.  */
11161 	if (signed_value >= 0)
11162 	  insn |= 1 << 23;
11163 
11164 	/* Encode the offset.  */
11165 	insn |= residual;
11166 
11167 	bfd_put_32 (input_bfd, insn, hit_data);
11168       }
11169       return bfd_reloc_ok;
11170 
11171     case R_ARM_LDRS_PC_G0:
11172     case R_ARM_LDRS_PC_G1:
11173     case R_ARM_LDRS_PC_G2:
11174     case R_ARM_LDRS_SB_G0:
11175     case R_ARM_LDRS_SB_G1:
11176     case R_ARM_LDRS_SB_G2:
11177       {
11178 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11179 	bfd_vma pc = input_section->output_section->vma
11180 		     + input_section->output_offset + rel->r_offset;
11181 	/* sb is the origin of the *segment* containing the symbol.  */
11182 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11183 	bfd_vma residual;
11184 	bfd_signed_vma signed_value;
11185 	int group = 0;
11186 
11187 	/* Determine which groups of bits to calculate.  */
11188 	switch (r_type)
11189 	  {
11190 	  case R_ARM_LDRS_PC_G0:
11191 	  case R_ARM_LDRS_SB_G0:
11192 	    group = 0;
11193 	    break;
11194 
11195 	  case R_ARM_LDRS_PC_G1:
11196 	  case R_ARM_LDRS_SB_G1:
11197 	    group = 1;
11198 	    break;
11199 
11200 	  case R_ARM_LDRS_PC_G2:
11201 	  case R_ARM_LDRS_SB_G2:
11202 	    group = 2;
11203 	    break;
11204 
11205 	  default:
11206 	    abort ();
11207 	  }
11208 
11209 	/* If REL, extract the addend from the insn.  If RELA, it will
11210 	   have already been fetched for us.  */
11211 	if (globals->use_rel)
11212 	  {
11213 	    int negative = (insn & (1 << 23)) ? 1 : -1;
11214 	    signed_addend = negative * (((insn & 0xf00) >> 4) + (insn & 0xf));
11215 	  }
11216 
11217 	/* Compute the value (X) to go in the place.  */
11218 	if (r_type == R_ARM_LDRS_PC_G0
11219 	    || r_type == R_ARM_LDRS_PC_G1
11220 	    || r_type == R_ARM_LDRS_PC_G2)
11221 	  /* PC relative.  */
11222 	  signed_value = value - pc + signed_addend;
11223 	else
11224 	  /* Section base relative.  */
11225 	  signed_value = value - sb + signed_addend;
11226 
11227 	/* Calculate the value of the relevant G_{n-1} to obtain
11228 	   the residual at that stage.  */
11229 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11230 				    group - 1, &residual);
11231 
11232 	/* Check for overflow.  */
11233 	if (residual >= 0x100)
11234 	  {
11235 	    (*_bfd_error_handler)
11236 	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11237 	       input_bfd, input_section,
11238 	       (long) rel->r_offset, labs (signed_value), howto->name);
11239 	    return bfd_reloc_overflow;
11240 	  }
11241 
11242 	/* Mask out the value and U bit.  */
11243 	insn &= 0xff7ff0f0;
11244 
11245 	/* Set the U bit if the value to go in the place is non-negative.  */
11246 	if (signed_value >= 0)
11247 	  insn |= 1 << 23;
11248 
11249 	/* Encode the offset.  */
11250 	insn |= ((residual & 0xf0) << 4) | (residual & 0xf);
11251 
11252 	bfd_put_32 (input_bfd, insn, hit_data);
11253       }
11254       return bfd_reloc_ok;
11255 
11256     case R_ARM_LDC_PC_G0:
11257     case R_ARM_LDC_PC_G1:
11258     case R_ARM_LDC_PC_G2:
11259     case R_ARM_LDC_SB_G0:
11260     case R_ARM_LDC_SB_G1:
11261     case R_ARM_LDC_SB_G2:
11262       {
11263 	bfd_vma insn = bfd_get_32 (input_bfd, hit_data);
11264 	bfd_vma pc = input_section->output_section->vma
11265 		     + input_section->output_offset + rel->r_offset;
11266 	/* sb is the origin of the *segment* containing the symbol.  */
11267 	bfd_vma sb = sym_sec ? sym_sec->output_section->vma : 0;
11268 	bfd_vma residual;
11269 	bfd_signed_vma signed_value;
11270 	int group = 0;
11271 
11272 	/* Determine which groups of bits to calculate.  */
11273 	switch (r_type)
11274 	  {
11275 	  case R_ARM_LDC_PC_G0:
11276 	  case R_ARM_LDC_SB_G0:
11277 	    group = 0;
11278 	    break;
11279 
11280 	  case R_ARM_LDC_PC_G1:
11281 	  case R_ARM_LDC_SB_G1:
11282 	    group = 1;
11283 	    break;
11284 
11285 	  case R_ARM_LDC_PC_G2:
11286 	  case R_ARM_LDC_SB_G2:
11287 	    group = 2;
11288 	    break;
11289 
11290 	  default:
11291 	    abort ();
11292 	  }
11293 
11294 	/* If REL, extract the addend from the insn.  If RELA, it will
11295 	   have already been fetched for us.  */
11296 	if (globals->use_rel)
11297 	  {
11298 	    int negative = (insn & (1 << 23)) ? 1 : -1;
11299 	    signed_addend = negative * ((insn & 0xff) << 2);
11300 	  }
11301 
11302 	/* Compute the value (X) to go in the place.  */
11303 	if (r_type == R_ARM_LDC_PC_G0
11304 	    || r_type == R_ARM_LDC_PC_G1
11305 	    || r_type == R_ARM_LDC_PC_G2)
11306 	  /* PC relative.  */
11307 	  signed_value = value - pc + signed_addend;
11308 	else
11309 	  /* Section base relative.  */
11310 	  signed_value = value - sb + signed_addend;
11311 
11312 	/* Calculate the value of the relevant G_{n-1} to obtain
11313 	   the residual at that stage.  */
11314 	calculate_group_reloc_mask (signed_value < 0 ? - signed_value : signed_value,
11315 				    group - 1, &residual);
11316 
11317 	/* Check for overflow.  (The absolute value to go in the place must be
11318 	   divisible by four and, after having been divided by four, must
11319 	   fit in eight bits.)  */
11320 	if ((residual & 0x3) != 0 || residual >= 0x400)
11321 	  {
11322 	    (*_bfd_error_handler)
11323 	      (_("%B(%A+0x%lx): Overflow whilst splitting 0x%lx for group relocation %s"),
11324 	      input_bfd, input_section,
11325 	      (long) rel->r_offset, labs (signed_value), howto->name);
11326 	    return bfd_reloc_overflow;
11327 	  }
11328 
11329 	/* Mask out the value and U bit.  */
11330 	insn &= 0xff7fff00;
11331 
11332 	/* Set the U bit if the value to go in the place is non-negative.  */
11333 	if (signed_value >= 0)
11334 	  insn |= 1 << 23;
11335 
11336 	/* Encode the offset.  */
11337 	insn |= residual >> 2;
11338 
11339 	bfd_put_32 (input_bfd, insn, hit_data);
11340       }
11341       return bfd_reloc_ok;
11342 
11343     case R_ARM_THM_ALU_ABS_G0_NC:
11344     case R_ARM_THM_ALU_ABS_G1_NC:
11345     case R_ARM_THM_ALU_ABS_G2_NC:
11346     case R_ARM_THM_ALU_ABS_G3_NC:
11347 	{
11348 	    const int shift_array[4] = {0, 8, 16, 24};
11349 	    bfd_vma insn = bfd_get_16 (input_bfd, hit_data);
11350 	    bfd_vma addr = value;
11351 	    int shift = shift_array[r_type - R_ARM_THM_ALU_ABS_G0_NC];
11352 
11353 	    /* Compute address.  */
11354 	    if (globals->use_rel)
11355 		signed_addend = insn & 0xff;
11356 	    addr += signed_addend;
11357 	    if (branch_type == ST_BRANCH_TO_THUMB)
11358 		addr |= 1;
11359 	    /* Clean imm8 insn.  */
11360 	    insn &= 0xff00;
11361 	    /* And update with correct part of address.  */
11362 	    insn |= (addr >> shift) & 0xff;
11363 	    /* Update insn.  */
11364 	    bfd_put_16 (input_bfd, insn, hit_data);
11365 	}
11366 
11367 	*unresolved_reloc_p = FALSE;
11368 	return bfd_reloc_ok;
11369 
11370     default:
11371       return bfd_reloc_notsupported;
11372     }
11373 }
11374 
11375 /* Add INCREMENT to the reloc (of type HOWTO) at ADDRESS.  */
11376 static void
11377 arm_add_to_rel (bfd *              abfd,
11378 		bfd_byte *         address,
11379 		reloc_howto_type * howto,
11380 		bfd_signed_vma     increment)
11381 {
11382   bfd_signed_vma addend;
11383 
11384   if (howto->type == R_ARM_THM_CALL
11385       || howto->type == R_ARM_THM_JUMP24)
11386     {
11387       int upper_insn, lower_insn;
11388       int upper, lower;
11389 
11390       upper_insn = bfd_get_16 (abfd, address);
11391       lower_insn = bfd_get_16 (abfd, address + 2);
11392       upper = upper_insn & 0x7ff;
11393       lower = lower_insn & 0x7ff;
11394 
11395       addend = (upper << 12) | (lower << 1);
11396       addend += increment;
11397       addend >>= 1;
11398 
11399       upper_insn = (upper_insn & 0xf800) | ((addend >> 11) & 0x7ff);
11400       lower_insn = (lower_insn & 0xf800) | (addend & 0x7ff);
11401 
11402       bfd_put_16 (abfd, (bfd_vma) upper_insn, address);
11403       bfd_put_16 (abfd, (bfd_vma) lower_insn, address + 2);
11404     }
11405   else
11406     {
11407       bfd_vma        contents;
11408 
11409       contents = bfd_get_32 (abfd, address);
11410 
11411       /* Get the (signed) value from the instruction.  */
11412       addend = contents & howto->src_mask;
11413       if (addend & ((howto->src_mask + 1) >> 1))
11414 	{
11415 	  bfd_signed_vma mask;
11416 
11417 	  mask = -1;
11418 	  mask &= ~ howto->src_mask;
11419 	  addend |= mask;
11420 	}
11421 
11422       /* Add in the increment, (which is a byte value).  */
11423       switch (howto->type)
11424 	{
11425 	default:
11426 	  addend += increment;
11427 	  break;
11428 
11429 	case R_ARM_PC24:
11430 	case R_ARM_PLT32:
11431 	case R_ARM_CALL:
11432 	case R_ARM_JUMP24:
11433 	  addend <<= howto->size;
11434 	  addend += increment;
11435 
11436 	  /* Should we check for overflow here ?  */
11437 
11438 	  /* Drop any undesired bits.  */
11439 	  addend >>= howto->rightshift;
11440 	  break;
11441 	}
11442 
11443       contents = (contents & ~ howto->dst_mask) | (addend & howto->dst_mask);
11444 
11445       bfd_put_32 (abfd, contents, address);
11446     }
11447 }
11448 
11449 #define IS_ARM_TLS_RELOC(R_TYPE)	\
11450   ((R_TYPE) == R_ARM_TLS_GD32		\
11451    || (R_TYPE) == R_ARM_TLS_LDO32	\
11452    || (R_TYPE) == R_ARM_TLS_LDM32	\
11453    || (R_TYPE) == R_ARM_TLS_DTPOFF32	\
11454    || (R_TYPE) == R_ARM_TLS_DTPMOD32	\
11455    || (R_TYPE) == R_ARM_TLS_TPOFF32	\
11456    || (R_TYPE) == R_ARM_TLS_LE32	\
11457    || (R_TYPE) == R_ARM_TLS_IE32	\
11458    || IS_ARM_TLS_GNU_RELOC (R_TYPE))
11459 
11460 /* Specific set of relocations for the gnu tls dialect.  */
11461 #define IS_ARM_TLS_GNU_RELOC(R_TYPE)	\
11462   ((R_TYPE) == R_ARM_TLS_GOTDESC	\
11463    || (R_TYPE) == R_ARM_TLS_CALL	\
11464    || (R_TYPE) == R_ARM_THM_TLS_CALL	\
11465    || (R_TYPE) == R_ARM_TLS_DESCSEQ	\
11466    || (R_TYPE) == R_ARM_THM_TLS_DESCSEQ)
11467 
11468 /* Relocate an ARM ELF section.  */
11469 
11470 static bfd_boolean
11471 elf32_arm_relocate_section (bfd *                  output_bfd,
11472 			    struct bfd_link_info * info,
11473 			    bfd *                  input_bfd,
11474 			    asection *             input_section,
11475 			    bfd_byte *             contents,
11476 			    Elf_Internal_Rela *    relocs,
11477 			    Elf_Internal_Sym *     local_syms,
11478 			    asection **            local_sections)
11479 {
11480   Elf_Internal_Shdr *symtab_hdr;
11481   struct elf_link_hash_entry **sym_hashes;
11482   Elf_Internal_Rela *rel;
11483   Elf_Internal_Rela *relend;
11484   const char *name;
11485   struct elf32_arm_link_hash_table * globals;
11486 
11487   globals = elf32_arm_hash_table (info);
11488   if (globals == NULL)
11489     return FALSE;
11490 
11491   symtab_hdr = & elf_symtab_hdr (input_bfd);
11492   sym_hashes = elf_sym_hashes (input_bfd);
11493 
11494   rel = relocs;
11495   relend = relocs + input_section->reloc_count;
11496   for (; rel < relend; rel++)
11497     {
11498       int                          r_type;
11499       reloc_howto_type *           howto;
11500       unsigned long                r_symndx;
11501       Elf_Internal_Sym *           sym;
11502       asection *                   sec;
11503       struct elf_link_hash_entry * h;
11504       bfd_vma                      relocation;
11505       bfd_reloc_status_type        r;
11506       arelent                      bfd_reloc;
11507       char                         sym_type;
11508       bfd_boolean                  unresolved_reloc = FALSE;
11509       char *error_message = NULL;
11510 
11511       r_symndx = ELF32_R_SYM (rel->r_info);
11512       r_type   = ELF32_R_TYPE (rel->r_info);
11513       r_type   = arm_real_reloc_type (globals, r_type);
11514 
11515       if (   r_type == R_ARM_GNU_VTENTRY
11516 	  || r_type == R_ARM_GNU_VTINHERIT)
11517 	continue;
11518 
11519       bfd_reloc.howto = elf32_arm_howto_from_type (r_type);
11520       howto = bfd_reloc.howto;
11521 
11522       h = NULL;
11523       sym = NULL;
11524       sec = NULL;
11525 
11526       if (r_symndx < symtab_hdr->sh_info)
11527 	{
11528 	  sym = local_syms + r_symndx;
11529 	  sym_type = ELF32_ST_TYPE (sym->st_info);
11530 	  sec = local_sections[r_symndx];
11531 
11532 	  /* An object file might have a reference to a local
11533 	     undefined symbol.  This is a daft object file, but we
11534 	     should at least do something about it.  V4BX & NONE
11535 	     relocations do not use the symbol and are explicitly
11536 	     allowed to use the undefined symbol, so allow those.
11537 	     Likewise for relocations against STN_UNDEF.  */
11538 	  if (r_type != R_ARM_V4BX
11539 	      && r_type != R_ARM_NONE
11540 	      && r_symndx != STN_UNDEF
11541 	      && bfd_is_und_section (sec)
11542 	      && ELF_ST_BIND (sym->st_info) != STB_WEAK)
11543 	    (*info->callbacks->undefined_symbol)
11544 	      (info, bfd_elf_string_from_elf_section
11545 	       (input_bfd, symtab_hdr->sh_link, sym->st_name),
11546 	       input_bfd, input_section,
11547 	       rel->r_offset, TRUE);
11548 
11549 	  if (globals->use_rel)
11550 	    {
11551 	      relocation = (sec->output_section->vma
11552 			    + sec->output_offset
11553 			    + sym->st_value);
11554 	      if (!bfd_link_relocatable (info)
11555 		  && (sec->flags & SEC_MERGE)
11556 		  && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
11557 		{
11558 		  asection *msec;
11559 		  bfd_vma addend, value;
11560 
11561 		  switch (r_type)
11562 		    {
11563 		    case R_ARM_MOVW_ABS_NC:
11564 		    case R_ARM_MOVT_ABS:
11565 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
11566 		      addend = ((value & 0xf0000) >> 4) | (value & 0xfff);
11567 		      addend = (addend ^ 0x8000) - 0x8000;
11568 		      break;
11569 
11570 		    case R_ARM_THM_MOVW_ABS_NC:
11571 		    case R_ARM_THM_MOVT_ABS:
11572 		      value = bfd_get_16 (input_bfd, contents + rel->r_offset)
11573 			      << 16;
11574 		      value |= bfd_get_16 (input_bfd,
11575 					   contents + rel->r_offset + 2);
11576 		      addend = ((value & 0xf7000) >> 4) | (value & 0xff)
11577 			       | ((value & 0x04000000) >> 15);
11578 		      addend = (addend ^ 0x8000) - 0x8000;
11579 		      break;
11580 
11581 		    default:
11582 		      if (howto->rightshift
11583 			  || (howto->src_mask & (howto->src_mask + 1)))
11584 			{
11585 			  (*_bfd_error_handler)
11586 			    (_("%B(%A+0x%lx): %s relocation against SEC_MERGE section"),
11587 			     input_bfd, input_section,
11588 			     (long) rel->r_offset, howto->name);
11589 			  return FALSE;
11590 			}
11591 
11592 		      value = bfd_get_32 (input_bfd, contents + rel->r_offset);
11593 
11594 		      /* Get the (signed) value from the instruction.  */
11595 		      addend = value & howto->src_mask;
11596 		      if (addend & ((howto->src_mask + 1) >> 1))
11597 			{
11598 			  bfd_signed_vma mask;
11599 
11600 			  mask = -1;
11601 			  mask &= ~ howto->src_mask;
11602 			  addend |= mask;
11603 			}
11604 		      break;
11605 		    }
11606 
11607 		  msec = sec;
11608 		  addend =
11609 		    _bfd_elf_rel_local_sym (output_bfd, sym, &msec, addend)
11610 		    - relocation;
11611 		  addend += msec->output_section->vma + msec->output_offset;
11612 
11613 		  /* Cases here must match those in the preceding
11614 		     switch statement.  */
11615 		  switch (r_type)
11616 		    {
11617 		    case R_ARM_MOVW_ABS_NC:
11618 		    case R_ARM_MOVT_ABS:
11619 		      value = (value & 0xfff0f000) | ((addend & 0xf000) << 4)
11620 			      | (addend & 0xfff);
11621 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
11622 		      break;
11623 
11624 		    case R_ARM_THM_MOVW_ABS_NC:
11625 		    case R_ARM_THM_MOVT_ABS:
11626 		      value = (value & 0xfbf08f00) | ((addend & 0xf700) << 4)
11627 			      | (addend & 0xff) | ((addend & 0x0800) << 15);
11628 		      bfd_put_16 (input_bfd, value >> 16,
11629 				  contents + rel->r_offset);
11630 		      bfd_put_16 (input_bfd, value,
11631 				  contents + rel->r_offset + 2);
11632 		      break;
11633 
11634 		    default:
11635 		      value = (value & ~ howto->dst_mask)
11636 			      | (addend & howto->dst_mask);
11637 		      bfd_put_32 (input_bfd, value, contents + rel->r_offset);
11638 		      break;
11639 		    }
11640 		}
11641 	    }
11642 	  else
11643 	    relocation = _bfd_elf_rela_local_sym (output_bfd, sym, &sec, rel);
11644 	}
11645       else
11646 	{
11647 	  bfd_boolean warned, ignored;
11648 
11649 	  RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
11650 				   r_symndx, symtab_hdr, sym_hashes,
11651 				   h, sec, relocation,
11652 				   unresolved_reloc, warned, ignored);
11653 
11654 	  sym_type = h->type;
11655 	}
11656 
11657       if (sec != NULL && discarded_section (sec))
11658 	RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section,
11659 					 rel, 1, relend, howto, 0, contents);
11660 
11661       if (bfd_link_relocatable (info))
11662 	{
11663 	  /* This is a relocatable link.  We don't have to change
11664 	     anything, unless the reloc is against a section symbol,
11665 	     in which case we have to adjust according to where the
11666 	     section symbol winds up in the output section.  */
11667 	  if (sym != NULL && ELF_ST_TYPE (sym->st_info) == STT_SECTION)
11668 	    {
11669 	      if (globals->use_rel)
11670 		arm_add_to_rel (input_bfd, contents + rel->r_offset,
11671 				howto, (bfd_signed_vma) sec->output_offset);
11672 	      else
11673 		rel->r_addend += sec->output_offset;
11674 	    }
11675 	  continue;
11676 	}
11677 
11678       if (h != NULL)
11679 	name = h->root.root.string;
11680       else
11681 	{
11682 	  name = (bfd_elf_string_from_elf_section
11683 		  (input_bfd, symtab_hdr->sh_link, sym->st_name));
11684 	  if (name == NULL || *name == '\0')
11685 	    name = bfd_section_name (input_bfd, sec);
11686 	}
11687 
11688       if (r_symndx != STN_UNDEF
11689 	  && r_type != R_ARM_NONE
11690 	  && (h == NULL
11691 	      || h->root.type == bfd_link_hash_defined
11692 	      || h->root.type == bfd_link_hash_defweak)
11693 	  && IS_ARM_TLS_RELOC (r_type) != (sym_type == STT_TLS))
11694 	{
11695 	  (*_bfd_error_handler)
11696 	    ((sym_type == STT_TLS
11697 	      ? _("%B(%A+0x%lx): %s used with TLS symbol %s")
11698 	      : _("%B(%A+0x%lx): %s used with non-TLS symbol %s")),
11699 	     input_bfd,
11700 	     input_section,
11701 	     (long) rel->r_offset,
11702 	     howto->name,
11703 	     name);
11704 	}
11705 
11706       /* We call elf32_arm_final_link_relocate unless we're completely
11707 	 done, i.e., the relaxation produced the final output we want,
11708 	 and we won't let anybody mess with it. Also, we have to do
11709 	 addend adjustments in case of a R_ARM_TLS_GOTDESC relocation
11710 	 both in relaxed and non-relaxed cases.  */
11711       if ((elf32_arm_tls_transition (info, r_type, h) != (unsigned)r_type)
11712 	  || (IS_ARM_TLS_GNU_RELOC (r_type)
11713 	      && !((h ? elf32_arm_hash_entry (h)->tls_type :
11714 		    elf32_arm_local_got_tls_type (input_bfd)[r_symndx])
11715 		   & GOT_TLS_GDESC)))
11716 	{
11717 	  r = elf32_arm_tls_relax (globals, input_bfd, input_section,
11718 				   contents, rel, h == NULL);
11719 	  /* This may have been marked unresolved because it came from
11720 	     a shared library.  But we've just dealt with that.  */
11721 	  unresolved_reloc = 0;
11722 	}
11723       else
11724 	r = bfd_reloc_continue;
11725 
11726       if (r == bfd_reloc_continue)
11727 	{
11728 	  unsigned char branch_type =
11729 	    h ? ARM_GET_SYM_BRANCH_TYPE (h->target_internal)
11730 	      : ARM_GET_SYM_BRANCH_TYPE (sym->st_target_internal);
11731 
11732 	  r = elf32_arm_final_link_relocate (howto, input_bfd, output_bfd,
11733 					     input_section, contents, rel,
11734 					     relocation, info, sec, name,
11735 					     sym_type, branch_type, h,
11736 					     &unresolved_reloc,
11737 					     &error_message);
11738 	}
11739 
11740       /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
11741 	 because such sections are not SEC_ALLOC and thus ld.so will
11742 	 not process them.  */
11743       if (unresolved_reloc
11744 	  && !((input_section->flags & SEC_DEBUGGING) != 0
11745 	       && h->def_dynamic)
11746 	  && _bfd_elf_section_offset (output_bfd, info, input_section,
11747 				      rel->r_offset) != (bfd_vma) -1)
11748 	{
11749 	  (*_bfd_error_handler)
11750 	    (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"),
11751 	     input_bfd,
11752 	     input_section,
11753 	     (long) rel->r_offset,
11754 	     howto->name,
11755 	     h->root.root.string);
11756 	  return FALSE;
11757 	}
11758 
11759       if (r != bfd_reloc_ok)
11760 	{
11761 	  switch (r)
11762 	    {
11763 	    case bfd_reloc_overflow:
11764 	      /* If the overflowing reloc was to an undefined symbol,
11765 		 we have already printed one error message and there
11766 		 is no point complaining again.  */
11767 	      if (!h || h->root.type != bfd_link_hash_undefined)
11768 		(*info->callbacks->reloc_overflow)
11769 		  (info, (h ? &h->root : NULL), name, howto->name,
11770 		   (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
11771 	      break;
11772 
11773 	    case bfd_reloc_undefined:
11774 	      (*info->callbacks->undefined_symbol)
11775 		(info, name, input_bfd, input_section, rel->r_offset, TRUE);
11776 	      break;
11777 
11778 	    case bfd_reloc_outofrange:
11779 	      error_message = _("out of range");
11780 	      goto common_error;
11781 
11782 	    case bfd_reloc_notsupported:
11783 	      error_message = _("unsupported relocation");
11784 	      goto common_error;
11785 
11786 	    case bfd_reloc_dangerous:
11787 	      /* error_message should already be set.  */
11788 	      goto common_error;
11789 
11790 	    default:
11791 	      error_message = _("unknown error");
11792 	      /* Fall through.  */
11793 
11794 	    common_error:
11795 	      BFD_ASSERT (error_message != NULL);
11796 	      (*info->callbacks->reloc_dangerous)
11797 		(info, error_message, input_bfd, input_section, rel->r_offset);
11798 	      break;
11799 	    }
11800 	}
11801     }
11802 
11803   return TRUE;
11804 }
11805 
11806 /* Add a new unwind edit to the list described by HEAD, TAIL.  If TINDEX is zero,
11807    adds the edit to the start of the list.  (The list must be built in order of
11808    ascending TINDEX: the function's callers are primarily responsible for
11809    maintaining that condition).  */
11810 
11811 static void
11812 add_unwind_table_edit (arm_unwind_table_edit **head,
11813 		       arm_unwind_table_edit **tail,
11814 		       arm_unwind_edit_type type,
11815 		       asection *linked_section,
11816 		       unsigned int tindex)
11817 {
11818   arm_unwind_table_edit *new_edit = (arm_unwind_table_edit *)
11819       xmalloc (sizeof (arm_unwind_table_edit));
11820 
11821   new_edit->type = type;
11822   new_edit->linked_section = linked_section;
11823   new_edit->index = tindex;
11824 
11825   if (tindex > 0)
11826     {
11827       new_edit->next = NULL;
11828 
11829       if (*tail)
11830 	(*tail)->next = new_edit;
11831 
11832       (*tail) = new_edit;
11833 
11834       if (!*head)
11835 	(*head) = new_edit;
11836     }
11837   else
11838     {
11839       new_edit->next = *head;
11840 
11841       if (!*tail)
11842 	*tail = new_edit;
11843 
11844       *head = new_edit;
11845     }
11846 }
11847 
11848 static _arm_elf_section_data *get_arm_elf_section_data (asection *);
11849 
11850 /* Increase the size of EXIDX_SEC by ADJUST bytes.  ADJUST mau be negative.  */
11851 static void
11852 adjust_exidx_size(asection *exidx_sec, int adjust)
11853 {
11854   asection *out_sec;
11855 
11856   if (!exidx_sec->rawsize)
11857     exidx_sec->rawsize = exidx_sec->size;
11858 
11859   bfd_set_section_size (exidx_sec->owner, exidx_sec, exidx_sec->size + adjust);
11860   out_sec = exidx_sec->output_section;
11861   /* Adjust size of output section.  */
11862   bfd_set_section_size (out_sec->owner, out_sec, out_sec->size +adjust);
11863 }
11864 
11865 /* Insert an EXIDX_CANTUNWIND marker at the end of a section.  */
11866 static void
11867 insert_cantunwind_after(asection *text_sec, asection *exidx_sec)
11868 {
11869   struct _arm_elf_section_data *exidx_arm_data;
11870 
11871   exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11872   add_unwind_table_edit (
11873     &exidx_arm_data->u.exidx.unwind_edit_list,
11874     &exidx_arm_data->u.exidx.unwind_edit_tail,
11875     INSERT_EXIDX_CANTUNWIND_AT_END, text_sec, UINT_MAX);
11876 
11877   exidx_arm_data->additional_reloc_count++;
11878 
11879   adjust_exidx_size(exidx_sec, 8);
11880 }
11881 
11882 /* Scan .ARM.exidx tables, and create a list describing edits which should be
11883    made to those tables, such that:
11884 
11885      1. Regions without unwind data are marked with EXIDX_CANTUNWIND entries.
11886      2. Duplicate entries are merged together (EXIDX_CANTUNWIND, or unwind
11887 	codes which have been inlined into the index).
11888 
11889    If MERGE_EXIDX_ENTRIES is false, duplicate entries are not merged.
11890 
11891    The edits are applied when the tables are written
11892    (in elf32_arm_write_section).  */
11893 
11894 bfd_boolean
11895 elf32_arm_fix_exidx_coverage (asection **text_section_order,
11896 			      unsigned int num_text_sections,
11897 			      struct bfd_link_info *info,
11898 			      bfd_boolean merge_exidx_entries)
11899 {
11900   bfd *inp;
11901   unsigned int last_second_word = 0, i;
11902   asection *last_exidx_sec = NULL;
11903   asection *last_text_sec = NULL;
11904   int last_unwind_type = -1;
11905 
11906   /* Walk over all EXIDX sections, and create backlinks from the corrsponding
11907      text sections.  */
11908   for (inp = info->input_bfds; inp != NULL; inp = inp->link.next)
11909     {
11910       asection *sec;
11911 
11912       for (sec = inp->sections; sec != NULL; sec = sec->next)
11913 	{
11914 	  struct bfd_elf_section_data *elf_sec = elf_section_data (sec);
11915 	  Elf_Internal_Shdr *hdr = &elf_sec->this_hdr;
11916 
11917 	  if (!hdr || hdr->sh_type != SHT_ARM_EXIDX)
11918 	    continue;
11919 
11920 	  if (elf_sec->linked_to)
11921 	    {
11922 	      Elf_Internal_Shdr *linked_hdr
11923 		= &elf_section_data (elf_sec->linked_to)->this_hdr;
11924 	      struct _arm_elf_section_data *linked_sec_arm_data
11925 		= get_arm_elf_section_data (linked_hdr->bfd_section);
11926 
11927 	      if (linked_sec_arm_data == NULL)
11928 		continue;
11929 
11930 	      /* Link this .ARM.exidx section back from the text section it
11931 		 describes.  */
11932 	      linked_sec_arm_data->u.text.arm_exidx_sec = sec;
11933 	    }
11934 	}
11935     }
11936 
11937   /* Walk all text sections in order of increasing VMA.  Eilminate duplicate
11938      index table entries (EXIDX_CANTUNWIND and inlined unwind opcodes),
11939      and add EXIDX_CANTUNWIND entries for sections with no unwind table data.  */
11940 
11941   for (i = 0; i < num_text_sections; i++)
11942     {
11943       asection *sec = text_section_order[i];
11944       asection *exidx_sec;
11945       struct _arm_elf_section_data *arm_data = get_arm_elf_section_data (sec);
11946       struct _arm_elf_section_data *exidx_arm_data;
11947       bfd_byte *contents = NULL;
11948       int deleted_exidx_bytes = 0;
11949       bfd_vma j;
11950       arm_unwind_table_edit *unwind_edit_head = NULL;
11951       arm_unwind_table_edit *unwind_edit_tail = NULL;
11952       Elf_Internal_Shdr *hdr;
11953       bfd *ibfd;
11954 
11955       if (arm_data == NULL)
11956 	continue;
11957 
11958       exidx_sec = arm_data->u.text.arm_exidx_sec;
11959       if (exidx_sec == NULL)
11960 	{
11961 	  /* Section has no unwind data.  */
11962 	  if (last_unwind_type == 0 || !last_exidx_sec)
11963 	    continue;
11964 
11965 	  /* Ignore zero sized sections.  */
11966 	  if (sec->size == 0)
11967 	    continue;
11968 
11969 	  insert_cantunwind_after(last_text_sec, last_exidx_sec);
11970 	  last_unwind_type = 0;
11971 	  continue;
11972 	}
11973 
11974       /* Skip /DISCARD/ sections.  */
11975       if (bfd_is_abs_section (exidx_sec->output_section))
11976 	continue;
11977 
11978       hdr = &elf_section_data (exidx_sec)->this_hdr;
11979       if (hdr->sh_type != SHT_ARM_EXIDX)
11980 	continue;
11981 
11982       exidx_arm_data = get_arm_elf_section_data (exidx_sec);
11983       if (exidx_arm_data == NULL)
11984 	continue;
11985 
11986       ibfd = exidx_sec->owner;
11987 
11988       if (hdr->contents != NULL)
11989 	contents = hdr->contents;
11990       else if (! bfd_malloc_and_get_section (ibfd, exidx_sec, &contents))
11991 	/* An error?  */
11992 	continue;
11993 
11994       if (last_unwind_type > 0)
11995 	{
11996 	  unsigned int first_word = bfd_get_32 (ibfd, contents);
11997 	  /* Add cantunwind if first unwind item does not match section
11998 	     start.  */
11999 	  if (first_word != sec->vma)
12000 	    {
12001 	      insert_cantunwind_after (last_text_sec, last_exidx_sec);
12002 	      last_unwind_type = 0;
12003 	    }
12004 	}
12005 
12006       for (j = 0; j < hdr->sh_size; j += 8)
12007 	{
12008 	  unsigned int second_word = bfd_get_32 (ibfd, contents + j + 4);
12009 	  int unwind_type;
12010 	  int elide = 0;
12011 
12012 	  /* An EXIDX_CANTUNWIND entry.  */
12013 	  if (second_word == 1)
12014 	    {
12015 	      if (last_unwind_type == 0)
12016 		elide = 1;
12017 	      unwind_type = 0;
12018 	    }
12019 	  /* Inlined unwinding data.  Merge if equal to previous.  */
12020 	  else if ((second_word & 0x80000000) != 0)
12021 	    {
12022 	      if (merge_exidx_entries
12023 		   && last_second_word == second_word && last_unwind_type == 1)
12024 		elide = 1;
12025 	      unwind_type = 1;
12026 	      last_second_word = second_word;
12027 	    }
12028 	  /* Normal table entry.  In theory we could merge these too,
12029 	     but duplicate entries are likely to be much less common.  */
12030 	  else
12031 	    unwind_type = 2;
12032 
12033 	  if (elide && !bfd_link_relocatable (info))
12034 	    {
12035 	      add_unwind_table_edit (&unwind_edit_head, &unwind_edit_tail,
12036 				     DELETE_EXIDX_ENTRY, NULL, j / 8);
12037 
12038 	      deleted_exidx_bytes += 8;
12039 	    }
12040 
12041 	  last_unwind_type = unwind_type;
12042 	}
12043 
12044       /* Free contents if we allocated it ourselves.  */
12045       if (contents != hdr->contents)
12046 	free (contents);
12047 
12048       /* Record edits to be applied later (in elf32_arm_write_section).  */
12049       exidx_arm_data->u.exidx.unwind_edit_list = unwind_edit_head;
12050       exidx_arm_data->u.exidx.unwind_edit_tail = unwind_edit_tail;
12051 
12052       if (deleted_exidx_bytes > 0)
12053 	adjust_exidx_size(exidx_sec, -deleted_exidx_bytes);
12054 
12055       last_exidx_sec = exidx_sec;
12056       last_text_sec = sec;
12057     }
12058 
12059   /* Add terminating CANTUNWIND entry.  */
12060   if (!bfd_link_relocatable (info) && last_exidx_sec
12061       && last_unwind_type != 0)
12062     insert_cantunwind_after(last_text_sec, last_exidx_sec);
12063 
12064   return TRUE;
12065 }
12066 
12067 static bfd_boolean
12068 elf32_arm_output_glue_section (struct bfd_link_info *info, bfd *obfd,
12069 			       bfd *ibfd, const char *name)
12070 {
12071   asection *sec, *osec;
12072 
12073   sec = bfd_get_linker_section (ibfd, name);
12074   if (sec == NULL || (sec->flags & SEC_EXCLUDE) != 0)
12075     return TRUE;
12076 
12077   osec = sec->output_section;
12078   if (elf32_arm_write_section (obfd, info, sec, sec->contents))
12079     return TRUE;
12080 
12081   if (! bfd_set_section_contents (obfd, osec, sec->contents,
12082 				  sec->output_offset, sec->size))
12083     return FALSE;
12084 
12085   return TRUE;
12086 }
12087 
12088 static bfd_boolean
12089 elf32_arm_final_link (bfd *abfd, struct bfd_link_info *info)
12090 {
12091   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (info);
12092   asection *sec, *osec;
12093 
12094   if (globals == NULL)
12095     return FALSE;
12096 
12097   /* Invoke the regular ELF backend linker to do all the work.  */
12098   if (!bfd_elf_final_link (abfd, info))
12099     return FALSE;
12100 
12101   /* Process stub sections (eg BE8 encoding, ...).  */
12102   struct elf32_arm_link_hash_table *htab = elf32_arm_hash_table (info);
12103   unsigned int i;
12104   for (i=0; i<htab->top_id; i++)
12105     {
12106       sec = htab->stub_group[i].stub_sec;
12107       /* Only process it once, in its link_sec slot.  */
12108       if (sec && i == htab->stub_group[i].link_sec->id)
12109 	{
12110 	  osec = sec->output_section;
12111 	  elf32_arm_write_section (abfd, info, sec, sec->contents);
12112 	  if (! bfd_set_section_contents (abfd, osec, sec->contents,
12113 					  sec->output_offset, sec->size))
12114 	    return FALSE;
12115 	}
12116     }
12117 
12118   /* Write out any glue sections now that we have created all the
12119      stubs.  */
12120   if (globals->bfd_of_glue_owner != NULL)
12121     {
12122       if (! elf32_arm_output_glue_section (info, abfd,
12123 					   globals->bfd_of_glue_owner,
12124 					   ARM2THUMB_GLUE_SECTION_NAME))
12125 	return FALSE;
12126 
12127       if (! elf32_arm_output_glue_section (info, abfd,
12128 					   globals->bfd_of_glue_owner,
12129 					   THUMB2ARM_GLUE_SECTION_NAME))
12130 	return FALSE;
12131 
12132       if (! elf32_arm_output_glue_section (info, abfd,
12133 					   globals->bfd_of_glue_owner,
12134 					   VFP11_ERRATUM_VENEER_SECTION_NAME))
12135 	return FALSE;
12136 
12137       if (! elf32_arm_output_glue_section (info, abfd,
12138 					   globals->bfd_of_glue_owner,
12139 					   STM32L4XX_ERRATUM_VENEER_SECTION_NAME))
12140 	return FALSE;
12141 
12142       if (! elf32_arm_output_glue_section (info, abfd,
12143 					   globals->bfd_of_glue_owner,
12144 					   ARM_BX_GLUE_SECTION_NAME))
12145 	return FALSE;
12146     }
12147 
12148   return TRUE;
12149 }
12150 
12151 /* Return a best guess for the machine number based on the attributes.  */
12152 
12153 static unsigned int
12154 bfd_arm_get_mach_from_attributes (bfd * abfd)
12155 {
12156   int arch = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_CPU_arch);
12157 
12158   switch (arch)
12159     {
12160     case TAG_CPU_ARCH_V4: return bfd_mach_arm_4;
12161     case TAG_CPU_ARCH_V4T: return bfd_mach_arm_4T;
12162     case TAG_CPU_ARCH_V5T: return bfd_mach_arm_5T;
12163 
12164     case TAG_CPU_ARCH_V5TE:
12165       {
12166 	char * name;
12167 
12168 	BFD_ASSERT (Tag_CPU_name < NUM_KNOWN_OBJ_ATTRIBUTES);
12169 	name = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_CPU_name].s;
12170 
12171 	if (name)
12172 	  {
12173 	    if (strcmp (name, "IWMMXT2") == 0)
12174 	      return bfd_mach_arm_iWMMXt2;
12175 
12176 	    if (strcmp (name, "IWMMXT") == 0)
12177 	      return bfd_mach_arm_iWMMXt;
12178 
12179 	    if (strcmp (name, "XSCALE") == 0)
12180 	      {
12181 		int wmmx;
12182 
12183 		BFD_ASSERT (Tag_WMMX_arch < NUM_KNOWN_OBJ_ATTRIBUTES);
12184 		wmmx = elf_known_obj_attributes (abfd) [OBJ_ATTR_PROC][Tag_WMMX_arch].i;
12185 		switch (wmmx)
12186 		  {
12187 		  case 1: return bfd_mach_arm_iWMMXt;
12188 		  case 2: return bfd_mach_arm_iWMMXt2;
12189 		  default: return bfd_mach_arm_XScale;
12190 		  }
12191 	      }
12192 	  }
12193 
12194 	return bfd_mach_arm_5TE;
12195       }
12196 
12197     default:
12198       return bfd_mach_arm_unknown;
12199     }
12200 }
12201 
12202 /* Set the right machine number.  */
12203 
12204 static bfd_boolean
12205 elf32_arm_object_p (bfd *abfd)
12206 {
12207   unsigned int mach;
12208 
12209   mach = bfd_arm_get_mach_from_notes (abfd, ARM_NOTE_SECTION);
12210 
12211   if (mach == bfd_mach_arm_unknown)
12212     {
12213       if (elf_elfheader (abfd)->e_flags & EF_ARM_MAVERICK_FLOAT)
12214 	mach = bfd_mach_arm_ep9312;
12215       else
12216 	mach = bfd_arm_get_mach_from_attributes (abfd);
12217     }
12218 
12219   bfd_default_set_arch_mach (abfd, bfd_arch_arm, mach);
12220   return TRUE;
12221 }
12222 
12223 /* Function to keep ARM specific flags in the ELF header.  */
12224 
12225 static bfd_boolean
12226 elf32_arm_set_private_flags (bfd *abfd, flagword flags)
12227 {
12228   if (elf_flags_init (abfd)
12229       && elf_elfheader (abfd)->e_flags != flags)
12230     {
12231       if (EF_ARM_EABI_VERSION (flags) == EF_ARM_EABI_UNKNOWN)
12232 	{
12233 	  if (flags & EF_ARM_INTERWORK)
12234 	    (*_bfd_error_handler)
12235 	      (_("Warning: Not setting interworking flag of %B since it has already been specified as non-interworking"),
12236 	       abfd);
12237 	  else
12238 	    _bfd_error_handler
12239 	      (_("Warning: Clearing the interworking flag of %B due to outside request"),
12240 	       abfd);
12241 	}
12242     }
12243   else
12244     {
12245       elf_elfheader (abfd)->e_flags = flags;
12246       elf_flags_init (abfd) = TRUE;
12247     }
12248 
12249   return TRUE;
12250 }
12251 
12252 /* Copy backend specific data from one object module to another.  */
12253 
12254 static bfd_boolean
12255 elf32_arm_copy_private_bfd_data (bfd *ibfd, bfd *obfd)
12256 {
12257   flagword in_flags;
12258   flagword out_flags;
12259 
12260   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
12261     return TRUE;
12262 
12263   in_flags  = elf_elfheader (ibfd)->e_flags;
12264   out_flags = elf_elfheader (obfd)->e_flags;
12265 
12266   if (elf_flags_init (obfd)
12267       && EF_ARM_EABI_VERSION (out_flags) == EF_ARM_EABI_UNKNOWN
12268       && in_flags != out_flags)
12269     {
12270       /* Cannot mix APCS26 and APCS32 code.  */
12271       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
12272 	return FALSE;
12273 
12274       /* Cannot mix float APCS and non-float APCS code.  */
12275       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
12276 	return FALSE;
12277 
12278       /* If the src and dest have different interworking flags
12279 	 then turn off the interworking bit.  */
12280       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
12281 	{
12282 	  if (out_flags & EF_ARM_INTERWORK)
12283 	    _bfd_error_handler
12284 	      (_("Warning: Clearing the interworking flag of %B because non-interworking code in %B has been linked with it"),
12285 	       obfd, ibfd);
12286 
12287 	  in_flags &= ~EF_ARM_INTERWORK;
12288 	}
12289 
12290       /* Likewise for PIC, though don't warn for this case.  */
12291       if ((in_flags & EF_ARM_PIC) != (out_flags & EF_ARM_PIC))
12292 	in_flags &= ~EF_ARM_PIC;
12293     }
12294 
12295   elf_elfheader (obfd)->e_flags = in_flags;
12296   elf_flags_init (obfd) = TRUE;
12297 
12298   return _bfd_elf_copy_private_bfd_data (ibfd, obfd);
12299 }
12300 
12301 /* Values for Tag_ABI_PCS_R9_use.  */
12302 enum
12303 {
12304   AEABI_R9_V6,
12305   AEABI_R9_SB,
12306   AEABI_R9_TLS,
12307   AEABI_R9_unused
12308 };
12309 
12310 /* Values for Tag_ABI_PCS_RW_data.  */
12311 enum
12312 {
12313   AEABI_PCS_RW_data_absolute,
12314   AEABI_PCS_RW_data_PCrel,
12315   AEABI_PCS_RW_data_SBrel,
12316   AEABI_PCS_RW_data_unused
12317 };
12318 
12319 /* Values for Tag_ABI_enum_size.  */
12320 enum
12321 {
12322   AEABI_enum_unused,
12323   AEABI_enum_short,
12324   AEABI_enum_wide,
12325   AEABI_enum_forced_wide
12326 };
12327 
12328 /* Determine whether an object attribute tag takes an integer, a
12329    string or both.  */
12330 
12331 static int
12332 elf32_arm_obj_attrs_arg_type (int tag)
12333 {
12334   if (tag == Tag_compatibility)
12335     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_STR_VAL;
12336   else if (tag == Tag_nodefaults)
12337     return ATTR_TYPE_FLAG_INT_VAL | ATTR_TYPE_FLAG_NO_DEFAULT;
12338   else if (tag == Tag_CPU_raw_name || tag == Tag_CPU_name)
12339     return ATTR_TYPE_FLAG_STR_VAL;
12340   else if (tag < 32)
12341     return ATTR_TYPE_FLAG_INT_VAL;
12342   else
12343     return (tag & 1) != 0 ? ATTR_TYPE_FLAG_STR_VAL : ATTR_TYPE_FLAG_INT_VAL;
12344 }
12345 
12346 /* The ABI defines that Tag_conformance should be emitted first, and that
12347    Tag_nodefaults should be second (if either is defined).  This sets those
12348    two positions, and bumps up the position of all the remaining tags to
12349    compensate.  */
12350 static int
12351 elf32_arm_obj_attrs_order (int num)
12352 {
12353   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE)
12354     return Tag_conformance;
12355   if (num == LEAST_KNOWN_OBJ_ATTRIBUTE + 1)
12356     return Tag_nodefaults;
12357   if ((num - 2) < Tag_nodefaults)
12358     return num - 2;
12359   if ((num - 1) < Tag_conformance)
12360     return num - 1;
12361   return num;
12362 }
12363 
12364 /* Attribute numbers >=64 (mod 128) can be safely ignored.  */
12365 static bfd_boolean
12366 elf32_arm_obj_attrs_handle_unknown (bfd *abfd, int tag)
12367 {
12368   if ((tag & 127) < 64)
12369     {
12370       _bfd_error_handler
12371 	(_("%B: Unknown mandatory EABI object attribute %d"),
12372 	 abfd, tag);
12373       bfd_set_error (bfd_error_bad_value);
12374       return FALSE;
12375     }
12376   else
12377     {
12378       _bfd_error_handler
12379 	(_("Warning: %B: Unknown EABI object attribute %d"),
12380 	 abfd, tag);
12381       return TRUE;
12382     }
12383 }
12384 
12385 /* Read the architecture from the Tag_also_compatible_with attribute, if any.
12386    Returns -1 if no architecture could be read.  */
12387 
12388 static int
12389 get_secondary_compatible_arch (bfd *abfd)
12390 {
12391   obj_attribute *attr =
12392     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12393 
12394   /* Note: the tag and its argument below are uleb128 values, though
12395      currently-defined values fit in one byte for each.  */
12396   if (attr->s
12397       && attr->s[0] == Tag_CPU_arch
12398       && (attr->s[1] & 128) != 128
12399       && attr->s[2] == 0)
12400    return attr->s[1];
12401 
12402   /* This tag is "safely ignorable", so don't complain if it looks funny.  */
12403   return -1;
12404 }
12405 
12406 /* Set, or unset, the architecture of the Tag_also_compatible_with attribute.
12407    The tag is removed if ARCH is -1.  */
12408 
12409 static void
12410 set_secondary_compatible_arch (bfd *abfd, int arch)
12411 {
12412   obj_attribute *attr =
12413     &elf_known_obj_attributes_proc (abfd)[Tag_also_compatible_with];
12414 
12415   if (arch == -1)
12416     {
12417       attr->s = NULL;
12418       return;
12419     }
12420 
12421   /* Note: the tag and its argument below are uleb128 values, though
12422      currently-defined values fit in one byte for each.  */
12423   if (!attr->s)
12424     attr->s = (char *) bfd_alloc (abfd, 3);
12425   attr->s[0] = Tag_CPU_arch;
12426   attr->s[1] = arch;
12427   attr->s[2] = '\0';
12428 }
12429 
12430 /* Combine two values for Tag_CPU_arch, taking secondary compatibility tags
12431    into account.  */
12432 
12433 static int
12434 tag_cpu_arch_combine (bfd *ibfd, int oldtag, int *secondary_compat_out,
12435 		      int newtag, int secondary_compat)
12436 {
12437 #define T(X) TAG_CPU_ARCH_##X
12438   int tagl, tagh, result;
12439   const int v6t2[] =
12440     {
12441       T(V6T2),   /* PRE_V4.  */
12442       T(V6T2),   /* V4.  */
12443       T(V6T2),   /* V4T.  */
12444       T(V6T2),   /* V5T.  */
12445       T(V6T2),   /* V5TE.  */
12446       T(V6T2),   /* V5TEJ.  */
12447       T(V6T2),   /* V6.  */
12448       T(V7),     /* V6KZ.  */
12449       T(V6T2)    /* V6T2.  */
12450     };
12451   const int v6k[] =
12452     {
12453       T(V6K),    /* PRE_V4.  */
12454       T(V6K),    /* V4.  */
12455       T(V6K),    /* V4T.  */
12456       T(V6K),    /* V5T.  */
12457       T(V6K),    /* V5TE.  */
12458       T(V6K),    /* V5TEJ.  */
12459       T(V6K),    /* V6.  */
12460       T(V6KZ),   /* V6KZ.  */
12461       T(V7),     /* V6T2.  */
12462       T(V6K)     /* V6K.  */
12463     };
12464   const int v7[] =
12465     {
12466       T(V7),     /* PRE_V4.  */
12467       T(V7),     /* V4.  */
12468       T(V7),     /* V4T.  */
12469       T(V7),     /* V5T.  */
12470       T(V7),     /* V5TE.  */
12471       T(V7),     /* V5TEJ.  */
12472       T(V7),     /* V6.  */
12473       T(V7),     /* V6KZ.  */
12474       T(V7),     /* V6T2.  */
12475       T(V7),     /* V6K.  */
12476       T(V7)      /* V7.  */
12477     };
12478   const int v6_m[] =
12479     {
12480       -1,        /* PRE_V4.  */
12481       -1,        /* V4.  */
12482       T(V6K),    /* V4T.  */
12483       T(V6K),    /* V5T.  */
12484       T(V6K),    /* V5TE.  */
12485       T(V6K),    /* V5TEJ.  */
12486       T(V6K),    /* V6.  */
12487       T(V6KZ),   /* V6KZ.  */
12488       T(V7),     /* V6T2.  */
12489       T(V6K),    /* V6K.  */
12490       T(V7),     /* V7.  */
12491       T(V6_M)    /* V6_M.  */
12492     };
12493   const int v6s_m[] =
12494     {
12495       -1,        /* PRE_V4.  */
12496       -1,        /* V4.  */
12497       T(V6K),    /* V4T.  */
12498       T(V6K),    /* V5T.  */
12499       T(V6K),    /* V5TE.  */
12500       T(V6K),    /* V5TEJ.  */
12501       T(V6K),    /* V6.  */
12502       T(V6KZ),   /* V6KZ.  */
12503       T(V7),     /* V6T2.  */
12504       T(V6K),    /* V6K.  */
12505       T(V7),     /* V7.  */
12506       T(V6S_M),  /* V6_M.  */
12507       T(V6S_M)   /* V6S_M.  */
12508     };
12509   const int v7e_m[] =
12510     {
12511       -1,        /* PRE_V4.  */
12512       -1,        /* V4.  */
12513       T(V7E_M),  /* V4T.  */
12514       T(V7E_M),  /* V5T.  */
12515       T(V7E_M),  /* V5TE.  */
12516       T(V7E_M),  /* V5TEJ.  */
12517       T(V7E_M),  /* V6.  */
12518       T(V7E_M),  /* V6KZ.  */
12519       T(V7E_M),  /* V6T2.  */
12520       T(V7E_M),  /* V6K.  */
12521       T(V7E_M),  /* V7.  */
12522       T(V7E_M),  /* V6_M.  */
12523       T(V7E_M),  /* V6S_M.  */
12524       T(V7E_M)   /* V7E_M.  */
12525     };
12526   const int v8[] =
12527     {
12528       T(V8),		/* PRE_V4.  */
12529       T(V8),		/* V4.  */
12530       T(V8),		/* V4T.  */
12531       T(V8),		/* V5T.  */
12532       T(V8),		/* V5TE.  */
12533       T(V8),		/* V5TEJ.  */
12534       T(V8),		/* V6.  */
12535       T(V8),		/* V6KZ.  */
12536       T(V8),		/* V6T2.  */
12537       T(V8),		/* V6K.  */
12538       T(V8),		/* V7.  */
12539       T(V8),		/* V6_M.  */
12540       T(V8),		/* V6S_M.  */
12541       T(V8),		/* V7E_M.  */
12542       T(V8)		/* V8.  */
12543     };
12544   const int v8m_baseline[] =
12545     {
12546       -1,		/* PRE_V4.  */
12547       -1,		/* V4.  */
12548       -1,		/* V4T.  */
12549       -1,		/* V5T.  */
12550       -1,		/* V5TE.  */
12551       -1,		/* V5TEJ.  */
12552       -1,		/* V6.  */
12553       -1,		/* V6KZ.  */
12554       -1,		/* V6T2.  */
12555       -1,		/* V6K.  */
12556       -1,		/* V7.  */
12557       T(V8M_BASE),	/* V6_M.  */
12558       T(V8M_BASE),	/* V6S_M.  */
12559       -1,		/* V7E_M.  */
12560       -1,		/* V8.  */
12561       -1,
12562       T(V8M_BASE)	/* V8-M BASELINE.  */
12563     };
12564   const int v8m_mainline[] =
12565     {
12566       -1,		/* PRE_V4.  */
12567       -1,		/* V4.  */
12568       -1,		/* V4T.  */
12569       -1,		/* V5T.  */
12570       -1,		/* V5TE.  */
12571       -1,		/* V5TEJ.  */
12572       -1,		/* V6.  */
12573       -1,		/* V6KZ.  */
12574       -1,		/* V6T2.  */
12575       -1,		/* V6K.  */
12576       T(V8M_MAIN),	/* V7.  */
12577       T(V8M_MAIN),	/* V6_M.  */
12578       T(V8M_MAIN),	/* V6S_M.  */
12579       T(V8M_MAIN),	/* V7E_M.  */
12580       -1,		/* V8.  */
12581       -1,
12582       T(V8M_MAIN),	/* V8-M BASELINE.  */
12583       T(V8M_MAIN)	/* V8-M MAINLINE.  */
12584     };
12585   const int v4t_plus_v6_m[] =
12586     {
12587       -1,		/* PRE_V4.  */
12588       -1,		/* V4.  */
12589       T(V4T),		/* V4T.  */
12590       T(V5T),		/* V5T.  */
12591       T(V5TE),		/* V5TE.  */
12592       T(V5TEJ),		/* V5TEJ.  */
12593       T(V6),		/* V6.  */
12594       T(V6KZ),		/* V6KZ.  */
12595       T(V6T2),		/* V6T2.  */
12596       T(V6K),		/* V6K.  */
12597       T(V7),		/* V7.  */
12598       T(V6_M),		/* V6_M.  */
12599       T(V6S_M),		/* V6S_M.  */
12600       T(V7E_M),		/* V7E_M.  */
12601       T(V8),		/* V8.  */
12602       -1,		/* Unused.  */
12603       T(V8M_BASE),	/* V8-M BASELINE.  */
12604       T(V8M_MAIN),	/* V8-M MAINLINE.  */
12605       T(V4T_PLUS_V6_M)	/* V4T plus V6_M.  */
12606     };
12607   const int *comb[] =
12608     {
12609       v6t2,
12610       v6k,
12611       v7,
12612       v6_m,
12613       v6s_m,
12614       v7e_m,
12615       v8,
12616       NULL,
12617       v8m_baseline,
12618       v8m_mainline,
12619       /* Pseudo-architecture.  */
12620       v4t_plus_v6_m
12621     };
12622 
12623   /* Check we've not got a higher architecture than we know about.  */
12624 
12625   if (oldtag > MAX_TAG_CPU_ARCH || newtag > MAX_TAG_CPU_ARCH)
12626     {
12627       _bfd_error_handler (_("error: %B: Unknown CPU architecture"), ibfd);
12628       return -1;
12629     }
12630 
12631   /* Override old tag if we have a Tag_also_compatible_with on the output.  */
12632 
12633   if ((oldtag == T(V6_M) && *secondary_compat_out == T(V4T))
12634       || (oldtag == T(V4T) && *secondary_compat_out == T(V6_M)))
12635     oldtag = T(V4T_PLUS_V6_M);
12636 
12637   /* And override the new tag if we have a Tag_also_compatible_with on the
12638      input.  */
12639 
12640   if ((newtag == T(V6_M) && secondary_compat == T(V4T))
12641       || (newtag == T(V4T) && secondary_compat == T(V6_M)))
12642     newtag = T(V4T_PLUS_V6_M);
12643 
12644   tagl = (oldtag < newtag) ? oldtag : newtag;
12645   result = tagh = (oldtag > newtag) ? oldtag : newtag;
12646 
12647   /* Architectures before V6KZ add features monotonically.  */
12648   if (tagh <= TAG_CPU_ARCH_V6KZ)
12649     return result;
12650 
12651   result = comb[tagh - T(V6T2)] ? comb[tagh - T(V6T2)][tagl] : -1;
12652 
12653   /* Use Tag_CPU_arch == V4T and Tag_also_compatible_with (Tag_CPU_arch V6_M)
12654      as the canonical version.  */
12655   if (result == T(V4T_PLUS_V6_M))
12656     {
12657       result = T(V4T);
12658       *secondary_compat_out = T(V6_M);
12659     }
12660   else
12661     *secondary_compat_out = -1;
12662 
12663   if (result == -1)
12664     {
12665       _bfd_error_handler (_("error: %B: Conflicting CPU architectures %d/%d"),
12666 			  ibfd, oldtag, newtag);
12667       return -1;
12668     }
12669 
12670   return result;
12671 #undef T
12672 }
12673 
12674 /* Query attributes object to see if integer divide instructions may be
12675    present in an object.  */
12676 static bfd_boolean
12677 elf32_arm_attributes_accept_div (const obj_attribute *attr)
12678 {
12679   int arch = attr[Tag_CPU_arch].i;
12680   int profile = attr[Tag_CPU_arch_profile].i;
12681 
12682   switch (attr[Tag_DIV_use].i)
12683     {
12684     case 0:
12685       /* Integer divide allowed if instruction contained in archetecture.  */
12686       if (arch == TAG_CPU_ARCH_V7 && (profile == 'R' || profile == 'M'))
12687 	return TRUE;
12688       else if (arch >= TAG_CPU_ARCH_V7E_M)
12689 	return TRUE;
12690       else
12691 	return FALSE;
12692 
12693     case 1:
12694       /* Integer divide explicitly prohibited.  */
12695       return FALSE;
12696 
12697     default:
12698       /* Unrecognised case - treat as allowing divide everywhere.  */
12699     case 2:
12700       /* Integer divide allowed in ARM state.  */
12701       return TRUE;
12702     }
12703 }
12704 
12705 /* Query attributes object to see if integer divide instructions are
12706    forbidden to be in the object.  This is not the inverse of
12707    elf32_arm_attributes_accept_div.  */
12708 static bfd_boolean
12709 elf32_arm_attributes_forbid_div (const obj_attribute *attr)
12710 {
12711   return attr[Tag_DIV_use].i == 1;
12712 }
12713 
12714 /* Merge EABI object attributes from IBFD into OBFD.  Raise an error if there
12715    are conflicting attributes.  */
12716 
12717 static bfd_boolean
12718 elf32_arm_merge_eabi_attributes (bfd *ibfd, bfd *obfd)
12719 {
12720   obj_attribute *in_attr;
12721   obj_attribute *out_attr;
12722   /* Some tags have 0 = don't care, 1 = strong requirement,
12723      2 = weak requirement.  */
12724   static const int order_021[3] = {0, 2, 1};
12725   int i;
12726   bfd_boolean result = TRUE;
12727   const char *sec_name = get_elf_backend_data (ibfd)->obj_attrs_section;
12728 
12729   /* Skip the linker stubs file.  This preserves previous behavior
12730      of accepting unknown attributes in the first input file - but
12731      is that a bug?  */
12732   if (ibfd->flags & BFD_LINKER_CREATED)
12733     return TRUE;
12734 
12735   /* Skip any input that hasn't attribute section.
12736      This enables to link object files without attribute section with
12737      any others.  */
12738   if (bfd_get_section_by_name (ibfd, sec_name) == NULL)
12739     return TRUE;
12740 
12741   if (!elf_known_obj_attributes_proc (obfd)[0].i)
12742     {
12743       /* This is the first object.  Copy the attributes.  */
12744       _bfd_elf_copy_obj_attributes (ibfd, obfd);
12745 
12746       out_attr = elf_known_obj_attributes_proc (obfd);
12747 
12748       /* Use the Tag_null value to indicate the attributes have been
12749 	 initialized.  */
12750       out_attr[0].i = 1;
12751 
12752       /* We do not output objects with Tag_MPextension_use_legacy - we move
12753 	 the attribute's value to Tag_MPextension_use.  */
12754       if (out_attr[Tag_MPextension_use_legacy].i != 0)
12755 	{
12756 	  if (out_attr[Tag_MPextension_use].i != 0
12757 	      && out_attr[Tag_MPextension_use_legacy].i
12758 		!= out_attr[Tag_MPextension_use].i)
12759 	    {
12760 	      _bfd_error_handler
12761 		(_("Error: %B has both the current and legacy "
12762 		   "Tag_MPextension_use attributes"), ibfd);
12763 	      result = FALSE;
12764 	    }
12765 
12766 	  out_attr[Tag_MPextension_use] =
12767 	    out_attr[Tag_MPextension_use_legacy];
12768 	  out_attr[Tag_MPextension_use_legacy].type = 0;
12769 	  out_attr[Tag_MPextension_use_legacy].i = 0;
12770 	}
12771 
12772       return result;
12773     }
12774 
12775   in_attr = elf_known_obj_attributes_proc (ibfd);
12776   out_attr = elf_known_obj_attributes_proc (obfd);
12777   /* This needs to happen before Tag_ABI_FP_number_model is merged.  */
12778   if (in_attr[Tag_ABI_VFP_args].i != out_attr[Tag_ABI_VFP_args].i)
12779     {
12780       /* Ignore mismatches if the object doesn't use floating point or is
12781 	 floating point ABI independent.  */
12782       if (out_attr[Tag_ABI_FP_number_model].i == AEABI_FP_number_model_none
12783 	  || (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12784 	      && out_attr[Tag_ABI_VFP_args].i == AEABI_VFP_args_compatible))
12785 	out_attr[Tag_ABI_VFP_args].i = in_attr[Tag_ABI_VFP_args].i;
12786       else if (in_attr[Tag_ABI_FP_number_model].i != AEABI_FP_number_model_none
12787 	       && in_attr[Tag_ABI_VFP_args].i != AEABI_VFP_args_compatible)
12788 	{
12789 	  _bfd_error_handler
12790 	    (_("error: %B uses VFP register arguments, %B does not"),
12791 	     in_attr[Tag_ABI_VFP_args].i ? ibfd : obfd,
12792 	     in_attr[Tag_ABI_VFP_args].i ? obfd : ibfd);
12793 	  result = FALSE;
12794 	}
12795     }
12796 
12797   for (i = LEAST_KNOWN_OBJ_ATTRIBUTE; i < NUM_KNOWN_OBJ_ATTRIBUTES; i++)
12798     {
12799       /* Merge this attribute with existing attributes.  */
12800       switch (i)
12801 	{
12802 	case Tag_CPU_raw_name:
12803 	case Tag_CPU_name:
12804 	  /* These are merged after Tag_CPU_arch.  */
12805 	  break;
12806 
12807 	case Tag_ABI_optimization_goals:
12808 	case Tag_ABI_FP_optimization_goals:
12809 	  /* Use the first value seen.  */
12810 	  break;
12811 
12812 	case Tag_CPU_arch:
12813 	  {
12814 	    int secondary_compat = -1, secondary_compat_out = -1;
12815 	    unsigned int saved_out_attr = out_attr[i].i;
12816 	    int arch_attr;
12817 	    static const char *name_table[] =
12818 	      {
12819 		/* These aren't real CPU names, but we can't guess
12820 		   that from the architecture version alone.  */
12821 		"Pre v4",
12822 		"ARM v4",
12823 		"ARM v4T",
12824 		"ARM v5T",
12825 		"ARM v5TE",
12826 		"ARM v5TEJ",
12827 		"ARM v6",
12828 		"ARM v6KZ",
12829 		"ARM v6T2",
12830 		"ARM v6K",
12831 		"ARM v7",
12832 		"ARM v6-M",
12833 		"ARM v6S-M",
12834 		"ARM v8",
12835 		"",
12836 		"ARM v8-M.baseline",
12837 		"ARM v8-M.mainline",
12838 	    };
12839 
12840 	    /* Merge Tag_CPU_arch and Tag_also_compatible_with.  */
12841 	    secondary_compat = get_secondary_compatible_arch (ibfd);
12842 	    secondary_compat_out = get_secondary_compatible_arch (obfd);
12843 	    arch_attr = tag_cpu_arch_combine (ibfd, out_attr[i].i,
12844 					      &secondary_compat_out,
12845 					      in_attr[i].i,
12846 					      secondary_compat);
12847 
12848 	    /* Return with error if failed to merge.  */
12849 	    if (arch_attr == -1)
12850 	      return FALSE;
12851 
12852 	    out_attr[i].i = arch_attr;
12853 
12854 	    set_secondary_compatible_arch (obfd, secondary_compat_out);
12855 
12856 	    /* Merge Tag_CPU_name and Tag_CPU_raw_name.  */
12857 	    if (out_attr[i].i == saved_out_attr)
12858 	      ; /* Leave the names alone.  */
12859 	    else if (out_attr[i].i == in_attr[i].i)
12860 	      {
12861 		/* The output architecture has been changed to match the
12862 		   input architecture.  Use the input names.  */
12863 		out_attr[Tag_CPU_name].s = in_attr[Tag_CPU_name].s
12864 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_name].s)
12865 		  : NULL;
12866 		out_attr[Tag_CPU_raw_name].s = in_attr[Tag_CPU_raw_name].s
12867 		  ? _bfd_elf_attr_strdup (obfd, in_attr[Tag_CPU_raw_name].s)
12868 		  : NULL;
12869 	      }
12870 	    else
12871 	      {
12872 		out_attr[Tag_CPU_name].s = NULL;
12873 		out_attr[Tag_CPU_raw_name].s = NULL;
12874 	      }
12875 
12876 	    /* If we still don't have a value for Tag_CPU_name,
12877 	       make one up now.  Tag_CPU_raw_name remains blank.  */
12878 	    if (out_attr[Tag_CPU_name].s == NULL
12879 		&& out_attr[i].i < ARRAY_SIZE (name_table))
12880 	      out_attr[Tag_CPU_name].s =
12881 		_bfd_elf_attr_strdup (obfd, name_table[out_attr[i].i]);
12882 	  }
12883 	  break;
12884 
12885 	case Tag_ARM_ISA_use:
12886 	case Tag_THUMB_ISA_use:
12887 	case Tag_WMMX_arch:
12888 	case Tag_Advanced_SIMD_arch:
12889 	  /* ??? Do Advanced_SIMD (NEON) and WMMX conflict?  */
12890 	case Tag_ABI_FP_rounding:
12891 	case Tag_ABI_FP_exceptions:
12892 	case Tag_ABI_FP_user_exceptions:
12893 	case Tag_ABI_FP_number_model:
12894 	case Tag_FP_HP_extension:
12895 	case Tag_CPU_unaligned_access:
12896 	case Tag_T2EE_use:
12897 	case Tag_MPextension_use:
12898 	  /* Use the largest value specified.  */
12899 	  if (in_attr[i].i > out_attr[i].i)
12900 	    out_attr[i].i = in_attr[i].i;
12901 	  break;
12902 
12903 	case Tag_ABI_align_preserved:
12904 	case Tag_ABI_PCS_RO_data:
12905 	  /* Use the smallest value specified.  */
12906 	  if (in_attr[i].i < out_attr[i].i)
12907 	    out_attr[i].i = in_attr[i].i;
12908 	  break;
12909 
12910 	case Tag_ABI_align_needed:
12911 	  if ((in_attr[i].i > 0 || out_attr[i].i > 0)
12912 	      && (in_attr[Tag_ABI_align_preserved].i == 0
12913 		  || out_attr[Tag_ABI_align_preserved].i == 0))
12914 	    {
12915 	      /* This error message should be enabled once all non-conformant
12916 		 binaries in the toolchain have had the attributes set
12917 		 properly.
12918 	      _bfd_error_handler
12919 		(_("error: %B: 8-byte data alignment conflicts with %B"),
12920 		 obfd, ibfd);
12921 	      result = FALSE; */
12922 	    }
12923 	  /* Fall through.  */
12924 	case Tag_ABI_FP_denormal:
12925 	case Tag_ABI_PCS_GOT_use:
12926 	  /* Use the "greatest" from the sequence 0, 2, 1, or the largest
12927 	     value if greater than 2 (for future-proofing).  */
12928 	  if ((in_attr[i].i > 2 && in_attr[i].i > out_attr[i].i)
12929 	      || (in_attr[i].i <= 2 && out_attr[i].i <= 2
12930 		  && order_021[in_attr[i].i] > order_021[out_attr[i].i]))
12931 	    out_attr[i].i = in_attr[i].i;
12932 	  break;
12933 
12934 	case Tag_Virtualization_use:
12935 	  /* The virtualization tag effectively stores two bits of
12936 	     information: the intended use of TrustZone (in bit 0), and the
12937 	     intended use of Virtualization (in bit 1).  */
12938 	  if (out_attr[i].i == 0)
12939 	    out_attr[i].i = in_attr[i].i;
12940 	  else if (in_attr[i].i != 0
12941 		   && in_attr[i].i != out_attr[i].i)
12942 	    {
12943 	      if (in_attr[i].i <= 3 && out_attr[i].i <= 3)
12944 		out_attr[i].i = 3;
12945 	      else
12946 		{
12947 		  _bfd_error_handler
12948 		    (_("error: %B: unable to merge virtualization attributes "
12949 		       "with %B"),
12950 		     obfd, ibfd);
12951 		  result = FALSE;
12952 		}
12953 	    }
12954 	  break;
12955 
12956 	case Tag_CPU_arch_profile:
12957 	  if (out_attr[i].i != in_attr[i].i)
12958 	    {
12959 	      /* 0 will merge with anything.
12960 		 'A' and 'S' merge to 'A'.
12961 		 'R' and 'S' merge to 'R'.
12962 		 'M' and 'A|R|S' is an error.  */
12963 	      if (out_attr[i].i == 0
12964 		  || (out_attr[i].i == 'S'
12965 		      && (in_attr[i].i == 'A' || in_attr[i].i == 'R')))
12966 		out_attr[i].i = in_attr[i].i;
12967 	      else if (in_attr[i].i == 0
12968 		       || (in_attr[i].i == 'S'
12969 			   && (out_attr[i].i == 'A' || out_attr[i].i == 'R')))
12970 		; /* Do nothing.  */
12971 	      else
12972 		{
12973 		  _bfd_error_handler
12974 		    (_("error: %B: Conflicting architecture profiles %c/%c"),
12975 		     ibfd,
12976 		     in_attr[i].i ? in_attr[i].i : '0',
12977 		     out_attr[i].i ? out_attr[i].i : '0');
12978 		  result = FALSE;
12979 		}
12980 	    }
12981 	  break;
12982 
12983 	case Tag_DSP_extension:
12984 	  /* No need to change output value if any of:
12985 	     - pre (<=) ARMv5T input architecture (do not have DSP)
12986 	     - M input profile not ARMv7E-M and do not have DSP.  */
12987 	  if (in_attr[Tag_CPU_arch].i <= 3
12988 	      || (in_attr[Tag_CPU_arch_profile].i == 'M'
12989 		  && in_attr[Tag_CPU_arch].i != 13
12990 		  && in_attr[i].i == 0))
12991 	    ; /* Do nothing.  */
12992 	  /* Output value should be 0 if DSP part of architecture, ie.
12993 	     - post (>=) ARMv5te architecture output
12994 	     - A, R or S profile output or ARMv7E-M output architecture.  */
12995 	  else if (out_attr[Tag_CPU_arch].i >= 4
12996 		   && (out_attr[Tag_CPU_arch_profile].i == 'A'
12997 		       || out_attr[Tag_CPU_arch_profile].i == 'R'
12998 		       || out_attr[Tag_CPU_arch_profile].i == 'S'
12999 		       || out_attr[Tag_CPU_arch].i == 13))
13000 	    out_attr[i].i = 0;
13001 	  /* Otherwise, DSP instructions are added and not part of output
13002 	     architecture.  */
13003 	  else
13004 	    out_attr[i].i = 1;
13005 	  break;
13006 
13007 	case Tag_FP_arch:
13008 	    {
13009 	      /* Tag_ABI_HardFP_use is handled along with Tag_FP_arch since
13010 		 the meaning of Tag_ABI_HardFP_use depends on Tag_FP_arch
13011 		 when it's 0.  It might mean absence of FP hardware if
13012 		 Tag_FP_arch is zero.  */
13013 
13014 #define VFP_VERSION_COUNT 9
13015 	      static const struct
13016 	      {
13017 		  int ver;
13018 		  int regs;
13019 	      } vfp_versions[VFP_VERSION_COUNT] =
13020 		{
13021 		  {0, 0},
13022 		  {1, 16},
13023 		  {2, 16},
13024 		  {3, 32},
13025 		  {3, 16},
13026 		  {4, 32},
13027 		  {4, 16},
13028 		  {8, 32},
13029 		  {8, 16}
13030 		};
13031 	      int ver;
13032 	      int regs;
13033 	      int newval;
13034 
13035 	      /* If the output has no requirement about FP hardware,
13036 		 follow the requirement of the input.  */
13037 	      if (out_attr[i].i == 0)
13038 		{
13039 		  BFD_ASSERT (out_attr[Tag_ABI_HardFP_use].i == 0);
13040 		  out_attr[i].i = in_attr[i].i;
13041 		  out_attr[Tag_ABI_HardFP_use].i
13042 		    = in_attr[Tag_ABI_HardFP_use].i;
13043 		  break;
13044 		}
13045 	      /* If the input has no requirement about FP hardware, do
13046 		 nothing.  */
13047 	      else if (in_attr[i].i == 0)
13048 		{
13049 		  BFD_ASSERT (in_attr[Tag_ABI_HardFP_use].i == 0);
13050 		  break;
13051 		}
13052 
13053 	      /* Both the input and the output have nonzero Tag_FP_arch.
13054 		 So Tag_ABI_HardFP_use is implied by Tag_FP_arch when it's zero.  */
13055 
13056 	      /* If both the input and the output have zero Tag_ABI_HardFP_use,
13057 		 do nothing.  */
13058 	      if (in_attr[Tag_ABI_HardFP_use].i == 0
13059 		  && out_attr[Tag_ABI_HardFP_use].i == 0)
13060 		;
13061 	      /* If the input and the output have different Tag_ABI_HardFP_use,
13062 		 the combination of them is 0 (implied by Tag_FP_arch).  */
13063 	      else if (in_attr[Tag_ABI_HardFP_use].i
13064 		       != out_attr[Tag_ABI_HardFP_use].i)
13065 		out_attr[Tag_ABI_HardFP_use].i = 0;
13066 
13067 	      /* Now we can handle Tag_FP_arch.  */
13068 
13069 	      /* Values of VFP_VERSION_COUNT or more aren't defined, so just
13070 		 pick the biggest.  */
13071 	      if (in_attr[i].i >= VFP_VERSION_COUNT
13072 		  && in_attr[i].i > out_attr[i].i)
13073 		{
13074 		  out_attr[i] = in_attr[i];
13075 		  break;
13076 		}
13077 	      /* The output uses the superset of input features
13078 		 (ISA version) and registers.  */
13079 	      ver = vfp_versions[in_attr[i].i].ver;
13080 	      if (ver < vfp_versions[out_attr[i].i].ver)
13081 		ver = vfp_versions[out_attr[i].i].ver;
13082 	      regs = vfp_versions[in_attr[i].i].regs;
13083 	      if (regs < vfp_versions[out_attr[i].i].regs)
13084 		regs = vfp_versions[out_attr[i].i].regs;
13085 	      /* This assumes all possible supersets are also a valid
13086 		 options.  */
13087 	      for (newval = VFP_VERSION_COUNT - 1; newval > 0; newval--)
13088 		{
13089 		  if (regs == vfp_versions[newval].regs
13090 		      && ver == vfp_versions[newval].ver)
13091 		    break;
13092 		}
13093 	      out_attr[i].i = newval;
13094 	    }
13095 	  break;
13096 	case Tag_PCS_config:
13097 	  if (out_attr[i].i == 0)
13098 	    out_attr[i].i = in_attr[i].i;
13099 	  else if (in_attr[i].i != 0 && out_attr[i].i != in_attr[i].i)
13100 	    {
13101 	      /* It's sometimes ok to mix different configs, so this is only
13102 		 a warning.  */
13103 	      _bfd_error_handler
13104 		(_("Warning: %B: Conflicting platform configuration"), ibfd);
13105 	    }
13106 	  break;
13107 	case Tag_ABI_PCS_R9_use:
13108 	  if (in_attr[i].i != out_attr[i].i
13109 	      && out_attr[i].i != AEABI_R9_unused
13110 	      && in_attr[i].i != AEABI_R9_unused)
13111 	    {
13112 	      _bfd_error_handler
13113 		(_("error: %B: Conflicting use of R9"), ibfd);
13114 	      result = FALSE;
13115 	    }
13116 	  if (out_attr[i].i == AEABI_R9_unused)
13117 	    out_attr[i].i = in_attr[i].i;
13118 	  break;
13119 	case Tag_ABI_PCS_RW_data:
13120 	  if (in_attr[i].i == AEABI_PCS_RW_data_SBrel
13121 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_SB
13122 	      && out_attr[Tag_ABI_PCS_R9_use].i != AEABI_R9_unused)
13123 	    {
13124 	      _bfd_error_handler
13125 		(_("error: %B: SB relative addressing conflicts with use of R9"),
13126 		 ibfd);
13127 	      result = FALSE;
13128 	    }
13129 	  /* Use the smallest value specified.  */
13130 	  if (in_attr[i].i < out_attr[i].i)
13131 	    out_attr[i].i = in_attr[i].i;
13132 	  break;
13133 	case Tag_ABI_PCS_wchar_t:
13134 	  if (out_attr[i].i && in_attr[i].i && out_attr[i].i != in_attr[i].i
13135 	      && !elf_arm_tdata (obfd)->no_wchar_size_warning)
13136 	    {
13137 	      _bfd_error_handler
13138 		(_("warning: %B uses %u-byte wchar_t yet the output is to use %u-byte wchar_t; use of wchar_t values across objects may fail"),
13139 		 ibfd, in_attr[i].i, out_attr[i].i);
13140 	    }
13141 	  else if (in_attr[i].i && !out_attr[i].i)
13142 	    out_attr[i].i = in_attr[i].i;
13143 	  break;
13144 	case Tag_ABI_enum_size:
13145 	  if (in_attr[i].i != AEABI_enum_unused)
13146 	    {
13147 	      if (out_attr[i].i == AEABI_enum_unused
13148 		  || out_attr[i].i == AEABI_enum_forced_wide)
13149 		{
13150 		  /* The existing object is compatible with anything.
13151 		     Use whatever requirements the new object has.  */
13152 		  out_attr[i].i = in_attr[i].i;
13153 		}
13154 	      else if (in_attr[i].i != AEABI_enum_forced_wide
13155 		       && out_attr[i].i != in_attr[i].i
13156 		       && !elf_arm_tdata (obfd)->no_enum_size_warning)
13157 		{
13158 		  static const char *aeabi_enum_names[] =
13159 		    { "", "variable-size", "32-bit", "" };
13160 		  const char *in_name =
13161 		    in_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13162 		    ? aeabi_enum_names[in_attr[i].i]
13163 		    : "<unknown>";
13164 		  const char *out_name =
13165 		    out_attr[i].i < ARRAY_SIZE(aeabi_enum_names)
13166 		    ? aeabi_enum_names[out_attr[i].i]
13167 		    : "<unknown>";
13168 		  _bfd_error_handler
13169 		    (_("warning: %B uses %s enums yet the output is to use %s enums; use of enum values across objects may fail"),
13170 		     ibfd, in_name, out_name);
13171 		}
13172 	    }
13173 	  break;
13174 	case Tag_ABI_VFP_args:
13175 	  /* Aready done.  */
13176 	  break;
13177 	case Tag_ABI_WMMX_args:
13178 	  if (in_attr[i].i != out_attr[i].i)
13179 	    {
13180 	      _bfd_error_handler
13181 		(_("error: %B uses iWMMXt register arguments, %B does not"),
13182 		 ibfd, obfd);
13183 	      result = FALSE;
13184 	    }
13185 	  break;
13186 	case Tag_compatibility:
13187 	  /* Merged in target-independent code.  */
13188 	  break;
13189 	case Tag_ABI_HardFP_use:
13190 	  /* This is handled along with Tag_FP_arch.  */
13191 	  break;
13192 	case Tag_ABI_FP_16bit_format:
13193 	  if (in_attr[i].i != 0 && out_attr[i].i != 0)
13194 	    {
13195 	      if (in_attr[i].i != out_attr[i].i)
13196 		{
13197 		  _bfd_error_handler
13198 		    (_("error: fp16 format mismatch between %B and %B"),
13199 		     ibfd, obfd);
13200 		  result = FALSE;
13201 		}
13202 	    }
13203 	  if (in_attr[i].i != 0)
13204 	    out_attr[i].i = in_attr[i].i;
13205 	  break;
13206 
13207 	case Tag_DIV_use:
13208 	  /* A value of zero on input means that the divide instruction may
13209 	     be used if available in the base architecture as specified via
13210 	     Tag_CPU_arch and Tag_CPU_arch_profile.  A value of 1 means that
13211 	     the user did not want divide instructions.  A value of 2
13212 	     explicitly means that divide instructions were allowed in ARM
13213 	     and Thumb state.  */
13214 	  if (in_attr[i].i == out_attr[i].i)
13215 	    /* Do nothing.  */ ;
13216 	  else if (elf32_arm_attributes_forbid_div (in_attr)
13217 		   && !elf32_arm_attributes_accept_div (out_attr))
13218 	    out_attr[i].i = 1;
13219 	  else if (elf32_arm_attributes_forbid_div (out_attr)
13220 		   && elf32_arm_attributes_accept_div (in_attr))
13221 	    out_attr[i].i = in_attr[i].i;
13222 	  else if (in_attr[i].i == 2)
13223 	    out_attr[i].i = in_attr[i].i;
13224 	  break;
13225 
13226 	case Tag_MPextension_use_legacy:
13227 	  /* We don't output objects with Tag_MPextension_use_legacy - we
13228 	     move the value to Tag_MPextension_use.  */
13229 	  if (in_attr[i].i != 0 && in_attr[Tag_MPextension_use].i != 0)
13230 	    {
13231 	      if (in_attr[Tag_MPextension_use].i != in_attr[i].i)
13232 		{
13233 		  _bfd_error_handler
13234 		    (_("%B has has both the current and legacy "
13235 		       "Tag_MPextension_use attributes"),
13236 		     ibfd);
13237 		  result = FALSE;
13238 		}
13239 	    }
13240 
13241 	  if (in_attr[i].i > out_attr[Tag_MPextension_use].i)
13242 	    out_attr[Tag_MPextension_use] = in_attr[i];
13243 
13244 	  break;
13245 
13246 	case Tag_nodefaults:
13247 	  /* This tag is set if it exists, but the value is unused (and is
13248 	     typically zero).  We don't actually need to do anything here -
13249 	     the merge happens automatically when the type flags are merged
13250 	     below.  */
13251 	  break;
13252 	case Tag_also_compatible_with:
13253 	  /* Already done in Tag_CPU_arch.  */
13254 	  break;
13255 	case Tag_conformance:
13256 	  /* Keep the attribute if it matches.  Throw it away otherwise.
13257 	     No attribute means no claim to conform.  */
13258 	  if (!in_attr[i].s || !out_attr[i].s
13259 	      || strcmp (in_attr[i].s, out_attr[i].s) != 0)
13260 	    out_attr[i].s = NULL;
13261 	  break;
13262 
13263 	default:
13264 	  result
13265 	    = result && _bfd_elf_merge_unknown_attribute_low (ibfd, obfd, i);
13266 	}
13267 
13268       /* If out_attr was copied from in_attr then it won't have a type yet.  */
13269       if (in_attr[i].type && !out_attr[i].type)
13270 	out_attr[i].type = in_attr[i].type;
13271     }
13272 
13273   /* Merge Tag_compatibility attributes and any common GNU ones.  */
13274   if (!_bfd_elf_merge_object_attributes (ibfd, obfd))
13275     return FALSE;
13276 
13277   /* Check for any attributes not known on ARM.  */
13278   result &= _bfd_elf_merge_unknown_attribute_list (ibfd, obfd);
13279 
13280   return result;
13281 }
13282 
13283 
13284 /* Return TRUE if the two EABI versions are incompatible.  */
13285 
13286 static bfd_boolean
13287 elf32_arm_versions_compatible (unsigned iver, unsigned over)
13288 {
13289   /* v4 and v5 are the same spec before and after it was released,
13290      so allow mixing them.  */
13291   if ((iver == EF_ARM_EABI_VER4 && over == EF_ARM_EABI_VER5)
13292       || (iver == EF_ARM_EABI_VER5 && over == EF_ARM_EABI_VER4))
13293     return TRUE;
13294 
13295   return (iver == over);
13296 }
13297 
13298 /* Merge backend specific data from an object file to the output
13299    object file when linking.  */
13300 
13301 static bfd_boolean
13302 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd);
13303 
13304 /* Display the flags field.  */
13305 
13306 static bfd_boolean
13307 elf32_arm_print_private_bfd_data (bfd *abfd, void * ptr)
13308 {
13309   FILE * file = (FILE *) ptr;
13310   unsigned long flags;
13311 
13312   BFD_ASSERT (abfd != NULL && ptr != NULL);
13313 
13314   /* Print normal ELF private data.  */
13315   _bfd_elf_print_private_bfd_data (abfd, ptr);
13316 
13317   flags = elf_elfheader (abfd)->e_flags;
13318   /* Ignore init flag - it may not be set, despite the flags field
13319      containing valid data.  */
13320 
13321   /* xgettext:c-format */
13322   fprintf (file, _("private flags = %lx:"), elf_elfheader (abfd)->e_flags);
13323 
13324   switch (EF_ARM_EABI_VERSION (flags))
13325     {
13326     case EF_ARM_EABI_UNKNOWN:
13327       /* The following flag bits are GNU extensions and not part of the
13328 	 official ARM ELF extended ABI.  Hence they are only decoded if
13329 	 the EABI version is not set.  */
13330       if (flags & EF_ARM_INTERWORK)
13331 	fprintf (file, _(" [interworking enabled]"));
13332 
13333       if (flags & EF_ARM_APCS_26)
13334 	fprintf (file, " [APCS-26]");
13335       else
13336 	fprintf (file, " [APCS-32]");
13337 
13338       if (flags & EF_ARM_VFP_FLOAT)
13339 	fprintf (file, _(" [VFP float format]"));
13340       else if (flags & EF_ARM_MAVERICK_FLOAT)
13341 	fprintf (file, _(" [Maverick float format]"));
13342       else
13343 	fprintf (file, _(" [FPA float format]"));
13344 
13345       if (flags & EF_ARM_APCS_FLOAT)
13346 	fprintf (file, _(" [floats passed in float registers]"));
13347 
13348       if (flags & EF_ARM_PIC)
13349 	fprintf (file, _(" [position independent]"));
13350 
13351       if (flags & EF_ARM_NEW_ABI)
13352 	fprintf (file, _(" [new ABI]"));
13353 
13354       if (flags & EF_ARM_OLD_ABI)
13355 	fprintf (file, _(" [old ABI]"));
13356 
13357       if (flags & EF_ARM_SOFT_FLOAT)
13358 	fprintf (file, _(" [software FP]"));
13359 
13360       flags &= ~(EF_ARM_INTERWORK | EF_ARM_APCS_26 | EF_ARM_APCS_FLOAT
13361 		 | EF_ARM_PIC | EF_ARM_NEW_ABI | EF_ARM_OLD_ABI
13362 		 | EF_ARM_SOFT_FLOAT | EF_ARM_VFP_FLOAT
13363 		 | EF_ARM_MAVERICK_FLOAT);
13364       break;
13365 
13366     case EF_ARM_EABI_VER1:
13367       fprintf (file, _(" [Version1 EABI]"));
13368 
13369       if (flags & EF_ARM_SYMSARESORTED)
13370 	fprintf (file, _(" [sorted symbol table]"));
13371       else
13372 	fprintf (file, _(" [unsorted symbol table]"));
13373 
13374       flags &= ~ EF_ARM_SYMSARESORTED;
13375       break;
13376 
13377     case EF_ARM_EABI_VER2:
13378       fprintf (file, _(" [Version2 EABI]"));
13379 
13380       if (flags & EF_ARM_SYMSARESORTED)
13381 	fprintf (file, _(" [sorted symbol table]"));
13382       else
13383 	fprintf (file, _(" [unsorted symbol table]"));
13384 
13385       if (flags & EF_ARM_DYNSYMSUSESEGIDX)
13386 	fprintf (file, _(" [dynamic symbols use segment index]"));
13387 
13388       if (flags & EF_ARM_MAPSYMSFIRST)
13389 	fprintf (file, _(" [mapping symbols precede others]"));
13390 
13391       flags &= ~(EF_ARM_SYMSARESORTED | EF_ARM_DYNSYMSUSESEGIDX
13392 		 | EF_ARM_MAPSYMSFIRST);
13393       break;
13394 
13395     case EF_ARM_EABI_VER3:
13396       fprintf (file, _(" [Version3 EABI]"));
13397       break;
13398 
13399     case EF_ARM_EABI_VER4:
13400       fprintf (file, _(" [Version4 EABI]"));
13401       goto eabi;
13402 
13403     case EF_ARM_EABI_VER5:
13404       fprintf (file, _(" [Version5 EABI]"));
13405 
13406       if (flags & EF_ARM_ABI_FLOAT_SOFT)
13407 	fprintf (file, _(" [soft-float ABI]"));
13408 
13409       if (flags & EF_ARM_ABI_FLOAT_HARD)
13410 	fprintf (file, _(" [hard-float ABI]"));
13411 
13412       flags &= ~(EF_ARM_ABI_FLOAT_SOFT | EF_ARM_ABI_FLOAT_HARD);
13413 
13414     eabi:
13415       if (flags & EF_ARM_BE8)
13416 	fprintf (file, _(" [BE8]"));
13417 
13418       if (flags & EF_ARM_LE8)
13419 	fprintf (file, _(" [LE8]"));
13420 
13421       flags &= ~(EF_ARM_LE8 | EF_ARM_BE8);
13422       break;
13423 
13424     default:
13425       fprintf (file, _(" <EABI version unrecognised>"));
13426       break;
13427     }
13428 
13429   flags &= ~ EF_ARM_EABIMASK;
13430 
13431   if (flags & EF_ARM_RELEXEC)
13432     fprintf (file, _(" [relocatable executable]"));
13433 
13434   flags &= ~EF_ARM_RELEXEC;
13435 
13436   if (flags)
13437     fprintf (file, _("<Unrecognised flag bits set>"));
13438 
13439   fputc ('\n', file);
13440 
13441   return TRUE;
13442 }
13443 
13444 static int
13445 elf32_arm_get_symbol_type (Elf_Internal_Sym * elf_sym, int type)
13446 {
13447   switch (ELF_ST_TYPE (elf_sym->st_info))
13448     {
13449     case STT_ARM_TFUNC:
13450       return ELF_ST_TYPE (elf_sym->st_info);
13451 
13452     case STT_ARM_16BIT:
13453       /* If the symbol is not an object, return the STT_ARM_16BIT flag.
13454 	 This allows us to distinguish between data used by Thumb instructions
13455 	 and non-data (which is probably code) inside Thumb regions of an
13456 	 executable.  */
13457       if (type != STT_OBJECT && type != STT_TLS)
13458 	return ELF_ST_TYPE (elf_sym->st_info);
13459       break;
13460 
13461     default:
13462       break;
13463     }
13464 
13465   return type;
13466 }
13467 
13468 static asection *
13469 elf32_arm_gc_mark_hook (asection *sec,
13470 			struct bfd_link_info *info,
13471 			Elf_Internal_Rela *rel,
13472 			struct elf_link_hash_entry *h,
13473 			Elf_Internal_Sym *sym)
13474 {
13475   if (h != NULL)
13476     switch (ELF32_R_TYPE (rel->r_info))
13477       {
13478       case R_ARM_GNU_VTINHERIT:
13479       case R_ARM_GNU_VTENTRY:
13480 	return NULL;
13481       }
13482 
13483   return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym);
13484 }
13485 
13486 /* Update the got entry reference counts for the section being removed.  */
13487 
13488 static bfd_boolean
13489 elf32_arm_gc_sweep_hook (bfd *                     abfd,
13490 			 struct bfd_link_info *    info,
13491 			 asection *                sec,
13492 			 const Elf_Internal_Rela * relocs)
13493 {
13494   Elf_Internal_Shdr *symtab_hdr;
13495   struct elf_link_hash_entry **sym_hashes;
13496   bfd_signed_vma *local_got_refcounts;
13497   const Elf_Internal_Rela *rel, *relend;
13498   struct elf32_arm_link_hash_table * globals;
13499 
13500   if (bfd_link_relocatable (info))
13501     return TRUE;
13502 
13503   globals = elf32_arm_hash_table (info);
13504   if (globals == NULL)
13505     return FALSE;
13506 
13507   elf_section_data (sec)->local_dynrel = NULL;
13508 
13509   symtab_hdr = & elf_symtab_hdr (abfd);
13510   sym_hashes = elf_sym_hashes (abfd);
13511   local_got_refcounts = elf_local_got_refcounts (abfd);
13512 
13513   check_use_blx (globals);
13514 
13515   relend = relocs + sec->reloc_count;
13516   for (rel = relocs; rel < relend; rel++)
13517     {
13518       unsigned long r_symndx;
13519       struct elf_link_hash_entry *h = NULL;
13520       struct elf32_arm_link_hash_entry *eh;
13521       int r_type;
13522       bfd_boolean call_reloc_p;
13523       bfd_boolean may_become_dynamic_p;
13524       bfd_boolean may_need_local_target_p;
13525       union gotplt_union *root_plt;
13526       struct arm_plt_info *arm_plt;
13527 
13528       r_symndx = ELF32_R_SYM (rel->r_info);
13529       if (r_symndx >= symtab_hdr->sh_info)
13530 	{
13531 	  h = sym_hashes[r_symndx - symtab_hdr->sh_info];
13532 	  while (h->root.type == bfd_link_hash_indirect
13533 		 || h->root.type == bfd_link_hash_warning)
13534 	    h = (struct elf_link_hash_entry *) h->root.u.i.link;
13535 	}
13536       eh = (struct elf32_arm_link_hash_entry *) h;
13537 
13538       call_reloc_p = FALSE;
13539       may_become_dynamic_p = FALSE;
13540       may_need_local_target_p = FALSE;
13541 
13542       r_type = ELF32_R_TYPE (rel->r_info);
13543       r_type = arm_real_reloc_type (globals, r_type);
13544       switch (r_type)
13545 	{
13546 	case R_ARM_GOT32:
13547 	case R_ARM_GOT_PREL:
13548 	case R_ARM_TLS_GD32:
13549 	case R_ARM_TLS_IE32:
13550 	  if (h != NULL)
13551 	    {
13552 	      if (h->got.refcount > 0)
13553 		h->got.refcount -= 1;
13554 	    }
13555 	  else if (local_got_refcounts != NULL)
13556 	    {
13557 	      if (local_got_refcounts[r_symndx] > 0)
13558 		local_got_refcounts[r_symndx] -= 1;
13559 	    }
13560 	  break;
13561 
13562 	case R_ARM_TLS_LDM32:
13563 	  globals->tls_ldm_got.refcount -= 1;
13564 	  break;
13565 
13566 	case R_ARM_PC24:
13567 	case R_ARM_PLT32:
13568 	case R_ARM_CALL:
13569 	case R_ARM_JUMP24:
13570 	case R_ARM_PREL31:
13571 	case R_ARM_THM_CALL:
13572 	case R_ARM_THM_JUMP24:
13573 	case R_ARM_THM_JUMP19:
13574 	  call_reloc_p = TRUE;
13575 	  may_need_local_target_p = TRUE;
13576 	  break;
13577 
13578 	case R_ARM_ABS12:
13579 	  if (!globals->vxworks_p)
13580 	    {
13581 	      may_need_local_target_p = TRUE;
13582 	      break;
13583 	    }
13584 	  /* Fall through.  */
13585 	case R_ARM_ABS32:
13586 	case R_ARM_ABS32_NOI:
13587 	case R_ARM_REL32:
13588 	case R_ARM_REL32_NOI:
13589 	case R_ARM_MOVW_ABS_NC:
13590 	case R_ARM_MOVT_ABS:
13591 	case R_ARM_MOVW_PREL_NC:
13592 	case R_ARM_MOVT_PREL:
13593 	case R_ARM_THM_MOVW_ABS_NC:
13594 	case R_ARM_THM_MOVT_ABS:
13595 	case R_ARM_THM_MOVW_PREL_NC:
13596 	case R_ARM_THM_MOVT_PREL:
13597 	  /* Should the interworking branches be here also?  */
13598 	  if ((bfd_link_pic (info) || globals->root.is_relocatable_executable)
13599 	      && (sec->flags & SEC_ALLOC) != 0)
13600 	    {
13601 	      if (h == NULL
13602 		  && elf32_arm_howto_from_type (r_type)->pc_relative)
13603 		{
13604 		  call_reloc_p = TRUE;
13605 		  may_need_local_target_p = TRUE;
13606 		}
13607 	      else
13608 		may_become_dynamic_p = TRUE;
13609 	    }
13610 	  else
13611 	    may_need_local_target_p = TRUE;
13612 	  break;
13613 
13614 	default:
13615 	  break;
13616 	}
13617 
13618       if (may_need_local_target_p
13619 	  && elf32_arm_get_plt_info (abfd, eh, r_symndx, &root_plt, &arm_plt))
13620 	{
13621 	  /* If PLT refcount book-keeping is wrong and too low, we'll
13622 	     see a zero value (going to -1) for the root PLT reference
13623 	     count.  */
13624 	  if (root_plt->refcount >= 0)
13625 	    {
13626 	      BFD_ASSERT (root_plt->refcount != 0);
13627 	      root_plt->refcount -= 1;
13628 	    }
13629 	  else
13630 	    /* A value of -1 means the symbol has become local, forced
13631 	       or seeing a hidden definition.  Any other negative value
13632 	       is an error.  */
13633 	    BFD_ASSERT (root_plt->refcount == -1);
13634 
13635 	  if (!call_reloc_p)
13636 	    arm_plt->noncall_refcount--;
13637 
13638 	  if (r_type == R_ARM_THM_CALL)
13639 	    arm_plt->maybe_thumb_refcount--;
13640 
13641 	  if (r_type == R_ARM_THM_JUMP24
13642 	      || r_type == R_ARM_THM_JUMP19)
13643 	    arm_plt->thumb_refcount--;
13644 	}
13645 
13646       if (may_become_dynamic_p)
13647 	{
13648 	  struct elf_dyn_relocs **pp;
13649 	  struct elf_dyn_relocs *p;
13650 
13651 	  if (h != NULL)
13652 	    pp = &(eh->dyn_relocs);
13653 	  else
13654 	    {
13655 	      Elf_Internal_Sym *isym;
13656 
13657 	      isym = bfd_sym_from_r_symndx (&globals->sym_cache,
13658 					    abfd, r_symndx);
13659 	      if (isym == NULL)
13660 		return FALSE;
13661 	      pp = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
13662 	      if (pp == NULL)
13663 		return FALSE;
13664 	    }
13665 	  for (; (p = *pp) != NULL; pp = &p->next)
13666 	    if (p->sec == sec)
13667 	      {
13668 		/* Everything must go for SEC.  */
13669 		*pp = p->next;
13670 		break;
13671 	      }
13672 	}
13673     }
13674 
13675   return TRUE;
13676 }
13677 
13678 /* Look through the relocs for a section during the first phase.  */
13679 
13680 static bfd_boolean
13681 elf32_arm_check_relocs (bfd *abfd, struct bfd_link_info *info,
13682 			asection *sec, const Elf_Internal_Rela *relocs)
13683 {
13684   Elf_Internal_Shdr *symtab_hdr;
13685   struct elf_link_hash_entry **sym_hashes;
13686   const Elf_Internal_Rela *rel;
13687   const Elf_Internal_Rela *rel_end;
13688   bfd *dynobj;
13689   asection *sreloc;
13690   struct elf32_arm_link_hash_table *htab;
13691   bfd_boolean call_reloc_p;
13692   bfd_boolean may_become_dynamic_p;
13693   bfd_boolean may_need_local_target_p;
13694   unsigned long nsyms;
13695 
13696   if (bfd_link_relocatable (info))
13697     return TRUE;
13698 
13699   BFD_ASSERT (is_arm_elf (abfd));
13700 
13701   htab = elf32_arm_hash_table (info);
13702   if (htab == NULL)
13703     return FALSE;
13704 
13705   sreloc = NULL;
13706 
13707   /* Create dynamic sections for relocatable executables so that we can
13708      copy relocations.  */
13709   if (htab->root.is_relocatable_executable
13710       && ! htab->root.dynamic_sections_created)
13711     {
13712       if (! _bfd_elf_link_create_dynamic_sections (abfd, info))
13713 	return FALSE;
13714     }
13715 
13716   if (htab->root.dynobj == NULL)
13717     htab->root.dynobj = abfd;
13718   if (!create_ifunc_sections (info))
13719     return FALSE;
13720 
13721   dynobj = htab->root.dynobj;
13722 
13723   symtab_hdr = & elf_symtab_hdr (abfd);
13724   sym_hashes = elf_sym_hashes (abfd);
13725   nsyms = NUM_SHDR_ENTRIES (symtab_hdr);
13726 
13727   rel_end = relocs + sec->reloc_count;
13728   for (rel = relocs; rel < rel_end; rel++)
13729     {
13730       Elf_Internal_Sym *isym;
13731       struct elf_link_hash_entry *h;
13732       struct elf32_arm_link_hash_entry *eh;
13733       unsigned long r_symndx;
13734       int r_type;
13735 
13736       r_symndx = ELF32_R_SYM (rel->r_info);
13737       r_type = ELF32_R_TYPE (rel->r_info);
13738       r_type = arm_real_reloc_type (htab, r_type);
13739 
13740       if (r_symndx >= nsyms
13741 	  /* PR 9934: It is possible to have relocations that do not
13742 	     refer to symbols, thus it is also possible to have an
13743 	     object file containing relocations but no symbol table.  */
13744 	  && (r_symndx > STN_UNDEF || nsyms > 0))
13745 	{
13746 	  (*_bfd_error_handler) (_("%B: bad symbol index: %d"), abfd,
13747 				   r_symndx);
13748 	  return FALSE;
13749 	}
13750 
13751       h = NULL;
13752       isym = NULL;
13753       if (nsyms > 0)
13754 	{
13755 	  if (r_symndx < symtab_hdr->sh_info)
13756 	    {
13757 	      /* A local symbol.  */
13758 	      isym = bfd_sym_from_r_symndx (&htab->sym_cache,
13759 					    abfd, r_symndx);
13760 	      if (isym == NULL)
13761 		return FALSE;
13762 	    }
13763 	  else
13764 	    {
13765 	      h = sym_hashes[r_symndx - symtab_hdr->sh_info];
13766 	      while (h->root.type == bfd_link_hash_indirect
13767 		     || h->root.type == bfd_link_hash_warning)
13768 		h = (struct elf_link_hash_entry *) h->root.u.i.link;
13769 
13770 	      /* PR15323, ref flags aren't set for references in the
13771 		 same object.  */
13772 	      h->root.non_ir_ref = 1;
13773 	    }
13774 	}
13775 
13776       eh = (struct elf32_arm_link_hash_entry *) h;
13777 
13778       call_reloc_p = FALSE;
13779       may_become_dynamic_p = FALSE;
13780       may_need_local_target_p = FALSE;
13781 
13782       /* Could be done earlier, if h were already available.  */
13783       r_type = elf32_arm_tls_transition (info, r_type, h);
13784       switch (r_type)
13785 	{
13786 	  case R_ARM_GOT32:
13787 	  case R_ARM_GOT_PREL:
13788 	  case R_ARM_TLS_GD32:
13789 	  case R_ARM_TLS_IE32:
13790 	  case R_ARM_TLS_GOTDESC:
13791 	  case R_ARM_TLS_DESCSEQ:
13792 	  case R_ARM_THM_TLS_DESCSEQ:
13793 	  case R_ARM_TLS_CALL:
13794 	  case R_ARM_THM_TLS_CALL:
13795 	    /* This symbol requires a global offset table entry.  */
13796 	    {
13797 	      int tls_type, old_tls_type;
13798 
13799 	      switch (r_type)
13800 		{
13801 		case R_ARM_TLS_GD32: tls_type = GOT_TLS_GD; break;
13802 
13803 		case R_ARM_TLS_IE32: tls_type = GOT_TLS_IE; break;
13804 
13805 		case R_ARM_TLS_GOTDESC:
13806 		case R_ARM_TLS_CALL: case R_ARM_THM_TLS_CALL:
13807 		case R_ARM_TLS_DESCSEQ: case R_ARM_THM_TLS_DESCSEQ:
13808 		  tls_type = GOT_TLS_GDESC; break;
13809 
13810 		default: tls_type = GOT_NORMAL; break;
13811 		}
13812 
13813 	      if (!bfd_link_executable (info) && (tls_type & GOT_TLS_IE))
13814 		info->flags |= DF_STATIC_TLS;
13815 
13816 	      if (h != NULL)
13817 		{
13818 		  h->got.refcount++;
13819 		  old_tls_type = elf32_arm_hash_entry (h)->tls_type;
13820 		}
13821 	      else
13822 		{
13823 		  /* This is a global offset table entry for a local symbol.  */
13824 		  if (!elf32_arm_allocate_local_sym_info (abfd))
13825 		    return FALSE;
13826 		  elf_local_got_refcounts (abfd)[r_symndx] += 1;
13827 		  old_tls_type = elf32_arm_local_got_tls_type (abfd) [r_symndx];
13828 		}
13829 
13830 	      /* If a variable is accessed with both tls methods, two
13831 		 slots may be created.  */
13832 	      if (GOT_TLS_GD_ANY_P (old_tls_type)
13833 		  && GOT_TLS_GD_ANY_P (tls_type))
13834 		tls_type |= old_tls_type;
13835 
13836 	      /* We will already have issued an error message if there
13837 		 is a TLS/non-TLS mismatch, based on the symbol
13838 		 type.  So just combine any TLS types needed.  */
13839 	      if (old_tls_type != GOT_UNKNOWN && old_tls_type != GOT_NORMAL
13840 		  && tls_type != GOT_NORMAL)
13841 		tls_type |= old_tls_type;
13842 
13843 	      /* If the symbol is accessed in both IE and GDESC
13844 		 method, we're able to relax. Turn off the GDESC flag,
13845 		 without messing up with any other kind of tls types
13846 		 that may be involved.  */
13847 	      if ((tls_type & GOT_TLS_IE) && (tls_type & GOT_TLS_GDESC))
13848 		tls_type &= ~GOT_TLS_GDESC;
13849 
13850 	      if (old_tls_type != tls_type)
13851 		{
13852 		  if (h != NULL)
13853 		    elf32_arm_hash_entry (h)->tls_type = tls_type;
13854 		  else
13855 		    elf32_arm_local_got_tls_type (abfd) [r_symndx] = tls_type;
13856 		}
13857 	    }
13858 	    /* Fall through.  */
13859 
13860 	  case R_ARM_TLS_LDM32:
13861 	    if (r_type == R_ARM_TLS_LDM32)
13862 		htab->tls_ldm_got.refcount++;
13863 	    /* Fall through.  */
13864 
13865 	  case R_ARM_GOTOFF32:
13866 	  case R_ARM_GOTPC:
13867 	    if (htab->root.sgot == NULL
13868 		&& !create_got_section (htab->root.dynobj, info))
13869 	      return FALSE;
13870 	    break;
13871 
13872 	  case R_ARM_PC24:
13873 	  case R_ARM_PLT32:
13874 	  case R_ARM_CALL:
13875 	  case R_ARM_JUMP24:
13876 	  case R_ARM_PREL31:
13877 	  case R_ARM_THM_CALL:
13878 	  case R_ARM_THM_JUMP24:
13879 	  case R_ARM_THM_JUMP19:
13880 	    call_reloc_p = TRUE;
13881 	    may_need_local_target_p = TRUE;
13882 	    break;
13883 
13884 	  case R_ARM_ABS12:
13885 	    /* VxWorks uses dynamic R_ARM_ABS12 relocations for
13886 	       ldr __GOTT_INDEX__ offsets.  */
13887 	    if (!htab->vxworks_p)
13888 	      {
13889 		may_need_local_target_p = TRUE;
13890 		break;
13891 	      }
13892 	    else goto jump_over;
13893 
13894 	    /* Fall through.  */
13895 
13896 	  case R_ARM_MOVW_ABS_NC:
13897 	  case R_ARM_MOVT_ABS:
13898 	  case R_ARM_THM_MOVW_ABS_NC:
13899 	  case R_ARM_THM_MOVT_ABS:
13900 	    if (bfd_link_pic (info))
13901 	      {
13902 		(*_bfd_error_handler)
13903 		  (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"),
13904 		   abfd, elf32_arm_howto_table_1[r_type].name,
13905 		   (h) ? h->root.root.string : "a local symbol");
13906 		bfd_set_error (bfd_error_bad_value);
13907 		return FALSE;
13908 	      }
13909 
13910 	    /* Fall through.  */
13911 	  case R_ARM_ABS32:
13912 	  case R_ARM_ABS32_NOI:
13913 	jump_over:
13914 	    if (h != NULL && bfd_link_executable (info))
13915 	      {
13916 		h->pointer_equality_needed = 1;
13917 	      }
13918 	    /* Fall through.  */
13919 	  case R_ARM_REL32:
13920 	  case R_ARM_REL32_NOI:
13921 	  case R_ARM_MOVW_PREL_NC:
13922 	  case R_ARM_MOVT_PREL:
13923 	  case R_ARM_THM_MOVW_PREL_NC:
13924 	  case R_ARM_THM_MOVT_PREL:
13925 
13926 	    /* Should the interworking branches be listed here?  */
13927 	    if ((bfd_link_pic (info) || htab->root.is_relocatable_executable)
13928 		&& (sec->flags & SEC_ALLOC) != 0)
13929 	      {
13930 		if (h == NULL
13931 		    && elf32_arm_howto_from_type (r_type)->pc_relative)
13932 		  {
13933 		    /* In shared libraries and relocatable executables,
13934 		       we treat local relative references as calls;
13935 		       see the related SYMBOL_CALLS_LOCAL code in
13936 		       allocate_dynrelocs.  */
13937 		    call_reloc_p = TRUE;
13938 		    may_need_local_target_p = TRUE;
13939 		  }
13940 		else
13941 		  /* We are creating a shared library or relocatable
13942 		     executable, and this is a reloc against a global symbol,
13943 		     or a non-PC-relative reloc against a local symbol.
13944 		     We may need to copy the reloc into the output.  */
13945 		  may_become_dynamic_p = TRUE;
13946 	      }
13947 	    else
13948 	      may_need_local_target_p = TRUE;
13949 	    break;
13950 
13951 	/* This relocation describes the C++ object vtable hierarchy.
13952 	   Reconstruct it for later use during GC.  */
13953 	case R_ARM_GNU_VTINHERIT:
13954 	  if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
13955 	    return FALSE;
13956 	  break;
13957 
13958 	/* This relocation describes which C++ vtable entries are actually
13959 	   used.  Record for later use during GC.  */
13960 	case R_ARM_GNU_VTENTRY:
13961 	  BFD_ASSERT (h != NULL);
13962 	  if (h != NULL
13963 	      && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_offset))
13964 	    return FALSE;
13965 	  break;
13966 	}
13967 
13968       if (h != NULL)
13969 	{
13970 	  if (call_reloc_p)
13971 	    /* We may need a .plt entry if the function this reloc
13972 	       refers to is in a different object, regardless of the
13973 	       symbol's type.  We can't tell for sure yet, because
13974 	       something later might force the symbol local.  */
13975 	    h->needs_plt = 1;
13976 	  else if (may_need_local_target_p)
13977 	    /* If this reloc is in a read-only section, we might
13978 	       need a copy reloc.  We can't check reliably at this
13979 	       stage whether the section is read-only, as input
13980 	       sections have not yet been mapped to output sections.
13981 	       Tentatively set the flag for now, and correct in
13982 	       adjust_dynamic_symbol.  */
13983 	    h->non_got_ref = 1;
13984 	}
13985 
13986       if (may_need_local_target_p
13987 	  && (h != NULL || ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC))
13988 	{
13989 	  union gotplt_union *root_plt;
13990 	  struct arm_plt_info *arm_plt;
13991 	  struct arm_local_iplt_info *local_iplt;
13992 
13993 	  if (h != NULL)
13994 	    {
13995 	      root_plt = &h->plt;
13996 	      arm_plt = &eh->plt;
13997 	    }
13998 	  else
13999 	    {
14000 	      local_iplt = elf32_arm_create_local_iplt (abfd, r_symndx);
14001 	      if (local_iplt == NULL)
14002 		return FALSE;
14003 	      root_plt = &local_iplt->root;
14004 	      arm_plt = &local_iplt->arm;
14005 	    }
14006 
14007 	  /* If the symbol is a function that doesn't bind locally,
14008 	     this relocation will need a PLT entry.  */
14009 	  if (root_plt->refcount != -1)
14010 	    root_plt->refcount += 1;
14011 
14012 	  if (!call_reloc_p)
14013 	    arm_plt->noncall_refcount++;
14014 
14015 	  /* It's too early to use htab->use_blx here, so we have to
14016 	     record possible blx references separately from
14017 	     relocs that definitely need a thumb stub.  */
14018 
14019 	  if (r_type == R_ARM_THM_CALL)
14020 	    arm_plt->maybe_thumb_refcount += 1;
14021 
14022 	  if (r_type == R_ARM_THM_JUMP24
14023 	      || r_type == R_ARM_THM_JUMP19)
14024 	    arm_plt->thumb_refcount += 1;
14025 	}
14026 
14027       if (may_become_dynamic_p)
14028 	{
14029 	  struct elf_dyn_relocs *p, **head;
14030 
14031 	  /* Create a reloc section in dynobj.  */
14032 	  if (sreloc == NULL)
14033 	    {
14034 	      sreloc = _bfd_elf_make_dynamic_reloc_section
14035 		(sec, dynobj, 2, abfd, ! htab->use_rel);
14036 
14037 	      if (sreloc == NULL)
14038 		return FALSE;
14039 
14040 	      /* BPABI objects never have dynamic relocations mapped.  */
14041 	      if (htab->symbian_p)
14042 		{
14043 		  flagword flags;
14044 
14045 		  flags = bfd_get_section_flags (dynobj, sreloc);
14046 		  flags &= ~(SEC_LOAD | SEC_ALLOC);
14047 		  bfd_set_section_flags (dynobj, sreloc, flags);
14048 		}
14049 	    }
14050 
14051 	  /* If this is a global symbol, count the number of
14052 	     relocations we need for this symbol.  */
14053 	  if (h != NULL)
14054 	    head = &((struct elf32_arm_link_hash_entry *) h)->dyn_relocs;
14055 	  else
14056 	    {
14057 	      head = elf32_arm_get_local_dynreloc_list (abfd, r_symndx, isym);
14058 	      if (head == NULL)
14059 		return FALSE;
14060 	    }
14061 
14062 	  p = *head;
14063 	  if (p == NULL || p->sec != sec)
14064 	    {
14065 	      bfd_size_type amt = sizeof *p;
14066 
14067 	      p = (struct elf_dyn_relocs *) bfd_alloc (htab->root.dynobj, amt);
14068 	      if (p == NULL)
14069 		return FALSE;
14070 	      p->next = *head;
14071 	      *head = p;
14072 	      p->sec = sec;
14073 	      p->count = 0;
14074 	      p->pc_count = 0;
14075 	    }
14076 
14077 	  if (elf32_arm_howto_from_type (r_type)->pc_relative)
14078 	    p->pc_count += 1;
14079 	  p->count += 1;
14080 	}
14081     }
14082 
14083   return TRUE;
14084 }
14085 
14086 /* Unwinding tables are not referenced directly.  This pass marks them as
14087    required if the corresponding code section is marked.  */
14088 
14089 static bfd_boolean
14090 elf32_arm_gc_mark_extra_sections (struct bfd_link_info *info,
14091 				  elf_gc_mark_hook_fn gc_mark_hook)
14092 {
14093   bfd *sub;
14094   Elf_Internal_Shdr **elf_shdrp;
14095   bfd_boolean again;
14096 
14097   _bfd_elf_gc_mark_extra_sections (info, gc_mark_hook);
14098 
14099   /* Marking EH data may cause additional code sections to be marked,
14100      requiring multiple passes.  */
14101   again = TRUE;
14102   while (again)
14103     {
14104       again = FALSE;
14105       for (sub = info->input_bfds; sub != NULL; sub = sub->link.next)
14106 	{
14107 	  asection *o;
14108 
14109 	  if (! is_arm_elf (sub))
14110 	    continue;
14111 
14112 	  elf_shdrp = elf_elfsections (sub);
14113 	  for (o = sub->sections; o != NULL; o = o->next)
14114 	    {
14115 	      Elf_Internal_Shdr *hdr;
14116 
14117 	      hdr = &elf_section_data (o)->this_hdr;
14118 	      if (hdr->sh_type == SHT_ARM_EXIDX
14119 		  && hdr->sh_link
14120 		  && hdr->sh_link < elf_numsections (sub)
14121 		  && !o->gc_mark
14122 		  && elf_shdrp[hdr->sh_link]->bfd_section->gc_mark)
14123 		{
14124 		  again = TRUE;
14125 		  if (!_bfd_elf_gc_mark (info, o, gc_mark_hook))
14126 		    return FALSE;
14127 		}
14128 	    }
14129 	}
14130     }
14131 
14132   return TRUE;
14133 }
14134 
14135 /* Treat mapping symbols as special target symbols.  */
14136 
14137 static bfd_boolean
14138 elf32_arm_is_target_special_symbol (bfd * abfd ATTRIBUTE_UNUSED, asymbol * sym)
14139 {
14140   return bfd_is_arm_special_symbol_name (sym->name,
14141 					 BFD_ARM_SPECIAL_SYM_TYPE_ANY);
14142 }
14143 
14144 /* This is a copy of elf_find_function() from elf.c except that
14145    ARM mapping symbols are ignored when looking for function names
14146    and STT_ARM_TFUNC is considered to a function type.  */
14147 
14148 static bfd_boolean
14149 arm_elf_find_function (bfd *         abfd ATTRIBUTE_UNUSED,
14150 		       asymbol **    symbols,
14151 		       asection *    section,
14152 		       bfd_vma       offset,
14153 		       const char ** filename_ptr,
14154 		       const char ** functionname_ptr)
14155 {
14156   const char * filename = NULL;
14157   asymbol * func = NULL;
14158   bfd_vma low_func = 0;
14159   asymbol ** p;
14160 
14161   for (p = symbols; *p != NULL; p++)
14162     {
14163       elf_symbol_type *q;
14164 
14165       q = (elf_symbol_type *) *p;
14166 
14167       switch (ELF_ST_TYPE (q->internal_elf_sym.st_info))
14168 	{
14169 	default:
14170 	  break;
14171 	case STT_FILE:
14172 	  filename = bfd_asymbol_name (&q->symbol);
14173 	  break;
14174 	case STT_FUNC:
14175 	case STT_ARM_TFUNC:
14176 	case STT_NOTYPE:
14177 	  /* Skip mapping symbols.  */
14178 	  if ((q->symbol.flags & BSF_LOCAL)
14179 	      && bfd_is_arm_special_symbol_name (q->symbol.name,
14180 		    BFD_ARM_SPECIAL_SYM_TYPE_ANY))
14181 	    continue;
14182 	  /* Fall through.  */
14183 	  if (bfd_get_section (&q->symbol) == section
14184 	      && q->symbol.value >= low_func
14185 	      && q->symbol.value <= offset)
14186 	    {
14187 	      func = (asymbol *) q;
14188 	      low_func = q->symbol.value;
14189 	    }
14190 	  break;
14191 	}
14192     }
14193 
14194   if (func == NULL)
14195     return FALSE;
14196 
14197   if (filename_ptr)
14198     *filename_ptr = filename;
14199   if (functionname_ptr)
14200     *functionname_ptr = bfd_asymbol_name (func);
14201 
14202   return TRUE;
14203 }
14204 
14205 
14206 /* Find the nearest line to a particular section and offset, for error
14207    reporting.   This code is a duplicate of the code in elf.c, except
14208    that it uses arm_elf_find_function.  */
14209 
14210 static bfd_boolean
14211 elf32_arm_find_nearest_line (bfd *          abfd,
14212 			     asymbol **     symbols,
14213 			     asection *     section,
14214 			     bfd_vma        offset,
14215 			     const char **  filename_ptr,
14216 			     const char **  functionname_ptr,
14217 			     unsigned int * line_ptr,
14218 			     unsigned int * discriminator_ptr)
14219 {
14220   bfd_boolean found = FALSE;
14221 
14222   if (_bfd_dwarf2_find_nearest_line (abfd, symbols, NULL, section, offset,
14223 				     filename_ptr, functionname_ptr,
14224 				     line_ptr, discriminator_ptr,
14225 				     dwarf_debug_sections, 0,
14226 				     & elf_tdata (abfd)->dwarf2_find_line_info))
14227     {
14228       if (!*functionname_ptr)
14229 	arm_elf_find_function (abfd, symbols, section, offset,
14230 			       *filename_ptr ? NULL : filename_ptr,
14231 			       functionname_ptr);
14232 
14233       return TRUE;
14234     }
14235 
14236   /* Skip _bfd_dwarf1_find_nearest_line since no known ARM toolchain
14237      uses DWARF1.  */
14238 
14239   if (! _bfd_stab_section_find_nearest_line (abfd, symbols, section, offset,
14240 					     & found, filename_ptr,
14241 					     functionname_ptr, line_ptr,
14242 					     & elf_tdata (abfd)->line_info))
14243     return FALSE;
14244 
14245   if (found && (*functionname_ptr || *line_ptr))
14246     return TRUE;
14247 
14248   if (symbols == NULL)
14249     return FALSE;
14250 
14251   if (! arm_elf_find_function (abfd, symbols, section, offset,
14252 			       filename_ptr, functionname_ptr))
14253     return FALSE;
14254 
14255   *line_ptr = 0;
14256   return TRUE;
14257 }
14258 
14259 static bfd_boolean
14260 elf32_arm_find_inliner_info (bfd *          abfd,
14261 			     const char **  filename_ptr,
14262 			     const char **  functionname_ptr,
14263 			     unsigned int * line_ptr)
14264 {
14265   bfd_boolean found;
14266   found = _bfd_dwarf2_find_inliner_info (abfd, filename_ptr,
14267 					 functionname_ptr, line_ptr,
14268 					 & elf_tdata (abfd)->dwarf2_find_line_info);
14269   return found;
14270 }
14271 
14272 /* Adjust a symbol defined by a dynamic object and referenced by a
14273    regular object.  The current definition is in some section of the
14274    dynamic object, but we're not including those sections.  We have to
14275    change the definition to something the rest of the link can
14276    understand.  */
14277 
14278 static bfd_boolean
14279 elf32_arm_adjust_dynamic_symbol (struct bfd_link_info * info,
14280 				 struct elf_link_hash_entry * h)
14281 {
14282   bfd * dynobj;
14283   asection * s;
14284   struct elf32_arm_link_hash_entry * eh;
14285   struct elf32_arm_link_hash_table *globals;
14286 
14287   globals = elf32_arm_hash_table (info);
14288   if (globals == NULL)
14289     return FALSE;
14290 
14291   dynobj = elf_hash_table (info)->dynobj;
14292 
14293   /* Make sure we know what is going on here.  */
14294   BFD_ASSERT (dynobj != NULL
14295 	      && (h->needs_plt
14296 		  || h->type == STT_GNU_IFUNC
14297 		  || h->u.weakdef != NULL
14298 		  || (h->def_dynamic
14299 		      && h->ref_regular
14300 		      && !h->def_regular)));
14301 
14302   eh = (struct elf32_arm_link_hash_entry *) h;
14303 
14304   /* If this is a function, put it in the procedure linkage table.  We
14305      will fill in the contents of the procedure linkage table later,
14306      when we know the address of the .got section.  */
14307   if (h->type == STT_FUNC || h->type == STT_GNU_IFUNC || h->needs_plt)
14308     {
14309       /* Calls to STT_GNU_IFUNC symbols always use a PLT, even if the
14310 	 symbol binds locally.  */
14311       if (h->plt.refcount <= 0
14312 	  || (h->type != STT_GNU_IFUNC
14313 	      && (SYMBOL_CALLS_LOCAL (info, h)
14314 		  || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT
14315 		      && h->root.type == bfd_link_hash_undefweak))))
14316 	{
14317 	  /* This case can occur if we saw a PLT32 reloc in an input
14318 	     file, but the symbol was never referred to by a dynamic
14319 	     object, or if all references were garbage collected.  In
14320 	     such a case, we don't actually need to build a procedure
14321 	     linkage table, and we can just do a PC24 reloc instead.  */
14322 	  h->plt.offset = (bfd_vma) -1;
14323 	  eh->plt.thumb_refcount = 0;
14324 	  eh->plt.maybe_thumb_refcount = 0;
14325 	  eh->plt.noncall_refcount = 0;
14326 	  h->needs_plt = 0;
14327 	}
14328 
14329       return TRUE;
14330     }
14331   else
14332     {
14333       /* It's possible that we incorrectly decided a .plt reloc was
14334 	 needed for an R_ARM_PC24 or similar reloc to a non-function sym
14335 	 in check_relocs.  We can't decide accurately between function
14336 	 and non-function syms in check-relocs; Objects loaded later in
14337 	 the link may change h->type.  So fix it now.  */
14338       h->plt.offset = (bfd_vma) -1;
14339       eh->plt.thumb_refcount = 0;
14340       eh->plt.maybe_thumb_refcount = 0;
14341       eh->plt.noncall_refcount = 0;
14342     }
14343 
14344   /* If this is a weak symbol, and there is a real definition, the
14345      processor independent code will have arranged for us to see the
14346      real definition first, and we can just use the same value.  */
14347   if (h->u.weakdef != NULL)
14348     {
14349       BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined
14350 		  || h->u.weakdef->root.type == bfd_link_hash_defweak);
14351       h->root.u.def.section = h->u.weakdef->root.u.def.section;
14352       h->root.u.def.value = h->u.weakdef->root.u.def.value;
14353       return TRUE;
14354     }
14355 
14356   /* If there are no non-GOT references, we do not need a copy
14357      relocation.  */
14358   if (!h->non_got_ref)
14359     return TRUE;
14360 
14361   /* This is a reference to a symbol defined by a dynamic object which
14362      is not a function.  */
14363 
14364   /* If we are creating a shared library, we must presume that the
14365      only references to the symbol are via the global offset table.
14366      For such cases we need not do anything here; the relocations will
14367      be handled correctly by relocate_section.  Relocatable executables
14368      can reference data in shared objects directly, so we don't need to
14369      do anything here.  */
14370   if (bfd_link_pic (info) || globals->root.is_relocatable_executable)
14371     return TRUE;
14372 
14373   /* We must allocate the symbol in our .dynbss section, which will
14374      become part of the .bss section of the executable.  There will be
14375      an entry for this symbol in the .dynsym section.  The dynamic
14376      object will contain position independent code, so all references
14377      from the dynamic object to this symbol will go through the global
14378      offset table.  The dynamic linker will use the .dynsym entry to
14379      determine the address it must put in the global offset table, so
14380      both the dynamic object and the regular object will refer to the
14381      same memory location for the variable.  */
14382   s = bfd_get_linker_section (dynobj, ".dynbss");
14383   BFD_ASSERT (s != NULL);
14384 
14385   /* If allowed, we must generate a R_ARM_COPY reloc to tell the dynamic
14386      linker to copy the initial value out of the dynamic object and into
14387      the runtime process image.  We need to remember the offset into the
14388      .rel(a).bss section we are going to use.  */
14389   if (info->nocopyreloc == 0
14390       && (h->root.u.def.section->flags & SEC_ALLOC) != 0
14391       && h->size != 0)
14392     {
14393       asection *srel;
14394 
14395       srel = bfd_get_linker_section (dynobj, RELOC_SECTION (globals, ".bss"));
14396       elf32_arm_allocate_dynrelocs (info, srel, 1);
14397       h->needs_copy = 1;
14398     }
14399 
14400   return _bfd_elf_adjust_dynamic_copy (info, h, s);
14401 }
14402 
14403 /* Allocate space in .plt, .got and associated reloc sections for
14404    dynamic relocs.  */
14405 
14406 static bfd_boolean
14407 allocate_dynrelocs_for_symbol (struct elf_link_hash_entry *h, void * inf)
14408 {
14409   struct bfd_link_info *info;
14410   struct elf32_arm_link_hash_table *htab;
14411   struct elf32_arm_link_hash_entry *eh;
14412   struct elf_dyn_relocs *p;
14413 
14414   if (h->root.type == bfd_link_hash_indirect)
14415     return TRUE;
14416 
14417   eh = (struct elf32_arm_link_hash_entry *) h;
14418 
14419   info = (struct bfd_link_info *) inf;
14420   htab = elf32_arm_hash_table (info);
14421   if (htab == NULL)
14422     return FALSE;
14423 
14424   if ((htab->root.dynamic_sections_created || h->type == STT_GNU_IFUNC)
14425       && h->plt.refcount > 0)
14426     {
14427       /* Make sure this symbol is output as a dynamic symbol.
14428 	 Undefined weak syms won't yet be marked as dynamic.  */
14429       if (h->dynindx == -1
14430 	  && !h->forced_local)
14431 	{
14432 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
14433 	    return FALSE;
14434 	}
14435 
14436       /* If the call in the PLT entry binds locally, the associated
14437 	 GOT entry should use an R_ARM_IRELATIVE relocation instead of
14438 	 the usual R_ARM_JUMP_SLOT.  Put it in the .iplt section rather
14439 	 than the .plt section.  */
14440       if (h->type == STT_GNU_IFUNC && SYMBOL_CALLS_LOCAL (info, h))
14441 	{
14442 	  eh->is_iplt = 1;
14443 	  if (eh->plt.noncall_refcount == 0
14444 	      && SYMBOL_REFERENCES_LOCAL (info, h))
14445 	    /* All non-call references can be resolved directly.
14446 	       This means that they can (and in some cases, must)
14447 	       resolve directly to the run-time target, rather than
14448 	       to the PLT.  That in turns means that any .got entry
14449 	       would be equal to the .igot.plt entry, so there's
14450 	       no point having both.  */
14451 	    h->got.refcount = 0;
14452 	}
14453 
14454       if (bfd_link_pic (info)
14455 	  || eh->is_iplt
14456 	  || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h))
14457 	{
14458 	  elf32_arm_allocate_plt_entry (info, eh->is_iplt, &h->plt, &eh->plt);
14459 
14460 	  /* If this symbol is not defined in a regular file, and we are
14461 	     not generating a shared library, then set the symbol to this
14462 	     location in the .plt.  This is required to make function
14463 	     pointers compare as equal between the normal executable and
14464 	     the shared library.  */
14465 	  if (! bfd_link_pic (info)
14466 	      && !h->def_regular)
14467 	    {
14468 	      h->root.u.def.section = htab->root.splt;
14469 	      h->root.u.def.value = h->plt.offset;
14470 
14471 	      /* Make sure the function is not marked as Thumb, in case
14472 		 it is the target of an ABS32 relocation, which will
14473 		 point to the PLT entry.  */
14474 	      ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14475 	    }
14476 
14477 	  /* VxWorks executables have a second set of relocations for
14478 	     each PLT entry.  They go in a separate relocation section,
14479 	     which is processed by the kernel loader.  */
14480 	  if (htab->vxworks_p && !bfd_link_pic (info))
14481 	    {
14482 	      /* There is a relocation for the initial PLT entry:
14483 		 an R_ARM_32 relocation for _GLOBAL_OFFSET_TABLE_.  */
14484 	      if (h->plt.offset == htab->plt_header_size)
14485 		elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 1);
14486 
14487 	      /* There are two extra relocations for each subsequent
14488 		 PLT entry: an R_ARM_32 relocation for the GOT entry,
14489 		 and an R_ARM_32 relocation for the PLT entry.  */
14490 	      elf32_arm_allocate_dynrelocs (info, htab->srelplt2, 2);
14491 	    }
14492 	}
14493       else
14494 	{
14495 	  h->plt.offset = (bfd_vma) -1;
14496 	  h->needs_plt = 0;
14497 	}
14498     }
14499   else
14500     {
14501       h->plt.offset = (bfd_vma) -1;
14502       h->needs_plt = 0;
14503     }
14504 
14505   eh = (struct elf32_arm_link_hash_entry *) h;
14506   eh->tlsdesc_got = (bfd_vma) -1;
14507 
14508   if (h->got.refcount > 0)
14509     {
14510       asection *s;
14511       bfd_boolean dyn;
14512       int tls_type = elf32_arm_hash_entry (h)->tls_type;
14513       int indx;
14514 
14515       /* Make sure this symbol is output as a dynamic symbol.
14516 	 Undefined weak syms won't yet be marked as dynamic.  */
14517       if (h->dynindx == -1
14518 	  && !h->forced_local)
14519 	{
14520 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
14521 	    return FALSE;
14522 	}
14523 
14524       if (!htab->symbian_p)
14525 	{
14526 	  s = htab->root.sgot;
14527 	  h->got.offset = s->size;
14528 
14529 	  if (tls_type == GOT_UNKNOWN)
14530 	    abort ();
14531 
14532 	  if (tls_type == GOT_NORMAL)
14533 	    /* Non-TLS symbols need one GOT slot.  */
14534 	    s->size += 4;
14535 	  else
14536 	    {
14537 	      if (tls_type & GOT_TLS_GDESC)
14538 		{
14539 		  /* R_ARM_TLS_DESC needs 2 GOT slots.  */
14540 		  eh->tlsdesc_got
14541 		    = (htab->root.sgotplt->size
14542 		       - elf32_arm_compute_jump_table_size (htab));
14543 		  htab->root.sgotplt->size += 8;
14544 		  h->got.offset = (bfd_vma) -2;
14545 		  /* plt.got_offset needs to know there's a TLS_DESC
14546 		     reloc in the middle of .got.plt.  */
14547 		  htab->num_tls_desc++;
14548 		}
14549 
14550 	      if (tls_type & GOT_TLS_GD)
14551 		{
14552 		  /* R_ARM_TLS_GD32 needs 2 consecutive GOT slots.  If
14553 		     the symbol is both GD and GDESC, got.offset may
14554 		     have been overwritten.  */
14555 		  h->got.offset = s->size;
14556 		  s->size += 8;
14557 		}
14558 
14559 	      if (tls_type & GOT_TLS_IE)
14560 		/* R_ARM_TLS_IE32 needs one GOT slot.  */
14561 		s->size += 4;
14562 	    }
14563 
14564 	  dyn = htab->root.dynamic_sections_created;
14565 
14566 	  indx = 0;
14567 	  if (WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn,
14568 					       bfd_link_pic (info),
14569 					       h)
14570 	      && (!bfd_link_pic (info)
14571 		  || !SYMBOL_REFERENCES_LOCAL (info, h)))
14572 	    indx = h->dynindx;
14573 
14574 	  if (tls_type != GOT_NORMAL
14575 	      && (bfd_link_pic (info) || indx != 0)
14576 	      && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14577 		  || h->root.type != bfd_link_hash_undefweak))
14578 	    {
14579 	      if (tls_type & GOT_TLS_IE)
14580 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14581 
14582 	      if (tls_type & GOT_TLS_GD)
14583 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14584 
14585 	      if (tls_type & GOT_TLS_GDESC)
14586 		{
14587 		  elf32_arm_allocate_dynrelocs (info, htab->root.srelplt, 1);
14588 		  /* GDESC needs a trampoline to jump to.  */
14589 		  htab->tls_trampoline = -1;
14590 		}
14591 
14592 	      /* Only GD needs it.  GDESC just emits one relocation per
14593 		 2 entries.  */
14594 	      if ((tls_type & GOT_TLS_GD) && indx != 0)
14595 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14596 	    }
14597 	  else if (indx != -1 && !SYMBOL_REFERENCES_LOCAL (info, h))
14598 	    {
14599 	      if (htab->root.dynamic_sections_created)
14600 		/* Reserve room for the GOT entry's R_ARM_GLOB_DAT relocation.  */
14601 		elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14602 	    }
14603 	  else if (h->type == STT_GNU_IFUNC
14604 		   && eh->plt.noncall_refcount == 0)
14605 	    /* No non-call references resolve the STT_GNU_IFUNC's PLT entry;
14606 	       they all resolve dynamically instead.  Reserve room for the
14607 	       GOT entry's R_ARM_IRELATIVE relocation.  */
14608 	    elf32_arm_allocate_irelocs (info, htab->root.srelgot, 1);
14609 	  else if (bfd_link_pic (info)
14610 		   && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
14611 		       || h->root.type != bfd_link_hash_undefweak))
14612 	    /* Reserve room for the GOT entry's R_ARM_RELATIVE relocation.  */
14613 	    elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
14614 	}
14615     }
14616   else
14617     h->got.offset = (bfd_vma) -1;
14618 
14619   /* Allocate stubs for exported Thumb functions on v4t.  */
14620   if (!htab->use_blx && h->dynindx != -1
14621       && h->def_regular
14622       && ARM_GET_SYM_BRANCH_TYPE (h->target_internal) == ST_BRANCH_TO_THUMB
14623       && ELF_ST_VISIBILITY (h->other) == STV_DEFAULT)
14624     {
14625       struct elf_link_hash_entry * th;
14626       struct bfd_link_hash_entry * bh;
14627       struct elf_link_hash_entry * myh;
14628       char name[1024];
14629       asection *s;
14630       bh = NULL;
14631       /* Create a new symbol to regist the real location of the function.  */
14632       s = h->root.u.def.section;
14633       sprintf (name, "__real_%s", h->root.root.string);
14634       _bfd_generic_link_add_one_symbol (info, s->owner,
14635 					name, BSF_GLOBAL, s,
14636 					h->root.u.def.value,
14637 					NULL, TRUE, FALSE, &bh);
14638 
14639       myh = (struct elf_link_hash_entry *) bh;
14640       myh->type = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
14641       myh->forced_local = 1;
14642       ARM_SET_SYM_BRANCH_TYPE (myh->target_internal, ST_BRANCH_TO_THUMB);
14643       eh->export_glue = myh;
14644       th = record_arm_to_thumb_glue (info, h);
14645       /* Point the symbol at the stub.  */
14646       h->type = ELF_ST_INFO (ELF_ST_BIND (h->type), STT_FUNC);
14647       ARM_SET_SYM_BRANCH_TYPE (h->target_internal, ST_BRANCH_TO_ARM);
14648       h->root.u.def.section = th->root.u.def.section;
14649       h->root.u.def.value = th->root.u.def.value & ~1;
14650     }
14651 
14652   if (eh->dyn_relocs == NULL)
14653     return TRUE;
14654 
14655   /* In the shared -Bsymbolic case, discard space allocated for
14656      dynamic pc-relative relocs against symbols which turn out to be
14657      defined in regular objects.  For the normal shared case, discard
14658      space for pc-relative relocs that have become local due to symbol
14659      visibility changes.  */
14660 
14661   if (bfd_link_pic (info) || htab->root.is_relocatable_executable)
14662     {
14663       /* Relocs that use pc_count are PC-relative forms, which will appear
14664 	 on something like ".long foo - ." or "movw REG, foo - .".  We want
14665 	 calls to protected symbols to resolve directly to the function
14666 	 rather than going via the plt.  If people want function pointer
14667 	 comparisons to work as expected then they should avoid writing
14668 	 assembly like ".long foo - .".  */
14669       if (SYMBOL_CALLS_LOCAL (info, h))
14670 	{
14671 	  struct elf_dyn_relocs **pp;
14672 
14673 	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14674 	    {
14675 	      p->count -= p->pc_count;
14676 	      p->pc_count = 0;
14677 	      if (p->count == 0)
14678 		*pp = p->next;
14679 	      else
14680 		pp = &p->next;
14681 	    }
14682 	}
14683 
14684       if (htab->vxworks_p)
14685 	{
14686 	  struct elf_dyn_relocs **pp;
14687 
14688 	  for (pp = &eh->dyn_relocs; (p = *pp) != NULL; )
14689 	    {
14690 	      if (strcmp (p->sec->output_section->name, ".tls_vars") == 0)
14691 		*pp = p->next;
14692 	      else
14693 		pp = &p->next;
14694 	    }
14695 	}
14696 
14697       /* Also discard relocs on undefined weak syms with non-default
14698 	 visibility.  */
14699       if (eh->dyn_relocs != NULL
14700 	  && h->root.type == bfd_link_hash_undefweak)
14701 	{
14702 	  if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT)
14703 	    eh->dyn_relocs = NULL;
14704 
14705 	  /* Make sure undefined weak symbols are output as a dynamic
14706 	     symbol in PIEs.  */
14707 	  else if (h->dynindx == -1
14708 		   && !h->forced_local)
14709 	    {
14710 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
14711 		return FALSE;
14712 	    }
14713 	}
14714 
14715       else if (htab->root.is_relocatable_executable && h->dynindx == -1
14716 	       && h->root.type == bfd_link_hash_new)
14717 	{
14718 	  /* Output absolute symbols so that we can create relocations
14719 	     against them.  For normal symbols we output a relocation
14720 	     against the section that contains them.  */
14721 	  if (! bfd_elf_link_record_dynamic_symbol (info, h))
14722 	    return FALSE;
14723 	}
14724 
14725     }
14726   else
14727     {
14728       /* For the non-shared case, discard space for relocs against
14729 	 symbols which turn out to need copy relocs or are not
14730 	 dynamic.  */
14731 
14732       if (!h->non_got_ref
14733 	  && ((h->def_dynamic
14734 	       && !h->def_regular)
14735 	      || (htab->root.dynamic_sections_created
14736 		  && (h->root.type == bfd_link_hash_undefweak
14737 		      || h->root.type == bfd_link_hash_undefined))))
14738 	{
14739 	  /* Make sure this symbol is output as a dynamic symbol.
14740 	     Undefined weak syms won't yet be marked as dynamic.  */
14741 	  if (h->dynindx == -1
14742 	      && !h->forced_local)
14743 	    {
14744 	      if (! bfd_elf_link_record_dynamic_symbol (info, h))
14745 		return FALSE;
14746 	    }
14747 
14748 	  /* If that succeeded, we know we'll be keeping all the
14749 	     relocs.  */
14750 	  if (h->dynindx != -1)
14751 	    goto keep;
14752 	}
14753 
14754       eh->dyn_relocs = NULL;
14755 
14756     keep: ;
14757     }
14758 
14759   /* Finally, allocate space.  */
14760   for (p = eh->dyn_relocs; p != NULL; p = p->next)
14761     {
14762       asection *sreloc = elf_section_data (p->sec)->sreloc;
14763       if (h->type == STT_GNU_IFUNC
14764 	  && eh->plt.noncall_refcount == 0
14765 	  && SYMBOL_REFERENCES_LOCAL (info, h))
14766 	elf32_arm_allocate_irelocs (info, sreloc, p->count);
14767       else
14768 	elf32_arm_allocate_dynrelocs (info, sreloc, p->count);
14769     }
14770 
14771   return TRUE;
14772 }
14773 
14774 /* Find any dynamic relocs that apply to read-only sections.  */
14775 
14776 static bfd_boolean
14777 elf32_arm_readonly_dynrelocs (struct elf_link_hash_entry * h, void * inf)
14778 {
14779   struct elf32_arm_link_hash_entry * eh;
14780   struct elf_dyn_relocs * p;
14781 
14782   eh = (struct elf32_arm_link_hash_entry *) h;
14783   for (p = eh->dyn_relocs; p != NULL; p = p->next)
14784     {
14785       asection *s = p->sec;
14786 
14787       if (s != NULL && (s->flags & SEC_READONLY) != 0)
14788 	{
14789 	  struct bfd_link_info *info = (struct bfd_link_info *) inf;
14790 
14791           if (info->warn_shared_textrel)
14792             (*_bfd_error_handler)
14793               (_("warning: dynamic relocation to `%s' in readonly section `%s'"),
14794               h->root.root.string, s->name);
14795 	  info->flags |= DF_TEXTREL;
14796 
14797 	  /* Not an error, just cut short the traversal.  */
14798 	  return FALSE;
14799 	}
14800     }
14801   return TRUE;
14802 }
14803 
14804 void
14805 bfd_elf32_arm_set_byteswap_code (struct bfd_link_info *info,
14806 				 int byteswap_code)
14807 {
14808   struct elf32_arm_link_hash_table *globals;
14809 
14810   globals = elf32_arm_hash_table (info);
14811   if (globals == NULL)
14812     return;
14813 
14814   globals->byteswap_code = byteswap_code;
14815 }
14816 
14817 /* Set the sizes of the dynamic sections.  */
14818 
14819 static bfd_boolean
14820 elf32_arm_size_dynamic_sections (bfd * output_bfd ATTRIBUTE_UNUSED,
14821 				 struct bfd_link_info * info)
14822 {
14823   bfd * dynobj;
14824   asection * s;
14825   bfd_boolean plt;
14826   bfd_boolean relocs;
14827   bfd *ibfd;
14828   struct elf32_arm_link_hash_table *htab;
14829 
14830   htab = elf32_arm_hash_table (info);
14831   if (htab == NULL)
14832     return FALSE;
14833 
14834   dynobj = elf_hash_table (info)->dynobj;
14835   BFD_ASSERT (dynobj != NULL);
14836   check_use_blx (htab);
14837 
14838   if (elf_hash_table (info)->dynamic_sections_created)
14839     {
14840       /* Set the contents of the .interp section to the interpreter.  */
14841       if (bfd_link_executable (info) && !info->nointerp)
14842 	{
14843 	  s = bfd_get_linker_section (dynobj, ".interp");
14844 	  BFD_ASSERT (s != NULL);
14845 	  s->size = sizeof ELF_DYNAMIC_INTERPRETER;
14846 	  s->contents = (unsigned char *) ELF_DYNAMIC_INTERPRETER;
14847 	}
14848     }
14849 
14850   /* Set up .got offsets for local syms, and space for local dynamic
14851      relocs.  */
14852   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
14853     {
14854       bfd_signed_vma *local_got;
14855       bfd_signed_vma *end_local_got;
14856       struct arm_local_iplt_info **local_iplt_ptr, *local_iplt;
14857       char *local_tls_type;
14858       bfd_vma *local_tlsdesc_gotent;
14859       bfd_size_type locsymcount;
14860       Elf_Internal_Shdr *symtab_hdr;
14861       asection *srel;
14862       bfd_boolean is_vxworks = htab->vxworks_p;
14863       unsigned int symndx;
14864 
14865       if (! is_arm_elf (ibfd))
14866 	continue;
14867 
14868       for (s = ibfd->sections; s != NULL; s = s->next)
14869 	{
14870 	  struct elf_dyn_relocs *p;
14871 
14872 	  for (p = (struct elf_dyn_relocs *)
14873 		   elf_section_data (s)->local_dynrel; p != NULL; p = p->next)
14874 	    {
14875 	      if (!bfd_is_abs_section (p->sec)
14876 		  && bfd_is_abs_section (p->sec->output_section))
14877 		{
14878 		  /* Input section has been discarded, either because
14879 		     it is a copy of a linkonce section or due to
14880 		     linker script /DISCARD/, so we'll be discarding
14881 		     the relocs too.  */
14882 		}
14883 	      else if (is_vxworks
14884 		       && strcmp (p->sec->output_section->name,
14885 				  ".tls_vars") == 0)
14886 		{
14887 		  /* Relocations in vxworks .tls_vars sections are
14888 		     handled specially by the loader.  */
14889 		}
14890 	      else if (p->count != 0)
14891 		{
14892 		  srel = elf_section_data (p->sec)->sreloc;
14893 		  elf32_arm_allocate_dynrelocs (info, srel, p->count);
14894 		  if ((p->sec->output_section->flags & SEC_READONLY) != 0)
14895 		    info->flags |= DF_TEXTREL;
14896 		}
14897 	    }
14898 	}
14899 
14900       local_got = elf_local_got_refcounts (ibfd);
14901       if (!local_got)
14902 	continue;
14903 
14904       symtab_hdr = & elf_symtab_hdr (ibfd);
14905       locsymcount = symtab_hdr->sh_info;
14906       end_local_got = local_got + locsymcount;
14907       local_iplt_ptr = elf32_arm_local_iplt (ibfd);
14908       local_tls_type = elf32_arm_local_got_tls_type (ibfd);
14909       local_tlsdesc_gotent = elf32_arm_local_tlsdesc_gotent (ibfd);
14910       symndx = 0;
14911       s = htab->root.sgot;
14912       srel = htab->root.srelgot;
14913       for (; local_got < end_local_got;
14914 	   ++local_got, ++local_iplt_ptr, ++local_tls_type,
14915 	   ++local_tlsdesc_gotent, ++symndx)
14916 	{
14917 	  *local_tlsdesc_gotent = (bfd_vma) -1;
14918 	  local_iplt = *local_iplt_ptr;
14919 	  if (local_iplt != NULL)
14920 	    {
14921 	      struct elf_dyn_relocs *p;
14922 
14923 	      if (local_iplt->root.refcount > 0)
14924 		{
14925 		  elf32_arm_allocate_plt_entry (info, TRUE,
14926 						&local_iplt->root,
14927 						&local_iplt->arm);
14928 		  if (local_iplt->arm.noncall_refcount == 0)
14929 		    /* All references to the PLT are calls, so all
14930 		       non-call references can resolve directly to the
14931 		       run-time target.  This means that the .got entry
14932 		       would be the same as the .igot.plt entry, so there's
14933 		       no point creating both.  */
14934 		    *local_got = 0;
14935 		}
14936 	      else
14937 		{
14938 		  BFD_ASSERT (local_iplt->arm.noncall_refcount == 0);
14939 		  local_iplt->root.offset = (bfd_vma) -1;
14940 		}
14941 
14942 	      for (p = local_iplt->dyn_relocs; p != NULL; p = p->next)
14943 		{
14944 		  asection *psrel;
14945 
14946 		  psrel = elf_section_data (p->sec)->sreloc;
14947 		  if (local_iplt->arm.noncall_refcount == 0)
14948 		    elf32_arm_allocate_irelocs (info, psrel, p->count);
14949 		  else
14950 		    elf32_arm_allocate_dynrelocs (info, psrel, p->count);
14951 		}
14952 	    }
14953 	  if (*local_got > 0)
14954 	    {
14955 	      Elf_Internal_Sym *isym;
14956 
14957 	      *local_got = s->size;
14958 	      if (*local_tls_type & GOT_TLS_GD)
14959 		/* TLS_GD relocs need an 8-byte structure in the GOT.  */
14960 		s->size += 8;
14961 	      if (*local_tls_type & GOT_TLS_GDESC)
14962 		{
14963 		  *local_tlsdesc_gotent = htab->root.sgotplt->size
14964 		    - elf32_arm_compute_jump_table_size (htab);
14965 		  htab->root.sgotplt->size += 8;
14966 		  *local_got = (bfd_vma) -2;
14967 		  /* plt.got_offset needs to know there's a TLS_DESC
14968 		     reloc in the middle of .got.plt.  */
14969 		  htab->num_tls_desc++;
14970 		}
14971 	      if (*local_tls_type & GOT_TLS_IE)
14972 		s->size += 4;
14973 
14974 	      if (*local_tls_type & GOT_NORMAL)
14975 		{
14976 		  /* If the symbol is both GD and GDESC, *local_got
14977 		     may have been overwritten.  */
14978 		  *local_got = s->size;
14979 		  s->size += 4;
14980 		}
14981 
14982 	      isym = bfd_sym_from_r_symndx (&htab->sym_cache, ibfd, symndx);
14983 	      if (isym == NULL)
14984 		return FALSE;
14985 
14986 	      /* If all references to an STT_GNU_IFUNC PLT are calls,
14987 		 then all non-call references, including this GOT entry,
14988 		 resolve directly to the run-time target.  */
14989 	      if (ELF32_ST_TYPE (isym->st_info) == STT_GNU_IFUNC
14990 		  && (local_iplt == NULL
14991 		      || local_iplt->arm.noncall_refcount == 0))
14992 		elf32_arm_allocate_irelocs (info, srel, 1);
14993 	      else if (bfd_link_pic (info) || output_bfd->flags & DYNAMIC)
14994 		{
14995 		  if ((bfd_link_pic (info) && !(*local_tls_type & GOT_TLS_GDESC))
14996 		      || *local_tls_type & GOT_TLS_GD)
14997 		    elf32_arm_allocate_dynrelocs (info, srel, 1);
14998 
14999 		  if (bfd_link_pic (info) && *local_tls_type & GOT_TLS_GDESC)
15000 		    {
15001 		      elf32_arm_allocate_dynrelocs (info,
15002 						    htab->root.srelplt, 1);
15003 		      htab->tls_trampoline = -1;
15004 		    }
15005 		}
15006 	    }
15007 	  else
15008 	    *local_got = (bfd_vma) -1;
15009 	}
15010     }
15011 
15012   if (htab->tls_ldm_got.refcount > 0)
15013     {
15014       /* Allocate two GOT entries and one dynamic relocation (if necessary)
15015 	 for R_ARM_TLS_LDM32 relocations.  */
15016       htab->tls_ldm_got.offset = htab->root.sgot->size;
15017       htab->root.sgot->size += 8;
15018       if (bfd_link_pic (info))
15019 	elf32_arm_allocate_dynrelocs (info, htab->root.srelgot, 1);
15020     }
15021   else
15022     htab->tls_ldm_got.offset = -1;
15023 
15024   /* Allocate global sym .plt and .got entries, and space for global
15025      sym dynamic relocs.  */
15026   elf_link_hash_traverse (& htab->root, allocate_dynrelocs_for_symbol, info);
15027 
15028   /* Here we rummage through the found bfds to collect glue information.  */
15029   for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next)
15030     {
15031       if (! is_arm_elf (ibfd))
15032 	continue;
15033 
15034       /* Initialise mapping tables for code/data.  */
15035       bfd_elf32_arm_init_maps (ibfd);
15036 
15037       if (!bfd_elf32_arm_process_before_allocation (ibfd, info)
15038 	  || !bfd_elf32_arm_vfp11_erratum_scan (ibfd, info)
15039 	  || !bfd_elf32_arm_stm32l4xx_erratum_scan (ibfd, info))
15040 	/* xgettext:c-format */
15041 	_bfd_error_handler (_("Errors encountered processing file %s"),
15042 			    ibfd->filename);
15043     }
15044 
15045   /* Allocate space for the glue sections now that we've sized them.  */
15046   bfd_elf32_arm_allocate_interworking_sections (info);
15047 
15048   /* For every jump slot reserved in the sgotplt, reloc_count is
15049      incremented.  However, when we reserve space for TLS descriptors,
15050      it's not incremented, so in order to compute the space reserved
15051      for them, it suffices to multiply the reloc count by the jump
15052      slot size.  */
15053   if (htab->root.srelplt)
15054     htab->sgotplt_jump_table_size = elf32_arm_compute_jump_table_size(htab);
15055 
15056   if (htab->tls_trampoline)
15057     {
15058       if (htab->root.splt->size == 0)
15059 	htab->root.splt->size += htab->plt_header_size;
15060 
15061       htab->tls_trampoline = htab->root.splt->size;
15062       htab->root.splt->size += htab->plt_entry_size;
15063 
15064       /* If we're not using lazy TLS relocations, don't generate the
15065 	 PLT and GOT entries they require.  */
15066       if (!(info->flags & DF_BIND_NOW))
15067 	{
15068 	  htab->dt_tlsdesc_got = htab->root.sgot->size;
15069 	  htab->root.sgot->size += 4;
15070 
15071 	  htab->dt_tlsdesc_plt = htab->root.splt->size;
15072 	  htab->root.splt->size += 4 * ARRAY_SIZE (dl_tlsdesc_lazy_trampoline);
15073 	}
15074     }
15075 
15076   /* The check_relocs and adjust_dynamic_symbol entry points have
15077      determined the sizes of the various dynamic sections.  Allocate
15078      memory for them.  */
15079   plt = FALSE;
15080   relocs = FALSE;
15081   for (s = dynobj->sections; s != NULL; s = s->next)
15082     {
15083       const char * name;
15084 
15085       if ((s->flags & SEC_LINKER_CREATED) == 0)
15086 	continue;
15087 
15088       /* It's OK to base decisions on the section name, because none
15089 	 of the dynobj section names depend upon the input files.  */
15090       name = bfd_get_section_name (dynobj, s);
15091 
15092       if (s == htab->root.splt)
15093 	{
15094 	  /* Remember whether there is a PLT.  */
15095 	  plt = s->size != 0;
15096 	}
15097       else if (CONST_STRNEQ (name, ".rel"))
15098 	{
15099 	  if (s->size != 0)
15100 	    {
15101 	      /* Remember whether there are any reloc sections other
15102 		 than .rel(a).plt and .rela.plt.unloaded.  */
15103 	      if (s != htab->root.srelplt && s != htab->srelplt2)
15104 		relocs = TRUE;
15105 
15106 	      /* We use the reloc_count field as a counter if we need
15107 		 to copy relocs into the output file.  */
15108 	      s->reloc_count = 0;
15109 	    }
15110 	}
15111       else if (s != htab->root.sgot
15112 	       && s != htab->root.sgotplt
15113 	       && s != htab->root.iplt
15114 	       && s != htab->root.igotplt
15115 	       && s != htab->sdynbss)
15116 	{
15117 	  /* It's not one of our sections, so don't allocate space.  */
15118 	  continue;
15119 	}
15120 
15121       if (s->size == 0)
15122 	{
15123 	  /* If we don't need this section, strip it from the
15124 	     output file.  This is mostly to handle .rel(a).bss and
15125 	     .rel(a).plt.  We must create both sections in
15126 	     create_dynamic_sections, because they must be created
15127 	     before the linker maps input sections to output
15128 	     sections.  The linker does that before
15129 	     adjust_dynamic_symbol is called, and it is that
15130 	     function which decides whether anything needs to go
15131 	     into these sections.  */
15132 	  s->flags |= SEC_EXCLUDE;
15133 	  continue;
15134 	}
15135 
15136       if ((s->flags & SEC_HAS_CONTENTS) == 0)
15137 	continue;
15138 
15139       /* Allocate memory for the section contents.  */
15140       s->contents = (unsigned char *) bfd_zalloc (dynobj, s->size);
15141       if (s->contents == NULL)
15142 	return FALSE;
15143     }
15144 
15145   if (elf_hash_table (info)->dynamic_sections_created)
15146     {
15147       /* Add some entries to the .dynamic section.  We fill in the
15148 	 values later, in elf32_arm_finish_dynamic_sections, but we
15149 	 must add the entries now so that we get the correct size for
15150 	 the .dynamic section.  The DT_DEBUG entry is filled in by the
15151 	 dynamic linker and used by the debugger.  */
15152 #define add_dynamic_entry(TAG, VAL) \
15153   _bfd_elf_add_dynamic_entry (info, TAG, VAL)
15154 
15155      if (bfd_link_executable (info))
15156 	{
15157 	  if (!add_dynamic_entry (DT_DEBUG, 0))
15158 	    return FALSE;
15159 	}
15160 
15161       if (plt)
15162 	{
15163 	  if (   !add_dynamic_entry (DT_PLTGOT, 0)
15164 	      || !add_dynamic_entry (DT_PLTRELSZ, 0)
15165 	      || !add_dynamic_entry (DT_PLTREL,
15166 				     htab->use_rel ? DT_REL : DT_RELA)
15167 	      || !add_dynamic_entry (DT_JMPREL, 0))
15168 	    return FALSE;
15169 
15170 	  if (htab->dt_tlsdesc_plt &&
15171 		(!add_dynamic_entry (DT_TLSDESC_PLT,0)
15172 		 || !add_dynamic_entry (DT_TLSDESC_GOT,0)))
15173 	    return FALSE;
15174 	}
15175 
15176       if (relocs)
15177 	{
15178 	  if (htab->use_rel)
15179 	    {
15180 	      if (!add_dynamic_entry (DT_REL, 0)
15181 		  || !add_dynamic_entry (DT_RELSZ, 0)
15182 		  || !add_dynamic_entry (DT_RELENT, RELOC_SIZE (htab)))
15183 		return FALSE;
15184 	    }
15185 	  else
15186 	    {
15187 	      if (!add_dynamic_entry (DT_RELA, 0)
15188 		  || !add_dynamic_entry (DT_RELASZ, 0)
15189 		  || !add_dynamic_entry (DT_RELAENT, RELOC_SIZE (htab)))
15190 		return FALSE;
15191 	    }
15192 	}
15193 
15194       /* If any dynamic relocs apply to a read-only section,
15195 	 then we need a DT_TEXTREL entry.  */
15196       if ((info->flags & DF_TEXTREL) == 0)
15197 	elf_link_hash_traverse (& htab->root, elf32_arm_readonly_dynrelocs,
15198 				info);
15199 
15200       if ((info->flags & DF_TEXTREL) != 0)
15201 	{
15202 	  if (!add_dynamic_entry (DT_TEXTREL, 0))
15203 	    return FALSE;
15204 	}
15205       if (htab->vxworks_p
15206 	  && !elf_vxworks_add_dynamic_entries (output_bfd, info))
15207 	return FALSE;
15208     }
15209 #undef add_dynamic_entry
15210 
15211   return TRUE;
15212 }
15213 
15214 /* Size sections even though they're not dynamic.  We use it to setup
15215    _TLS_MODULE_BASE_, if needed.  */
15216 
15217 static bfd_boolean
15218 elf32_arm_always_size_sections (bfd *output_bfd,
15219 				struct bfd_link_info *info)
15220 {
15221   asection *tls_sec;
15222 
15223   if (bfd_link_relocatable (info))
15224     return TRUE;
15225 
15226   tls_sec = elf_hash_table (info)->tls_sec;
15227 
15228   if (tls_sec)
15229     {
15230       struct elf_link_hash_entry *tlsbase;
15231 
15232       tlsbase = elf_link_hash_lookup
15233 	(elf_hash_table (info), "_TLS_MODULE_BASE_", TRUE, TRUE, FALSE);
15234 
15235       if (tlsbase)
15236 	{
15237 	  struct bfd_link_hash_entry *bh = NULL;
15238 	  const struct elf_backend_data *bed
15239 	    = get_elf_backend_data (output_bfd);
15240 
15241 	  if (!(_bfd_generic_link_add_one_symbol
15242 		(info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL,
15243 		 tls_sec, 0, NULL, FALSE,
15244 		 bed->collect, &bh)))
15245 	    return FALSE;
15246 
15247 	  tlsbase->type = STT_TLS;
15248 	  tlsbase = (struct elf_link_hash_entry *)bh;
15249 	  tlsbase->def_regular = 1;
15250 	  tlsbase->other = STV_HIDDEN;
15251 	  (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE);
15252 	}
15253     }
15254   return TRUE;
15255 }
15256 
15257 /* Finish up dynamic symbol handling.  We set the contents of various
15258    dynamic sections here.  */
15259 
15260 static bfd_boolean
15261 elf32_arm_finish_dynamic_symbol (bfd * output_bfd,
15262 				 struct bfd_link_info * info,
15263 				 struct elf_link_hash_entry * h,
15264 				 Elf_Internal_Sym * sym)
15265 {
15266   struct elf32_arm_link_hash_table *htab;
15267   struct elf32_arm_link_hash_entry *eh;
15268 
15269   htab = elf32_arm_hash_table (info);
15270   if (htab == NULL)
15271     return FALSE;
15272 
15273   eh = (struct elf32_arm_link_hash_entry *) h;
15274 
15275   if (h->plt.offset != (bfd_vma) -1)
15276     {
15277       if (!eh->is_iplt)
15278 	{
15279 	  BFD_ASSERT (h->dynindx != -1);
15280 	  if (! elf32_arm_populate_plt_entry (output_bfd, info, &h->plt, &eh->plt,
15281 					      h->dynindx, 0))
15282 	    return FALSE;
15283 	}
15284 
15285       if (!h->def_regular)
15286 	{
15287 	  /* Mark the symbol as undefined, rather than as defined in
15288 	     the .plt section.  */
15289 	  sym->st_shndx = SHN_UNDEF;
15290 	  /* If the symbol is weak we need to clear the value.
15291 	     Otherwise, the PLT entry would provide a definition for
15292 	     the symbol even if the symbol wasn't defined anywhere,
15293 	     and so the symbol would never be NULL.  Leave the value if
15294 	     there were any relocations where pointer equality matters
15295 	     (this is a clue for the dynamic linker, to make function
15296 	     pointer comparisons work between an application and shared
15297 	     library).  */
15298 	  if (!h->ref_regular_nonweak || !h->pointer_equality_needed)
15299 	    sym->st_value = 0;
15300 	}
15301       else if (eh->is_iplt && eh->plt.noncall_refcount != 0)
15302 	{
15303 	  /* At least one non-call relocation references this .iplt entry,
15304 	     so the .iplt entry is the function's canonical address.  */
15305 	  sym->st_info = ELF_ST_INFO (ELF_ST_BIND (sym->st_info), STT_FUNC);
15306 	  ARM_SET_SYM_BRANCH_TYPE (sym->st_target_internal, ST_BRANCH_TO_ARM);
15307 	  sym->st_shndx = (_bfd_elf_section_from_bfd_section
15308 			   (output_bfd, htab->root.iplt->output_section));
15309 	  sym->st_value = (h->plt.offset
15310 			   + htab->root.iplt->output_section->vma
15311 			   + htab->root.iplt->output_offset);
15312 	}
15313     }
15314 
15315   if (h->needs_copy)
15316     {
15317       asection * s;
15318       Elf_Internal_Rela rel;
15319 
15320       /* This symbol needs a copy reloc.  Set it up.  */
15321       BFD_ASSERT (h->dynindx != -1
15322 		  && (h->root.type == bfd_link_hash_defined
15323 		      || h->root.type == bfd_link_hash_defweak));
15324 
15325       s = htab->srelbss;
15326       BFD_ASSERT (s != NULL);
15327 
15328       rel.r_addend = 0;
15329       rel.r_offset = (h->root.u.def.value
15330 		      + h->root.u.def.section->output_section->vma
15331 		      + h->root.u.def.section->output_offset);
15332       rel.r_info = ELF32_R_INFO (h->dynindx, R_ARM_COPY);
15333       elf32_arm_add_dynreloc (output_bfd, info, s, &rel);
15334     }
15335 
15336   /* Mark _DYNAMIC and _GLOBAL_OFFSET_TABLE_ as absolute.  On VxWorks,
15337      the _GLOBAL_OFFSET_TABLE_ symbol is not absolute: it is relative
15338      to the ".got" section.  */
15339   if (h == htab->root.hdynamic
15340       || (!htab->vxworks_p && h == htab->root.hgot))
15341     sym->st_shndx = SHN_ABS;
15342 
15343   return TRUE;
15344 }
15345 
15346 static void
15347 arm_put_trampoline (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15348 		    void *contents,
15349 		    const unsigned long *template, unsigned count)
15350 {
15351   unsigned ix;
15352 
15353   for (ix = 0; ix != count; ix++)
15354     {
15355       unsigned long insn = template[ix];
15356 
15357       /* Emit mov pc,rx if bx is not permitted.  */
15358       if (htab->fix_v4bx == 1 && (insn & 0x0ffffff0) == 0x012fff10)
15359 	insn = (insn & 0xf000000f) | 0x01a0f000;
15360       put_arm_insn (htab, output_bfd, insn, (char *)contents + ix*4);
15361     }
15362 }
15363 
15364 /* Install the special first PLT entry for elf32-arm-nacl.  Unlike
15365    other variants, NaCl needs this entry in a static executable's
15366    .iplt too.  When we're handling that case, GOT_DISPLACEMENT is
15367    zero.  For .iplt really only the last bundle is useful, and .iplt
15368    could have a shorter first entry, with each individual PLT entry's
15369    relative branch calculated differently so it targets the last
15370    bundle instead of the instruction before it (labelled .Lplt_tail
15371    above).  But it's simpler to keep the size and layout of PLT0
15372    consistent with the dynamic case, at the cost of some dead code at
15373    the start of .iplt and the one dead store to the stack at the start
15374    of .Lplt_tail.  */
15375 static void
15376 arm_nacl_put_plt0 (struct elf32_arm_link_hash_table *htab, bfd *output_bfd,
15377 		   asection *plt, bfd_vma got_displacement)
15378 {
15379   unsigned int i;
15380 
15381   put_arm_insn (htab, output_bfd,
15382 		elf32_arm_nacl_plt0_entry[0]
15383 		| arm_movw_immediate (got_displacement),
15384 		plt->contents + 0);
15385   put_arm_insn (htab, output_bfd,
15386 		elf32_arm_nacl_plt0_entry[1]
15387 		| arm_movt_immediate (got_displacement),
15388 		plt->contents + 4);
15389 
15390   for (i = 2; i < ARRAY_SIZE (elf32_arm_nacl_plt0_entry); ++i)
15391     put_arm_insn (htab, output_bfd,
15392 		  elf32_arm_nacl_plt0_entry[i],
15393 		  plt->contents + (i * 4));
15394 }
15395 
15396 /* Finish up the dynamic sections.  */
15397 
15398 static bfd_boolean
15399 elf32_arm_finish_dynamic_sections (bfd * output_bfd, struct bfd_link_info * info)
15400 {
15401   bfd * dynobj;
15402   asection * sgot;
15403   asection * sdyn;
15404   struct elf32_arm_link_hash_table *htab;
15405 
15406   htab = elf32_arm_hash_table (info);
15407   if (htab == NULL)
15408     return FALSE;
15409 
15410   dynobj = elf_hash_table (info)->dynobj;
15411 
15412   sgot = htab->root.sgotplt;
15413   /* A broken linker script might have discarded the dynamic sections.
15414      Catch this here so that we do not seg-fault later on.  */
15415   if (sgot != NULL && bfd_is_abs_section (sgot->output_section))
15416     return FALSE;
15417   sdyn = bfd_get_linker_section (dynobj, ".dynamic");
15418 
15419   if (elf_hash_table (info)->dynamic_sections_created)
15420     {
15421       asection *splt;
15422       Elf32_External_Dyn *dyncon, *dynconend;
15423 
15424       splt = htab->root.splt;
15425       BFD_ASSERT (splt != NULL && sdyn != NULL);
15426       BFD_ASSERT (htab->symbian_p || sgot != NULL);
15427 
15428       dyncon = (Elf32_External_Dyn *) sdyn->contents;
15429       dynconend = (Elf32_External_Dyn *) (sdyn->contents + sdyn->size);
15430 
15431       for (; dyncon < dynconend; dyncon++)
15432 	{
15433 	  Elf_Internal_Dyn dyn;
15434 	  const char * name;
15435 	  asection * s;
15436 
15437 	  bfd_elf32_swap_dyn_in (dynobj, dyncon, &dyn);
15438 
15439 	  switch (dyn.d_tag)
15440 	    {
15441 	      unsigned int type;
15442 
15443 	    default:
15444 	      if (htab->vxworks_p
15445 		  && elf_vxworks_finish_dynamic_entry (output_bfd, &dyn))
15446 		bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15447 	      break;
15448 
15449 	    case DT_HASH:
15450 	      name = ".hash";
15451 	      goto get_vma_if_bpabi;
15452 	    case DT_STRTAB:
15453 	      name = ".dynstr";
15454 	      goto get_vma_if_bpabi;
15455 	    case DT_SYMTAB:
15456 	      name = ".dynsym";
15457 	      goto get_vma_if_bpabi;
15458 	    case DT_VERSYM:
15459 	      name = ".gnu.version";
15460 	      goto get_vma_if_bpabi;
15461 	    case DT_VERDEF:
15462 	      name = ".gnu.version_d";
15463 	      goto get_vma_if_bpabi;
15464 	    case DT_VERNEED:
15465 	      name = ".gnu.version_r";
15466 	      goto get_vma_if_bpabi;
15467 
15468 	    case DT_PLTGOT:
15469 	      name = htab->symbian_p ? ".got" : ".got.plt";
15470 	      goto get_vma;
15471 	    case DT_JMPREL:
15472 	      name = RELOC_SECTION (htab, ".plt");
15473 	    get_vma:
15474 	      s = bfd_get_linker_section (dynobj, name);
15475 	      if (s == NULL)
15476 		{
15477 		  (*_bfd_error_handler)
15478 		    (_("could not find section %s"), name);
15479 		  bfd_set_error (bfd_error_invalid_operation);
15480 		  return FALSE;
15481 		}
15482 	      if (!htab->symbian_p)
15483 		dyn.d_un.d_ptr = s->output_section->vma + s->output_offset;
15484 	      else
15485 		/* In the BPABI, tags in the PT_DYNAMIC section point
15486 		   at the file offset, not the memory address, for the
15487 		   convenience of the post linker.  */
15488 		dyn.d_un.d_ptr = s->output_section->filepos + s->output_offset;
15489 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15490 	      break;
15491 
15492 	    get_vma_if_bpabi:
15493 	      if (htab->symbian_p)
15494 		goto get_vma;
15495 	      break;
15496 
15497 	    case DT_PLTRELSZ:
15498 	      s = htab->root.srelplt;
15499 	      BFD_ASSERT (s != NULL);
15500 	      dyn.d_un.d_val = s->size;
15501 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15502 	      break;
15503 
15504 	    case DT_RELSZ:
15505 	    case DT_RELASZ:
15506 	      if (!htab->symbian_p)
15507 		{
15508 		  /* My reading of the SVR4 ABI indicates that the
15509 		     procedure linkage table relocs (DT_JMPREL) should be
15510 		     included in the overall relocs (DT_REL).  This is
15511 		     what Solaris does.  However, UnixWare can not handle
15512 		     that case.  Therefore, we override the DT_RELSZ entry
15513 		     here to make it not include the JMPREL relocs.  Since
15514 		     the linker script arranges for .rel(a).plt to follow all
15515 		     other relocation sections, we don't have to worry
15516 		     about changing the DT_REL entry.  */
15517 		  s = htab->root.srelplt;
15518 		  if (s != NULL)
15519 		    dyn.d_un.d_val -= s->size;
15520 		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15521 		  break;
15522 		}
15523 	      /* Fall through.  */
15524 
15525 	    case DT_REL:
15526 	    case DT_RELA:
15527 	      /* In the BPABI, the DT_REL tag must point at the file
15528 		 offset, not the VMA, of the first relocation
15529 		 section.  So, we use code similar to that in
15530 		 elflink.c, but do not check for SHF_ALLOC on the
15531 		 relcoation section, since relocations sections are
15532 		 never allocated under the BPABI.  The comments above
15533 		 about Unixware notwithstanding, we include all of the
15534 		 relocations here.  */
15535 	      if (htab->symbian_p)
15536 		{
15537 		  unsigned int i;
15538 		  type = ((dyn.d_tag == DT_REL || dyn.d_tag == DT_RELSZ)
15539 			  ? SHT_REL : SHT_RELA);
15540 		  dyn.d_un.d_val = 0;
15541 		  for (i = 1; i < elf_numsections (output_bfd); i++)
15542 		    {
15543 		      Elf_Internal_Shdr *hdr
15544 			= elf_elfsections (output_bfd)[i];
15545 		      if (hdr->sh_type == type)
15546 			{
15547 			  if (dyn.d_tag == DT_RELSZ
15548 			      || dyn.d_tag == DT_RELASZ)
15549 			    dyn.d_un.d_val += hdr->sh_size;
15550 			  else if ((ufile_ptr) hdr->sh_offset
15551 				   <= dyn.d_un.d_val - 1)
15552 			    dyn.d_un.d_val = hdr->sh_offset;
15553 			}
15554 		    }
15555 		  bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15556 		}
15557 	      break;
15558 
15559 	    case DT_TLSDESC_PLT:
15560 	      s = htab->root.splt;
15561 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15562 				+ htab->dt_tlsdesc_plt);
15563 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15564 	      break;
15565 
15566 	    case DT_TLSDESC_GOT:
15567 	      s = htab->root.sgot;
15568 	      dyn.d_un.d_ptr = (s->output_section->vma + s->output_offset
15569 				+ htab->dt_tlsdesc_got);
15570 	      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15571 	      break;
15572 
15573 	      /* Set the bottom bit of DT_INIT/FINI if the
15574 		 corresponding function is Thumb.  */
15575 	    case DT_INIT:
15576 	      name = info->init_function;
15577 	      goto get_sym;
15578 	    case DT_FINI:
15579 	      name = info->fini_function;
15580 	    get_sym:
15581 	      /* If it wasn't set by elf_bfd_final_link
15582 		 then there is nothing to adjust.  */
15583 	      if (dyn.d_un.d_val != 0)
15584 		{
15585 		  struct elf_link_hash_entry * eh;
15586 
15587 		  eh = elf_link_hash_lookup (elf_hash_table (info), name,
15588 					     FALSE, FALSE, TRUE);
15589 		  if (eh != NULL
15590 		      && ARM_GET_SYM_BRANCH_TYPE (eh->target_internal)
15591 			 == ST_BRANCH_TO_THUMB)
15592 		    {
15593 		      dyn.d_un.d_val |= 1;
15594 		      bfd_elf32_swap_dyn_out (output_bfd, &dyn, dyncon);
15595 		    }
15596 		}
15597 	      break;
15598 	    }
15599 	}
15600 
15601       /* Fill in the first entry in the procedure linkage table.  */
15602       if (splt->size > 0 && htab->plt_header_size)
15603 	{
15604 	  const bfd_vma *plt0_entry;
15605 	  bfd_vma got_address, plt_address, got_displacement;
15606 
15607 	  /* Calculate the addresses of the GOT and PLT.  */
15608 	  got_address = sgot->output_section->vma + sgot->output_offset;
15609 	  plt_address = splt->output_section->vma + splt->output_offset;
15610 
15611 	  if (htab->vxworks_p)
15612 	    {
15613 	      /* The VxWorks GOT is relocated by the dynamic linker.
15614 		 Therefore, we must emit relocations rather than simply
15615 		 computing the values now.  */
15616 	      Elf_Internal_Rela rel;
15617 
15618 	      plt0_entry = elf32_arm_vxworks_exec_plt0_entry;
15619 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
15620 			    splt->contents + 0);
15621 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
15622 			    splt->contents + 4);
15623 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
15624 			    splt->contents + 8);
15625 	      bfd_put_32 (output_bfd, got_address, splt->contents + 12);
15626 
15627 	      /* Generate a relocation for _GLOBAL_OFFSET_TABLE_.  */
15628 	      rel.r_offset = plt_address + 12;
15629 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15630 	      rel.r_addend = 0;
15631 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel,
15632 				     htab->srelplt2->contents);
15633 	    }
15634 	  else if (htab->nacl_p)
15635 	    arm_nacl_put_plt0 (htab, output_bfd, splt,
15636 			       got_address + 8 - (plt_address + 16));
15637 	  else if (using_thumb_only (htab))
15638 	    {
15639 	      got_displacement = got_address - (plt_address + 12);
15640 
15641 	      plt0_entry = elf32_thumb2_plt0_entry;
15642 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
15643 			    splt->contents + 0);
15644 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
15645 			    splt->contents + 4);
15646 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
15647 			    splt->contents + 8);
15648 
15649 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 12);
15650 	    }
15651 	  else
15652 	    {
15653 	      got_displacement = got_address - (plt_address + 16);
15654 
15655 	      plt0_entry = elf32_arm_plt0_entry;
15656 	      put_arm_insn (htab, output_bfd, plt0_entry[0],
15657 			    splt->contents + 0);
15658 	      put_arm_insn (htab, output_bfd, plt0_entry[1],
15659 			    splt->contents + 4);
15660 	      put_arm_insn (htab, output_bfd, plt0_entry[2],
15661 			    splt->contents + 8);
15662 	      put_arm_insn (htab, output_bfd, plt0_entry[3],
15663 			    splt->contents + 12);
15664 
15665 #ifdef FOUR_WORD_PLT
15666 	      /* The displacement value goes in the otherwise-unused
15667 		 last word of the second entry.  */
15668 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 28);
15669 #else
15670 	      bfd_put_32 (output_bfd, got_displacement, splt->contents + 16);
15671 #endif
15672 	    }
15673 	}
15674 
15675       /* UnixWare sets the entsize of .plt to 4, although that doesn't
15676 	 really seem like the right value.  */
15677       if (splt->output_section->owner == output_bfd)
15678 	elf_section_data (splt->output_section)->this_hdr.sh_entsize = 4;
15679 
15680       if (htab->dt_tlsdesc_plt)
15681 	{
15682 	  bfd_vma got_address
15683 	    = sgot->output_section->vma + sgot->output_offset;
15684 	  bfd_vma gotplt_address = (htab->root.sgot->output_section->vma
15685 				    + htab->root.sgot->output_offset);
15686 	  bfd_vma plt_address
15687 	    = splt->output_section->vma + splt->output_offset;
15688 
15689 	  arm_put_trampoline (htab, output_bfd,
15690 			      splt->contents + htab->dt_tlsdesc_plt,
15691 			      dl_tlsdesc_lazy_trampoline, 6);
15692 
15693 	  bfd_put_32 (output_bfd,
15694 		      gotplt_address + htab->dt_tlsdesc_got
15695 		      - (plt_address + htab->dt_tlsdesc_plt)
15696 		      - dl_tlsdesc_lazy_trampoline[6],
15697 		      splt->contents + htab->dt_tlsdesc_plt + 24);
15698 	  bfd_put_32 (output_bfd,
15699 		      got_address - (plt_address + htab->dt_tlsdesc_plt)
15700 		      - dl_tlsdesc_lazy_trampoline[7],
15701 		      splt->contents + htab->dt_tlsdesc_plt + 24 + 4);
15702 	}
15703 
15704       if (htab->tls_trampoline)
15705 	{
15706 	  arm_put_trampoline (htab, output_bfd,
15707 			      splt->contents + htab->tls_trampoline,
15708 			      tls_trampoline, 3);
15709 #ifdef FOUR_WORD_PLT
15710 	  bfd_put_32 (output_bfd, 0x00000000,
15711 		      splt->contents + htab->tls_trampoline + 12);
15712 #endif
15713 	}
15714 
15715       if (htab->vxworks_p
15716 	  && !bfd_link_pic (info)
15717 	  && htab->root.splt->size > 0)
15718 	{
15719 	  /* Correct the .rel(a).plt.unloaded relocations.  They will have
15720 	     incorrect symbol indexes.  */
15721 	  int num_plts;
15722 	  unsigned char *p;
15723 
15724 	  num_plts = ((htab->root.splt->size - htab->plt_header_size)
15725 		      / htab->plt_entry_size);
15726 	  p = htab->srelplt2->contents + RELOC_SIZE (htab);
15727 
15728 	  for (; num_plts; num_plts--)
15729 	    {
15730 	      Elf_Internal_Rela rel;
15731 
15732 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15733 	      rel.r_info = ELF32_R_INFO (htab->root.hgot->indx, R_ARM_ABS32);
15734 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15735 	      p += RELOC_SIZE (htab);
15736 
15737 	      SWAP_RELOC_IN (htab) (output_bfd, p, &rel);
15738 	      rel.r_info = ELF32_R_INFO (htab->root.hplt->indx, R_ARM_ABS32);
15739 	      SWAP_RELOC_OUT (htab) (output_bfd, &rel, p);
15740 	      p += RELOC_SIZE (htab);
15741 	    }
15742 	}
15743     }
15744 
15745   if (htab->nacl_p && htab->root.iplt != NULL && htab->root.iplt->size > 0)
15746     /* NaCl uses a special first entry in .iplt too.  */
15747     arm_nacl_put_plt0 (htab, output_bfd, htab->root.iplt, 0);
15748 
15749   /* Fill in the first three entries in the global offset table.  */
15750   if (sgot)
15751     {
15752       if (sgot->size > 0)
15753 	{
15754 	  if (sdyn == NULL)
15755 	    bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents);
15756 	  else
15757 	    bfd_put_32 (output_bfd,
15758 			sdyn->output_section->vma + sdyn->output_offset,
15759 			sgot->contents);
15760 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 4);
15761 	  bfd_put_32 (output_bfd, (bfd_vma) 0, sgot->contents + 8);
15762 	}
15763 
15764       elf_section_data (sgot->output_section)->this_hdr.sh_entsize = 4;
15765     }
15766 
15767   return TRUE;
15768 }
15769 
15770 static void
15771 elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATTRIBUTE_UNUSED)
15772 {
15773   Elf_Internal_Ehdr * i_ehdrp;	/* ELF file header, internal form.  */
15774   struct elf32_arm_link_hash_table *globals;
15775   struct elf_segment_map *m;
15776 
15777   i_ehdrp = elf_elfheader (abfd);
15778 
15779   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN)
15780     i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_ARM;
15781   else
15782     _bfd_elf_post_process_headers (abfd, link_info);
15783   i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION;
15784 
15785   if (link_info)
15786     {
15787       globals = elf32_arm_hash_table (link_info);
15788       if (globals != NULL && globals->byteswap_code)
15789 	i_ehdrp->e_flags |= EF_ARM_BE8;
15790     }
15791 
15792   if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_VER5
15793       && ((i_ehdrp->e_type == ET_DYN) || (i_ehdrp->e_type == ET_EXEC)))
15794     {
15795       int abi = bfd_elf_get_obj_attr_int (abfd, OBJ_ATTR_PROC, Tag_ABI_VFP_args);
15796       if (abi == AEABI_VFP_args_vfp)
15797 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_HARD;
15798       else
15799 	i_ehdrp->e_flags |= EF_ARM_ABI_FLOAT_SOFT;
15800     }
15801 
15802   /* Scan segment to set p_flags attribute if it contains only sections with
15803      SHF_ARM_NOREAD flag.  */
15804   for (m = elf_seg_map (abfd); m != NULL; m = m->next)
15805     {
15806       unsigned int j;
15807 
15808       if (m->count == 0)
15809 	continue;
15810       for (j = 0; j < m->count; j++)
15811 	{
15812 	  if (!(elf_section_flags (m->sections[j]) & SHF_ARM_NOREAD))
15813 	    break;
15814 	}
15815       if (j == m->count)
15816 	{
15817 	  m->p_flags = PF_X;
15818 	  m->p_flags_valid = 1;
15819 	}
15820     }
15821 }
15822 
15823 static enum elf_reloc_type_class
15824 elf32_arm_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED,
15825 			    const asection *rel_sec ATTRIBUTE_UNUSED,
15826 			    const Elf_Internal_Rela *rela)
15827 {
15828   switch ((int) ELF32_R_TYPE (rela->r_info))
15829     {
15830     case R_ARM_RELATIVE:
15831       return reloc_class_relative;
15832     case R_ARM_JUMP_SLOT:
15833       return reloc_class_plt;
15834     case R_ARM_COPY:
15835       return reloc_class_copy;
15836     case R_ARM_IRELATIVE:
15837       return reloc_class_ifunc;
15838     default:
15839       return reloc_class_normal;
15840     }
15841 }
15842 
15843 static void
15844 elf32_arm_final_write_processing (bfd *abfd, bfd_boolean linker ATTRIBUTE_UNUSED)
15845 {
15846   bfd_arm_update_notes (abfd, ARM_NOTE_SECTION);
15847 }
15848 
15849 /* Return TRUE if this is an unwinding table entry.  */
15850 
15851 static bfd_boolean
15852 is_arm_elf_unwind_section_name (bfd * abfd ATTRIBUTE_UNUSED, const char * name)
15853 {
15854   return (CONST_STRNEQ (name, ELF_STRING_ARM_unwind)
15855 	  || CONST_STRNEQ (name, ELF_STRING_ARM_unwind_once));
15856 }
15857 
15858 
15859 /* Set the type and flags for an ARM section.  We do this by
15860    the section name, which is a hack, but ought to work.  */
15861 
15862 static bfd_boolean
15863 elf32_arm_fake_sections (bfd * abfd, Elf_Internal_Shdr * hdr, asection * sec)
15864 {
15865   const char * name;
15866 
15867   name = bfd_get_section_name (abfd, sec);
15868 
15869   if (is_arm_elf_unwind_section_name (abfd, name))
15870     {
15871       hdr->sh_type = SHT_ARM_EXIDX;
15872       hdr->sh_flags |= SHF_LINK_ORDER;
15873     }
15874 
15875   if (sec->flags & SEC_ELF_NOREAD)
15876     hdr->sh_flags |= SHF_ARM_NOREAD;
15877 
15878   return TRUE;
15879 }
15880 
15881 /* Handle an ARM specific section when reading an object file.  This is
15882    called when bfd_section_from_shdr finds a section with an unknown
15883    type.  */
15884 
15885 static bfd_boolean
15886 elf32_arm_section_from_shdr (bfd *abfd,
15887 			     Elf_Internal_Shdr * hdr,
15888 			     const char *name,
15889 			     int shindex)
15890 {
15891   /* There ought to be a place to keep ELF backend specific flags, but
15892      at the moment there isn't one.  We just keep track of the
15893      sections by their name, instead.  Fortunately, the ABI gives
15894      names for all the ARM specific sections, so we will probably get
15895      away with this.  */
15896   switch (hdr->sh_type)
15897     {
15898     case SHT_ARM_EXIDX:
15899     case SHT_ARM_PREEMPTMAP:
15900     case SHT_ARM_ATTRIBUTES:
15901       break;
15902 
15903     default:
15904       return FALSE;
15905     }
15906 
15907   if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
15908     return FALSE;
15909 
15910   return TRUE;
15911 }
15912 
15913 static _arm_elf_section_data *
15914 get_arm_elf_section_data (asection * sec)
15915 {
15916   if (sec && sec->owner && is_arm_elf (sec->owner))
15917     return elf32_arm_section_data (sec);
15918   else
15919     return NULL;
15920 }
15921 
15922 typedef struct
15923 {
15924   void *flaginfo;
15925   struct bfd_link_info *info;
15926   asection *sec;
15927   int sec_shndx;
15928   int (*func) (void *, const char *, Elf_Internal_Sym *,
15929 	       asection *, struct elf_link_hash_entry *);
15930 } output_arch_syminfo;
15931 
15932 enum map_symbol_type
15933 {
15934   ARM_MAP_ARM,
15935   ARM_MAP_THUMB,
15936   ARM_MAP_DATA
15937 };
15938 
15939 
15940 /* Output a single mapping symbol.  */
15941 
15942 static bfd_boolean
15943 elf32_arm_output_map_sym (output_arch_syminfo *osi,
15944 			  enum map_symbol_type type,
15945 			  bfd_vma offset)
15946 {
15947   static const char *names[3] = {"$a", "$t", "$d"};
15948   Elf_Internal_Sym sym;
15949 
15950   sym.st_value = osi->sec->output_section->vma
15951 		 + osi->sec->output_offset
15952 		 + offset;
15953   sym.st_size = 0;
15954   sym.st_other = 0;
15955   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_NOTYPE);
15956   sym.st_shndx = osi->sec_shndx;
15957   sym.st_target_internal = 0;
15958   elf32_arm_section_map_add (osi->sec, names[type][1], offset);
15959   return osi->func (osi->flaginfo, names[type], &sym, osi->sec, NULL) == 1;
15960 }
15961 
15962 /* Output mapping symbols for the PLT entry described by ROOT_PLT and ARM_PLT.
15963    IS_IPLT_ENTRY_P says whether the PLT is in .iplt rather than .plt.  */
15964 
15965 static bfd_boolean
15966 elf32_arm_output_plt_map_1 (output_arch_syminfo *osi,
15967 			    bfd_boolean is_iplt_entry_p,
15968 			    union gotplt_union *root_plt,
15969 			    struct arm_plt_info *arm_plt)
15970 {
15971   struct elf32_arm_link_hash_table *htab;
15972   bfd_vma addr, plt_header_size;
15973 
15974   if (root_plt->offset == (bfd_vma) -1)
15975     return TRUE;
15976 
15977   htab = elf32_arm_hash_table (osi->info);
15978   if (htab == NULL)
15979     return FALSE;
15980 
15981   if (is_iplt_entry_p)
15982     {
15983       osi->sec = htab->root.iplt;
15984       plt_header_size = 0;
15985     }
15986   else
15987     {
15988       osi->sec = htab->root.splt;
15989       plt_header_size = htab->plt_header_size;
15990     }
15991   osi->sec_shndx = (_bfd_elf_section_from_bfd_section
15992 		    (osi->info->output_bfd, osi->sec->output_section));
15993 
15994   addr = root_plt->offset & -2;
15995   if (htab->symbian_p)
15996     {
15997       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
15998 	return FALSE;
15999       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 4))
16000 	return FALSE;
16001     }
16002   else if (htab->vxworks_p)
16003     {
16004       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16005 	return FALSE;
16006       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 8))
16007 	return FALSE;
16008       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr + 12))
16009 	return FALSE;
16010       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 20))
16011 	return FALSE;
16012     }
16013   else if (htab->nacl_p)
16014     {
16015       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16016 	return FALSE;
16017     }
16018   else if (using_thumb_only (htab))
16019     {
16020       if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr))
16021 	return FALSE;
16022     }
16023   else
16024     {
16025       bfd_boolean thumb_stub_p;
16026 
16027       thumb_stub_p = elf32_arm_plt_needs_thumb_stub_p (osi->info, arm_plt);
16028       if (thumb_stub_p)
16029 	{
16030 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_THUMB, addr - 4))
16031 	    return FALSE;
16032 	}
16033 #ifdef FOUR_WORD_PLT
16034       if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16035 	return FALSE;
16036       if (!elf32_arm_output_map_sym (osi, ARM_MAP_DATA, addr + 12))
16037 	return FALSE;
16038 #else
16039       /* A three-word PLT with no Thumb thunk contains only Arm code,
16040 	 so only need to output a mapping symbol for the first PLT entry and
16041 	 entries with thumb thunks.  */
16042       if (thumb_stub_p || addr == plt_header_size)
16043 	{
16044 	  if (!elf32_arm_output_map_sym (osi, ARM_MAP_ARM, addr))
16045 	    return FALSE;
16046 	}
16047 #endif
16048     }
16049 
16050   return TRUE;
16051 }
16052 
16053 /* Output mapping symbols for PLT entries associated with H.  */
16054 
16055 static bfd_boolean
16056 elf32_arm_output_plt_map (struct elf_link_hash_entry *h, void *inf)
16057 {
16058   output_arch_syminfo *osi = (output_arch_syminfo *) inf;
16059   struct elf32_arm_link_hash_entry *eh;
16060 
16061   if (h->root.type == bfd_link_hash_indirect)
16062     return TRUE;
16063 
16064   if (h->root.type == bfd_link_hash_warning)
16065     /* When warning symbols are created, they **replace** the "real"
16066        entry in the hash table, thus we never get to see the real
16067        symbol in a hash traversal.  So look at it now.  */
16068     h = (struct elf_link_hash_entry *) h->root.u.i.link;
16069 
16070   eh = (struct elf32_arm_link_hash_entry *) h;
16071   return elf32_arm_output_plt_map_1 (osi, SYMBOL_CALLS_LOCAL (osi->info, h),
16072 				     &h->plt, &eh->plt);
16073 }
16074 
16075 /* Bind a veneered symbol to its veneer identified by its hash entry
16076    STUB_ENTRY.  The veneered location thus loose its symbol.  */
16077 
16078 static void
16079 arm_stub_claim_sym (struct elf32_arm_stub_hash_entry *stub_entry)
16080 {
16081   struct elf32_arm_link_hash_entry *hash = stub_entry->h;
16082 
16083   BFD_ASSERT (hash);
16084   hash->root.root.u.def.section = stub_entry->stub_sec;
16085   hash->root.root.u.def.value = stub_entry->stub_offset;
16086   hash->root.size = stub_entry->stub_size;
16087 }
16088 
16089 /* Output a single local symbol for a generated stub.  */
16090 
16091 static bfd_boolean
16092 elf32_arm_output_stub_sym (output_arch_syminfo *osi, const char *name,
16093 			   bfd_vma offset, bfd_vma size)
16094 {
16095   Elf_Internal_Sym sym;
16096 
16097   sym.st_value = osi->sec->output_section->vma
16098 		 + osi->sec->output_offset
16099 		 + offset;
16100   sym.st_size = size;
16101   sym.st_other = 0;
16102   sym.st_info = ELF_ST_INFO (STB_LOCAL, STT_FUNC);
16103   sym.st_shndx = osi->sec_shndx;
16104   sym.st_target_internal = 0;
16105   return osi->func (osi->flaginfo, name, &sym, osi->sec, NULL) == 1;
16106 }
16107 
16108 static bfd_boolean
16109 arm_map_one_stub (struct bfd_hash_entry * gen_entry,
16110 		  void * in_arg)
16111 {
16112   struct elf32_arm_stub_hash_entry *stub_entry;
16113   asection *stub_sec;
16114   bfd_vma addr;
16115   char *stub_name;
16116   output_arch_syminfo *osi;
16117   const insn_sequence *template_sequence;
16118   enum stub_insn_type prev_type;
16119   int size;
16120   int i;
16121   enum map_symbol_type sym_type;
16122 
16123   /* Massage our args to the form they really have.  */
16124   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
16125   osi = (output_arch_syminfo *) in_arg;
16126 
16127   stub_sec = stub_entry->stub_sec;
16128 
16129   /* Ensure this stub is attached to the current section being
16130      processed.  */
16131   if (stub_sec != osi->sec)
16132     return TRUE;
16133 
16134   addr = (bfd_vma) stub_entry->stub_offset;
16135   template_sequence = stub_entry->stub_template;
16136 
16137   if (arm_stub_sym_claimed (stub_entry->stub_type))
16138     arm_stub_claim_sym (stub_entry);
16139   else
16140     {
16141       stub_name = stub_entry->output_name;
16142       switch (template_sequence[0].type)
16143 	{
16144 	case ARM_TYPE:
16145 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr,
16146 					  stub_entry->stub_size))
16147 	    return FALSE;
16148 	  break;
16149 	case THUMB16_TYPE:
16150 	case THUMB32_TYPE:
16151 	  if (!elf32_arm_output_stub_sym (osi, stub_name, addr | 1,
16152 					  stub_entry->stub_size))
16153 	    return FALSE;
16154 	  break;
16155 	default:
16156 	  BFD_FAIL ();
16157 	  return 0;
16158 	}
16159     }
16160 
16161   prev_type = DATA_TYPE;
16162   size = 0;
16163   for (i = 0; i < stub_entry->stub_template_size; i++)
16164     {
16165       switch (template_sequence[i].type)
16166 	{
16167 	case ARM_TYPE:
16168 	  sym_type = ARM_MAP_ARM;
16169 	  break;
16170 
16171 	case THUMB16_TYPE:
16172 	case THUMB32_TYPE:
16173 	  sym_type = ARM_MAP_THUMB;
16174 	  break;
16175 
16176 	case DATA_TYPE:
16177 	  sym_type = ARM_MAP_DATA;
16178 	  break;
16179 
16180 	default:
16181 	  BFD_FAIL ();
16182 	  return FALSE;
16183 	}
16184 
16185       if (template_sequence[i].type != prev_type)
16186 	{
16187 	  prev_type = template_sequence[i].type;
16188 	  if (!elf32_arm_output_map_sym (osi, sym_type, addr + size))
16189 	    return FALSE;
16190 	}
16191 
16192       switch (template_sequence[i].type)
16193 	{
16194 	case ARM_TYPE:
16195 	case THUMB32_TYPE:
16196 	  size += 4;
16197 	  break;
16198 
16199 	case THUMB16_TYPE:
16200 	  size += 2;
16201 	  break;
16202 
16203 	case DATA_TYPE:
16204 	  size += 4;
16205 	  break;
16206 
16207 	default:
16208 	  BFD_FAIL ();
16209 	  return FALSE;
16210 	}
16211     }
16212 
16213   return TRUE;
16214 }
16215 
16216 /* Output mapping symbols for linker generated sections,
16217    and for those data-only sections that do not have a
16218    $d.  */
16219 
16220 static bfd_boolean
16221 elf32_arm_output_arch_local_syms (bfd *output_bfd,
16222 				  struct bfd_link_info *info,
16223 				  void *flaginfo,
16224 				  int (*func) (void *, const char *,
16225 					       Elf_Internal_Sym *,
16226 					       asection *,
16227 					       struct elf_link_hash_entry *))
16228 {
16229   output_arch_syminfo osi;
16230   struct elf32_arm_link_hash_table *htab;
16231   bfd_vma offset;
16232   bfd_size_type size;
16233   bfd *input_bfd;
16234 
16235   htab = elf32_arm_hash_table (info);
16236   if (htab == NULL)
16237     return FALSE;
16238 
16239   check_use_blx (htab);
16240 
16241   osi.flaginfo = flaginfo;
16242   osi.info = info;
16243   osi.func = func;
16244 
16245   /* Add a $d mapping symbol to data-only sections that
16246      don't have any mapping symbol.  This may result in (harmless) redundant
16247      mapping symbols.  */
16248   for (input_bfd = info->input_bfds;
16249        input_bfd != NULL;
16250        input_bfd = input_bfd->link.next)
16251     {
16252       if ((input_bfd->flags & (BFD_LINKER_CREATED | HAS_SYMS)) == HAS_SYMS)
16253 	for (osi.sec = input_bfd->sections;
16254 	     osi.sec != NULL;
16255 	     osi.sec = osi.sec->next)
16256 	  {
16257 	    if (osi.sec->output_section != NULL
16258 		&& ((osi.sec->output_section->flags & (SEC_ALLOC | SEC_CODE))
16259 		    != 0)
16260 		&& (osi.sec->flags & (SEC_HAS_CONTENTS | SEC_LINKER_CREATED))
16261 		   == SEC_HAS_CONTENTS
16262 		&& get_arm_elf_section_data (osi.sec) != NULL
16263 		&& get_arm_elf_section_data (osi.sec)->mapcount == 0
16264 		&& osi.sec->size > 0
16265 		&& (osi.sec->flags & SEC_EXCLUDE) == 0)
16266 	      {
16267 		osi.sec_shndx = _bfd_elf_section_from_bfd_section
16268 		  (output_bfd, osi.sec->output_section);
16269 		if (osi.sec_shndx != (int)SHN_BAD)
16270 		  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 0);
16271 	      }
16272 	  }
16273     }
16274 
16275   /* ARM->Thumb glue.  */
16276   if (htab->arm_glue_size > 0)
16277     {
16278       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16279 					ARM2THUMB_GLUE_SECTION_NAME);
16280 
16281       osi.sec_shndx = _bfd_elf_section_from_bfd_section
16282 	  (output_bfd, osi.sec->output_section);
16283       if (bfd_link_pic (info) || htab->root.is_relocatable_executable
16284 	  || htab->pic_veneer)
16285 	size = ARM2THUMB_PIC_GLUE_SIZE;
16286       else if (htab->use_blx)
16287 	size = ARM2THUMB_V5_STATIC_GLUE_SIZE;
16288       else
16289 	size = ARM2THUMB_STATIC_GLUE_SIZE;
16290 
16291       for (offset = 0; offset < htab->arm_glue_size; offset += size)
16292 	{
16293 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset);
16294 	  elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, offset + size - 4);
16295 	}
16296     }
16297 
16298   /* Thumb->ARM glue.  */
16299   if (htab->thumb_glue_size > 0)
16300     {
16301       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16302 					THUMB2ARM_GLUE_SECTION_NAME);
16303 
16304       osi.sec_shndx = _bfd_elf_section_from_bfd_section
16305 	  (output_bfd, osi.sec->output_section);
16306       size = THUMB2ARM_GLUE_SIZE;
16307 
16308       for (offset = 0; offset < htab->thumb_glue_size; offset += size)
16309 	{
16310 	  elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, offset);
16311 	  elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, offset + 4);
16312 	}
16313     }
16314 
16315   /* ARMv4 BX veneers.  */
16316   if (htab->bx_glue_size > 0)
16317     {
16318       osi.sec = bfd_get_linker_section (htab->bfd_of_glue_owner,
16319 					ARM_BX_GLUE_SECTION_NAME);
16320 
16321       osi.sec_shndx = _bfd_elf_section_from_bfd_section
16322 	  (output_bfd, osi.sec->output_section);
16323 
16324       elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0);
16325     }
16326 
16327   /* Long calls stubs.  */
16328   if (htab->stub_bfd && htab->stub_bfd->sections)
16329     {
16330       asection* stub_sec;
16331 
16332       for (stub_sec = htab->stub_bfd->sections;
16333 	   stub_sec != NULL;
16334 	   stub_sec = stub_sec->next)
16335 	{
16336 	  /* Ignore non-stub sections.  */
16337 	  if (!strstr (stub_sec->name, STUB_SUFFIX))
16338 	    continue;
16339 
16340 	  osi.sec = stub_sec;
16341 
16342 	  osi.sec_shndx = _bfd_elf_section_from_bfd_section
16343 	    (output_bfd, osi.sec->output_section);
16344 
16345 	  bfd_hash_traverse (&htab->stub_hash_table, arm_map_one_stub, &osi);
16346 	}
16347     }
16348 
16349   /* Finally, output mapping symbols for the PLT.  */
16350   if (htab->root.splt && htab->root.splt->size > 0)
16351     {
16352       osi.sec = htab->root.splt;
16353       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16354 		       (output_bfd, osi.sec->output_section));
16355 
16356       /* Output mapping symbols for the plt header.  SymbianOS does not have a
16357 	 plt header.  */
16358       if (htab->vxworks_p)
16359 	{
16360 	  /* VxWorks shared libraries have no PLT header.  */
16361 	  if (!bfd_link_pic (info))
16362 	    {
16363 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16364 		return FALSE;
16365 	      if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16366 		return FALSE;
16367 	    }
16368 	}
16369       else if (htab->nacl_p)
16370 	{
16371 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16372 	    return FALSE;
16373 	}
16374       else if (using_thumb_only (htab))
16375 	{
16376 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 0))
16377 	    return FALSE;
16378 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 12))
16379 	    return FALSE;
16380 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_THUMB, 16))
16381 	    return FALSE;
16382 	}
16383       else if (!htab->symbian_p)
16384 	{
16385 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16386 	    return FALSE;
16387 #ifndef FOUR_WORD_PLT
16388 	  if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA, 16))
16389 	    return FALSE;
16390 #endif
16391 	}
16392     }
16393   if (htab->nacl_p && htab->root.iplt && htab->root.iplt->size > 0)
16394     {
16395       /* NaCl uses a special first entry in .iplt too.  */
16396       osi.sec = htab->root.iplt;
16397       osi.sec_shndx = (_bfd_elf_section_from_bfd_section
16398 		       (output_bfd, osi.sec->output_section));
16399       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, 0))
16400 	return FALSE;
16401     }
16402   if ((htab->root.splt && htab->root.splt->size > 0)
16403       || (htab->root.iplt && htab->root.iplt->size > 0))
16404     {
16405       elf_link_hash_traverse (&htab->root, elf32_arm_output_plt_map, &osi);
16406       for (input_bfd = info->input_bfds;
16407 	   input_bfd != NULL;
16408 	   input_bfd = input_bfd->link.next)
16409 	{
16410 	  struct arm_local_iplt_info **local_iplt;
16411 	  unsigned int i, num_syms;
16412 
16413 	  local_iplt = elf32_arm_local_iplt (input_bfd);
16414 	  if (local_iplt != NULL)
16415 	    {
16416 	      num_syms = elf_symtab_hdr (input_bfd).sh_info;
16417 	      for (i = 0; i < num_syms; i++)
16418 		if (local_iplt[i] != NULL
16419 		    && !elf32_arm_output_plt_map_1 (&osi, TRUE,
16420 						    &local_iplt[i]->root,
16421 						    &local_iplt[i]->arm))
16422 		  return FALSE;
16423 	    }
16424 	}
16425     }
16426   if (htab->dt_tlsdesc_plt != 0)
16427     {
16428       /* Mapping symbols for the lazy tls trampoline.  */
16429       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->dt_tlsdesc_plt))
16430 	return FALSE;
16431 
16432       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16433 				     htab->dt_tlsdesc_plt + 24))
16434 	return FALSE;
16435     }
16436   if (htab->tls_trampoline != 0)
16437     {
16438       /* Mapping symbols for the tls trampoline.  */
16439       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_ARM, htab->tls_trampoline))
16440 	return FALSE;
16441 #ifdef FOUR_WORD_PLT
16442       if (!elf32_arm_output_map_sym (&osi, ARM_MAP_DATA,
16443 				     htab->tls_trampoline + 12))
16444 	return FALSE;
16445 #endif
16446     }
16447 
16448   return TRUE;
16449 }
16450 
16451 /* Allocate target specific section data.  */
16452 
16453 static bfd_boolean
16454 elf32_arm_new_section_hook (bfd *abfd, asection *sec)
16455 {
16456   if (!sec->used_by_bfd)
16457     {
16458       _arm_elf_section_data *sdata;
16459       bfd_size_type amt = sizeof (*sdata);
16460 
16461       sdata = (_arm_elf_section_data *) bfd_zalloc (abfd, amt);
16462       if (sdata == NULL)
16463 	return FALSE;
16464       sec->used_by_bfd = sdata;
16465     }
16466 
16467   return _bfd_elf_new_section_hook (abfd, sec);
16468 }
16469 
16470 
16471 /* Used to order a list of mapping symbols by address.  */
16472 
16473 static int
16474 elf32_arm_compare_mapping (const void * a, const void * b)
16475 {
16476   const elf32_arm_section_map *amap = (const elf32_arm_section_map *) a;
16477   const elf32_arm_section_map *bmap = (const elf32_arm_section_map *) b;
16478 
16479   if (amap->vma > bmap->vma)
16480     return 1;
16481   else if (amap->vma < bmap->vma)
16482     return -1;
16483   else if (amap->type > bmap->type)
16484     /* Ensure results do not depend on the host qsort for objects with
16485        multiple mapping symbols at the same address by sorting on type
16486        after vma.  */
16487     return 1;
16488   else if (amap->type < bmap->type)
16489     return -1;
16490   else
16491     return 0;
16492 }
16493 
16494 /* Add OFFSET to lower 31 bits of ADDR, leaving other bits unmodified.  */
16495 
16496 static unsigned long
16497 offset_prel31 (unsigned long addr, bfd_vma offset)
16498 {
16499   return (addr & ~0x7ffffffful) | ((addr + offset) & 0x7ffffffful);
16500 }
16501 
16502 /* Copy an .ARM.exidx table entry, adding OFFSET to (applied) PREL31
16503    relocations.  */
16504 
16505 static void
16506 copy_exidx_entry (bfd *output_bfd, bfd_byte *to, bfd_byte *from, bfd_vma offset)
16507 {
16508   unsigned long first_word = bfd_get_32 (output_bfd, from);
16509   unsigned long second_word = bfd_get_32 (output_bfd, from + 4);
16510 
16511   /* High bit of first word is supposed to be zero.  */
16512   if ((first_word & 0x80000000ul) == 0)
16513     first_word = offset_prel31 (first_word, offset);
16514 
16515   /* If the high bit of the first word is clear, and the bit pattern is not 0x1
16516      (EXIDX_CANTUNWIND), this is an offset to an .ARM.extab entry.  */
16517   if ((second_word != 0x1) && ((second_word & 0x80000000ul) == 0))
16518     second_word = offset_prel31 (second_word, offset);
16519 
16520   bfd_put_32 (output_bfd, first_word, to);
16521   bfd_put_32 (output_bfd, second_word, to + 4);
16522 }
16523 
16524 /* Data for make_branch_to_a8_stub().  */
16525 
16526 struct a8_branch_to_stub_data
16527 {
16528   asection *writing_section;
16529   bfd_byte *contents;
16530 };
16531 
16532 
16533 /* Helper to insert branches to Cortex-A8 erratum stubs in the right
16534    places for a particular section.  */
16535 
16536 static bfd_boolean
16537 make_branch_to_a8_stub (struct bfd_hash_entry *gen_entry,
16538 		       void *in_arg)
16539 {
16540   struct elf32_arm_stub_hash_entry *stub_entry;
16541   struct a8_branch_to_stub_data *data;
16542   bfd_byte *contents;
16543   unsigned long branch_insn;
16544   bfd_vma veneered_insn_loc, veneer_entry_loc;
16545   bfd_signed_vma branch_offset;
16546   bfd *abfd;
16547   unsigned int loc;
16548 
16549   stub_entry = (struct elf32_arm_stub_hash_entry *) gen_entry;
16550   data = (struct a8_branch_to_stub_data *) in_arg;
16551 
16552   if (stub_entry->target_section != data->writing_section
16553       || stub_entry->stub_type < arm_stub_a8_veneer_lwm)
16554     return TRUE;
16555 
16556   contents = data->contents;
16557 
16558   /* We use target_section as Cortex-A8 erratum workaround stubs are only
16559      generated when both source and target are in the same section.  */
16560   veneered_insn_loc = stub_entry->target_section->output_section->vma
16561 		      + stub_entry->target_section->output_offset
16562 		      + stub_entry->source_value;
16563 
16564   veneer_entry_loc = stub_entry->stub_sec->output_section->vma
16565 		     + stub_entry->stub_sec->output_offset
16566 		     + stub_entry->stub_offset;
16567 
16568   if (stub_entry->stub_type == arm_stub_a8_veneer_blx)
16569     veneered_insn_loc &= ~3u;
16570 
16571   branch_offset = veneer_entry_loc - veneered_insn_loc - 4;
16572 
16573   abfd = stub_entry->target_section->owner;
16574   loc = stub_entry->source_value;
16575 
16576   /* We attempt to avoid this condition by setting stubs_always_after_branch
16577      in elf32_arm_size_stubs if we've enabled the Cortex-A8 erratum workaround.
16578      This check is just to be on the safe side...  */
16579   if ((veneered_insn_loc & ~0xfff) == (veneer_entry_loc & ~0xfff))
16580     {
16581       (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub is "
16582 			       "allocated in unsafe location"), abfd);
16583       return FALSE;
16584     }
16585 
16586   switch (stub_entry->stub_type)
16587     {
16588     case arm_stub_a8_veneer_b:
16589     case arm_stub_a8_veneer_b_cond:
16590       branch_insn = 0xf0009000;
16591       goto jump24;
16592 
16593     case arm_stub_a8_veneer_blx:
16594       branch_insn = 0xf000e800;
16595       goto jump24;
16596 
16597     case arm_stub_a8_veneer_bl:
16598       {
16599 	unsigned int i1, j1, i2, j2, s;
16600 
16601 	branch_insn = 0xf000d000;
16602 
16603       jump24:
16604 	if (branch_offset < -16777216 || branch_offset > 16777214)
16605 	  {
16606 	    /* There's not much we can do apart from complain if this
16607 	       happens.  */
16608 	    (*_bfd_error_handler) (_("%B: error: Cortex-A8 erratum stub out "
16609 				     "of range (input file too large)"), abfd);
16610 	    return FALSE;
16611 	  }
16612 
16613 	/* i1 = not(j1 eor s), so:
16614 	   not i1 = j1 eor s
16615 	   j1 = (not i1) eor s.  */
16616 
16617 	branch_insn |= (branch_offset >> 1) & 0x7ff;
16618 	branch_insn |= ((branch_offset >> 12) & 0x3ff) << 16;
16619 	i2 = (branch_offset >> 22) & 1;
16620 	i1 = (branch_offset >> 23) & 1;
16621 	s = (branch_offset >> 24) & 1;
16622 	j1 = (!i1) ^ s;
16623 	j2 = (!i2) ^ s;
16624 	branch_insn |= j2 << 11;
16625 	branch_insn |= j1 << 13;
16626 	branch_insn |= s << 26;
16627       }
16628       break;
16629 
16630     default:
16631       BFD_FAIL ();
16632       return FALSE;
16633     }
16634 
16635   bfd_put_16 (abfd, (branch_insn >> 16) & 0xffff, &contents[loc]);
16636   bfd_put_16 (abfd, branch_insn & 0xffff, &contents[loc + 2]);
16637 
16638   return TRUE;
16639 }
16640 
16641 /* Beginning of stm32l4xx work-around.  */
16642 
16643 /* Functions encoding instructions necessary for the emission of the
16644    fix-stm32l4xx-629360.
16645    Encoding is extracted from the
16646    ARM (C) Architecture Reference Manual
16647    ARMv7-A and ARMv7-R edition
16648    ARM DDI 0406C.b (ID072512).  */
16649 
16650 static inline bfd_vma
16651 create_instruction_branch_absolute (int branch_offset)
16652 {
16653   /* A8.8.18 B (A8-334)
16654      B target_address (Encoding T4).  */
16655   /* 1111 - 0Sii - iiii - iiii - 10J1 - Jiii - iiii - iiii.  */
16656   /* jump offset is:  S:I1:I2:imm10:imm11:0.  */
16657   /* with : I1 = NOT (J1 EOR S) I2 = NOT (J2 EOR S).  */
16658 
16659   int s = ((branch_offset & 0x1000000) >> 24);
16660   int j1 = s ^ !((branch_offset & 0x800000) >> 23);
16661   int j2 = s ^ !((branch_offset & 0x400000) >> 22);
16662 
16663   if (branch_offset < -(1 << 24) || branch_offset >= (1 << 24))
16664     BFD_ASSERT (0 && "Error: branch out of range.  Cannot create branch.");
16665 
16666   bfd_vma patched_inst = 0xf0009000
16667     | s << 26 /* S.  */
16668     | (((unsigned long) (branch_offset) >> 12) & 0x3ff) << 16 /* imm10.  */
16669     | j1 << 13 /* J1.  */
16670     | j2 << 11 /* J2.  */
16671     | (((unsigned long) (branch_offset) >> 1) & 0x7ff); /* imm11.  */
16672 
16673   return patched_inst;
16674 }
16675 
16676 static inline bfd_vma
16677 create_instruction_ldmia (int base_reg, int wback, int reg_mask)
16678 {
16679   /* A8.8.57 LDM/LDMIA/LDMFD (A8-396)
16680      LDMIA Rn!, {Ra, Rb, Rc, ...} (Encoding T2).  */
16681   bfd_vma patched_inst = 0xe8900000
16682     | (/*W=*/wback << 21)
16683     | (base_reg << 16)
16684     | (reg_mask & 0x0000ffff);
16685 
16686   return patched_inst;
16687 }
16688 
16689 static inline bfd_vma
16690 create_instruction_ldmdb (int base_reg, int wback, int reg_mask)
16691 {
16692   /* A8.8.60 LDMDB/LDMEA (A8-402)
16693      LDMDB Rn!, {Ra, Rb, Rc, ...} (Encoding T1).  */
16694   bfd_vma patched_inst = 0xe9100000
16695     | (/*W=*/wback << 21)
16696     | (base_reg << 16)
16697     | (reg_mask & 0x0000ffff);
16698 
16699   return patched_inst;
16700 }
16701 
16702 static inline bfd_vma
16703 create_instruction_mov (int target_reg, int source_reg)
16704 {
16705   /* A8.8.103 MOV (register) (A8-486)
16706      MOV Rd, Rm (Encoding T1).  */
16707   bfd_vma patched_inst = 0x4600
16708     | (target_reg & 0x7)
16709     | ((target_reg & 0x8) >> 3) << 7
16710     | (source_reg << 3);
16711 
16712   return patched_inst;
16713 }
16714 
16715 static inline bfd_vma
16716 create_instruction_sub (int target_reg, int source_reg, int value)
16717 {
16718   /* A8.8.221 SUB (immediate) (A8-708)
16719      SUB Rd, Rn, #value (Encoding T3).  */
16720   bfd_vma patched_inst = 0xf1a00000
16721     | (target_reg << 8)
16722     | (source_reg << 16)
16723     | (/*S=*/0 << 20)
16724     | ((value & 0x800) >> 11) << 26
16725     | ((value & 0x700) >>  8) << 12
16726     | (value & 0x0ff);
16727 
16728   return patched_inst;
16729 }
16730 
16731 static inline bfd_vma
16732 create_instruction_vldmia (int base_reg, int is_dp, int wback, int num_words,
16733 			   int first_reg)
16734 {
16735   /* A8.8.332 VLDM (A8-922)
16736      VLMD{MODE} Rn{!}, {list} (Encoding T1 or T2).  */
16737   bfd_vma patched_inst = (is_dp ? 0xec900b00 : 0xec900a00)
16738     | (/*W=*/wback << 21)
16739     | (base_reg << 16)
16740     | (num_words & 0x000000ff)
16741     | (((unsigned)first_reg >> 1) & 0x0000000f) << 12
16742     | (first_reg & 0x00000001) << 22;
16743 
16744   return patched_inst;
16745 }
16746 
16747 static inline bfd_vma
16748 create_instruction_vldmdb (int base_reg, int is_dp, int num_words,
16749 			   int first_reg)
16750 {
16751   /* A8.8.332 VLDM (A8-922)
16752      VLMD{MODE} Rn!, {} (Encoding T1 or T2).  */
16753   bfd_vma patched_inst = (is_dp ? 0xed300b00 : 0xed300a00)
16754     | (base_reg << 16)
16755     | (num_words & 0x000000ff)
16756     | (((unsigned)first_reg >>1 ) & 0x0000000f) << 12
16757     | (first_reg & 0x00000001) << 22;
16758 
16759   return patched_inst;
16760 }
16761 
16762 static inline bfd_vma
16763 create_instruction_udf_w (int value)
16764 {
16765   /* A8.8.247 UDF (A8-758)
16766      Undefined (Encoding T2).  */
16767   bfd_vma patched_inst = 0xf7f0a000
16768     | (value & 0x00000fff)
16769     | (value & 0x000f0000) << 16;
16770 
16771   return patched_inst;
16772 }
16773 
16774 static inline bfd_vma
16775 create_instruction_udf (int value)
16776 {
16777   /* A8.8.247 UDF (A8-758)
16778      Undefined (Encoding T1).  */
16779   bfd_vma patched_inst = 0xde00
16780     | (value & 0xff);
16781 
16782   return patched_inst;
16783 }
16784 
16785 /* Functions writing an instruction in memory, returning the next
16786    memory position to write to.  */
16787 
16788 static inline bfd_byte *
16789 push_thumb2_insn32 (struct elf32_arm_link_hash_table * htab,
16790 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
16791 {
16792   put_thumb2_insn (htab, output_bfd, insn, pt);
16793   return pt + 4;
16794 }
16795 
16796 static inline bfd_byte *
16797 push_thumb2_insn16 (struct elf32_arm_link_hash_table * htab,
16798 		    bfd * output_bfd, bfd_byte *pt, insn32 insn)
16799 {
16800   put_thumb_insn (htab, output_bfd, insn, pt);
16801   return pt + 2;
16802 }
16803 
16804 /* Function filling up a region in memory with T1 and T2 UDFs taking
16805    care of alignment.  */
16806 
16807 static bfd_byte *
16808 stm32l4xx_fill_stub_udf (struct elf32_arm_link_hash_table * htab,
16809 			 bfd *                   output_bfd,
16810 			 const bfd_byte * const  base_stub_contents,
16811 			 bfd_byte * const        from_stub_contents,
16812 			 const bfd_byte * const  end_stub_contents)
16813 {
16814   bfd_byte *current_stub_contents = from_stub_contents;
16815 
16816   /* Fill the remaining of the stub with deterministic contents : UDF
16817      instructions.
16818      Check if realignment is needed on modulo 4 frontier using T1, to
16819      further use T2.  */
16820   if ((current_stub_contents < end_stub_contents)
16821       && !((current_stub_contents - base_stub_contents) % 2)
16822       && ((current_stub_contents - base_stub_contents) % 4))
16823     current_stub_contents =
16824       push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16825 			  create_instruction_udf (0));
16826 
16827   for (; current_stub_contents < end_stub_contents;)
16828     current_stub_contents =
16829       push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16830 			  create_instruction_udf_w (0));
16831 
16832   return current_stub_contents;
16833 }
16834 
16835 /* Functions writing the stream of instructions equivalent to the
16836    derived sequence for ldmia, ldmdb, vldm respectively.  */
16837 
16838 static void
16839 stm32l4xx_create_replacing_stub_ldmia (struct elf32_arm_link_hash_table * htab,
16840 				       bfd * output_bfd,
16841 				       const insn32 initial_insn,
16842 				       const bfd_byte *const initial_insn_addr,
16843 				       bfd_byte *const base_stub_contents)
16844 {
16845   int wback = (initial_insn & 0x00200000) >> 21;
16846   int ri, rn = (initial_insn & 0x000F0000) >> 16;
16847   int insn_all_registers = initial_insn & 0x0000ffff;
16848   int insn_low_registers, insn_high_registers;
16849   int usable_register_mask;
16850   int nb_registers = popcount (insn_all_registers);
16851   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
16852   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
16853   bfd_byte *current_stub_contents = base_stub_contents;
16854 
16855   BFD_ASSERT (is_thumb2_ldmia (initial_insn));
16856 
16857   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
16858      smaller than 8 registers load sequences that do not cause the
16859      hardware issue.  */
16860   if (nb_registers <= 8)
16861     {
16862       /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
16863       current_stub_contents =
16864 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16865 			    initial_insn);
16866 
16867       /* B initial_insn_addr+4.  */
16868       if (!restore_pc)
16869 	current_stub_contents =
16870 	  push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16871 			      create_instruction_branch_absolute
16872 			      (initial_insn_addr - current_stub_contents));
16873 
16874 
16875       /* Fill the remaining of the stub with deterministic contents.  */
16876       current_stub_contents =
16877 	stm32l4xx_fill_stub_udf (htab, output_bfd,
16878 				 base_stub_contents, current_stub_contents,
16879 				 base_stub_contents +
16880 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
16881 
16882       return;
16883     }
16884 
16885   /* - reg_list[13] == 0.  */
16886   BFD_ASSERT ((insn_all_registers & (1 << 13))==0);
16887 
16888   /* - reg_list[14] & reg_list[15] != 1.  */
16889   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
16890 
16891   /* - if (wback==1) reg_list[rn] == 0.  */
16892   BFD_ASSERT (!wback || !restore_rn);
16893 
16894   /* - nb_registers > 8.  */
16895   BFD_ASSERT (popcount (insn_all_registers) > 8);
16896 
16897   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
16898 
16899   /* In the following algorithm, we split this wide LDM using 2 LDM insns:
16900     - One with the 7 lowest registers (register mask 0x007F)
16901       This LDM will finally contain between 2 and 7 registers
16902     - One with the 7 highest registers (register mask 0xDF80)
16903       This ldm will finally contain between 2 and 7 registers.  */
16904   insn_low_registers = insn_all_registers & 0x007F;
16905   insn_high_registers = insn_all_registers & 0xDF80;
16906 
16907   /* A spare register may be needed during this veneer to temporarily
16908      handle the base register.  This register will be restored with the
16909      last LDM operation.
16910      The usable register may be any general purpose register (that
16911      excludes PC, SP, LR : register mask is 0x1FFF).  */
16912   usable_register_mask = 0x1FFF;
16913 
16914   /* Generate the stub function.  */
16915   if (wback)
16916     {
16917       /* LDMIA Rn!, {R-low-register-list} : (Encoding T2).  */
16918       current_stub_contents =
16919 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16920 			    create_instruction_ldmia
16921 			    (rn, /*wback=*/1, insn_low_registers));
16922 
16923       /* LDMIA Rn!, {R-high-register-list} : (Encoding T2).  */
16924       current_stub_contents =
16925 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16926 			    create_instruction_ldmia
16927 			    (rn, /*wback=*/1, insn_high_registers));
16928       if (!restore_pc)
16929 	{
16930 	  /* B initial_insn_addr+4.  */
16931 	  current_stub_contents =
16932 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16933 				create_instruction_branch_absolute
16934 				(initial_insn_addr - current_stub_contents));
16935        }
16936     }
16937   else /* if (!wback).  */
16938     {
16939       ri = rn;
16940 
16941       /* If Rn is not part of the high-register-list, move it there.  */
16942       if (!(insn_high_registers & (1 << rn)))
16943 	{
16944 	  /* Choose a Ri in the high-register-list that will be restored.  */
16945 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
16946 
16947 	  /* MOV Ri, Rn.  */
16948 	  current_stub_contents =
16949 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
16950 				create_instruction_mov (ri, rn));
16951 	}
16952 
16953       /* LDMIA Ri!, {R-low-register-list} : (Encoding T2).  */
16954       current_stub_contents =
16955 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16956 			    create_instruction_ldmia
16957 			    (ri, /*wback=*/1, insn_low_registers));
16958 
16959       /* LDMIA Ri, {R-high-register-list} : (Encoding T2).  */
16960       current_stub_contents =
16961 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16962 			    create_instruction_ldmia
16963 			    (ri, /*wback=*/0, insn_high_registers));
16964 
16965       if (!restore_pc)
16966 	{
16967 	  /* B initial_insn_addr+4.  */
16968 	  current_stub_contents =
16969 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
16970 				create_instruction_branch_absolute
16971 				(initial_insn_addr - current_stub_contents));
16972 	}
16973     }
16974 
16975   /* Fill the remaining of the stub with deterministic contents.  */
16976   current_stub_contents =
16977     stm32l4xx_fill_stub_udf (htab, output_bfd,
16978 			     base_stub_contents, current_stub_contents,
16979 			     base_stub_contents +
16980 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
16981 }
16982 
16983 static void
16984 stm32l4xx_create_replacing_stub_ldmdb (struct elf32_arm_link_hash_table * htab,
16985 				       bfd * output_bfd,
16986 				       const insn32 initial_insn,
16987 				       const bfd_byte *const initial_insn_addr,
16988 				       bfd_byte *const base_stub_contents)
16989 {
16990   int wback = (initial_insn & 0x00200000) >> 21;
16991   int ri, rn = (initial_insn & 0x000f0000) >> 16;
16992   int insn_all_registers = initial_insn & 0x0000ffff;
16993   int insn_low_registers, insn_high_registers;
16994   int usable_register_mask;
16995   int restore_pc = (insn_all_registers & (1 << 15)) ? 1 : 0;
16996   int restore_rn = (insn_all_registers & (1 << rn)) ? 1 : 0;
16997   int nb_registers = popcount (insn_all_registers);
16998   bfd_byte *current_stub_contents = base_stub_contents;
16999 
17000   BFD_ASSERT (is_thumb2_ldmdb (initial_insn));
17001 
17002   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
17003      smaller than 8 registers load sequences that do not cause the
17004      hardware issue.  */
17005   if (nb_registers <= 8)
17006     {
17007       /* UNTOUCHED : LDMIA Rn{!}, {R-all-register-list}.  */
17008       current_stub_contents =
17009 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17010 			    initial_insn);
17011 
17012       /* B initial_insn_addr+4.  */
17013       current_stub_contents =
17014 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17015 			    create_instruction_branch_absolute
17016 			    (initial_insn_addr - current_stub_contents));
17017 
17018       /* Fill the remaining of the stub with deterministic contents.  */
17019       current_stub_contents =
17020 	stm32l4xx_fill_stub_udf (htab, output_bfd,
17021 				 base_stub_contents, current_stub_contents,
17022 				 base_stub_contents +
17023 				 STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
17024 
17025       return;
17026     }
17027 
17028   /* - reg_list[13] == 0.  */
17029   BFD_ASSERT ((insn_all_registers & (1 << 13)) == 0);
17030 
17031   /* - reg_list[14] & reg_list[15] != 1.  */
17032   BFD_ASSERT ((insn_all_registers & 0xC000) != 0xC000);
17033 
17034   /* - if (wback==1) reg_list[rn] == 0.  */
17035   BFD_ASSERT (!wback || !restore_rn);
17036 
17037   /* - nb_registers > 8.  */
17038   BFD_ASSERT (popcount (insn_all_registers) > 8);
17039 
17040   /* At this point, LDMxx initial insn loads between 9 and 14 registers.  */
17041 
17042   /* In the following algorithm, we split this wide LDM using 2 LDM insn:
17043     - One with the 7 lowest registers (register mask 0x007F)
17044       This LDM will finally contain between 2 and 7 registers
17045     - One with the 7 highest registers (register mask 0xDF80)
17046       This ldm will finally contain between 2 and 7 registers.  */
17047   insn_low_registers = insn_all_registers & 0x007F;
17048   insn_high_registers = insn_all_registers & 0xDF80;
17049 
17050   /* A spare register may be needed during this veneer to temporarily
17051      handle the base register.  This register will be restored with
17052      the last LDM operation.
17053      The usable register may be any general purpose register (that excludes
17054      PC, SP, LR : register mask is 0x1FFF).  */
17055   usable_register_mask = 0x1FFF;
17056 
17057   /* Generate the stub function.  */
17058   if (!wback && !restore_pc && !restore_rn)
17059     {
17060       /* Choose a Ri in the low-register-list that will be restored.  */
17061       ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
17062 
17063       /* MOV Ri, Rn.  */
17064       current_stub_contents =
17065 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17066 			    create_instruction_mov (ri, rn));
17067 
17068       /* LDMDB Ri!, {R-high-register-list}.  */
17069       current_stub_contents =
17070 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17071 			    create_instruction_ldmdb
17072 			    (ri, /*wback=*/1, insn_high_registers));
17073 
17074       /* LDMDB Ri, {R-low-register-list}.  */
17075       current_stub_contents =
17076 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17077 			    create_instruction_ldmdb
17078 			    (ri, /*wback=*/0, insn_low_registers));
17079 
17080       /* B initial_insn_addr+4.  */
17081       current_stub_contents =
17082 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17083 			    create_instruction_branch_absolute
17084 			    (initial_insn_addr - current_stub_contents));
17085     }
17086   else if (wback && !restore_pc && !restore_rn)
17087     {
17088       /* LDMDB Rn!, {R-high-register-list}.  */
17089       current_stub_contents =
17090 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17091 			    create_instruction_ldmdb
17092 			    (rn, /*wback=*/1, insn_high_registers));
17093 
17094       /* LDMDB Rn!, {R-low-register-list}.  */
17095       current_stub_contents =
17096 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17097 			    create_instruction_ldmdb
17098 			    (rn, /*wback=*/1, insn_low_registers));
17099 
17100       /* B initial_insn_addr+4.  */
17101       current_stub_contents =
17102 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17103 			    create_instruction_branch_absolute
17104 			    (initial_insn_addr - current_stub_contents));
17105     }
17106   else if (!wback && restore_pc && !restore_rn)
17107     {
17108       /* Choose a Ri in the high-register-list that will be restored.  */
17109       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17110 
17111       /* SUB Ri, Rn, #(4*nb_registers).  */
17112       current_stub_contents =
17113 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17114 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
17115 
17116       /* LDMIA Ri!, {R-low-register-list}.  */
17117       current_stub_contents =
17118 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17119 			    create_instruction_ldmia
17120 			    (ri, /*wback=*/1, insn_low_registers));
17121 
17122       /* LDMIA Ri, {R-high-register-list}.  */
17123       current_stub_contents =
17124 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17125 			    create_instruction_ldmia
17126 			    (ri, /*wback=*/0, insn_high_registers));
17127     }
17128   else if (wback && restore_pc && !restore_rn)
17129     {
17130       /* Choose a Ri in the high-register-list that will be restored.  */
17131       ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17132 
17133       /* SUB Rn, Rn, #(4*nb_registers)  */
17134       current_stub_contents =
17135 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17136 			    create_instruction_sub (rn, rn, (4 * nb_registers)));
17137 
17138       /* MOV Ri, Rn.  */
17139       current_stub_contents =
17140 	push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17141 			    create_instruction_mov (ri, rn));
17142 
17143       /* LDMIA Ri!, {R-low-register-list}.  */
17144       current_stub_contents =
17145 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17146 			    create_instruction_ldmia
17147 			    (ri, /*wback=*/1, insn_low_registers));
17148 
17149       /* LDMIA Ri, {R-high-register-list}.  */
17150       current_stub_contents =
17151 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17152 			    create_instruction_ldmia
17153 			    (ri, /*wback=*/0, insn_high_registers));
17154     }
17155   else if (!wback && !restore_pc && restore_rn)
17156     {
17157       ri = rn;
17158       if (!(insn_low_registers & (1 << rn)))
17159 	{
17160 	  /* Choose a Ri in the low-register-list that will be restored.  */
17161 	  ri = ctz (insn_low_registers & usable_register_mask & ~(1 << rn));
17162 
17163 	  /* MOV Ri, Rn.  */
17164 	  current_stub_contents =
17165 	    push_thumb2_insn16 (htab, output_bfd, current_stub_contents,
17166 				create_instruction_mov (ri, rn));
17167 	}
17168 
17169       /* LDMDB Ri!, {R-high-register-list}.  */
17170       current_stub_contents =
17171 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17172 			    create_instruction_ldmdb
17173 			    (ri, /*wback=*/1, insn_high_registers));
17174 
17175       /* LDMDB Ri, {R-low-register-list}.  */
17176       current_stub_contents =
17177 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17178 			    create_instruction_ldmdb
17179 			    (ri, /*wback=*/0, insn_low_registers));
17180 
17181       /* B initial_insn_addr+4.  */
17182       current_stub_contents =
17183 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17184 			    create_instruction_branch_absolute
17185 			    (initial_insn_addr - current_stub_contents));
17186     }
17187   else if (!wback && restore_pc && restore_rn)
17188     {
17189       ri = rn;
17190       if (!(insn_high_registers & (1 << rn)))
17191 	{
17192 	  /* Choose a Ri in the high-register-list that will be restored.  */
17193 	  ri = ctz (insn_high_registers & usable_register_mask & ~(1 << rn));
17194 	}
17195 
17196       /* SUB Ri, Rn, #(4*nb_registers).  */
17197       current_stub_contents =
17198 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17199 			    create_instruction_sub (ri, rn, (4 * nb_registers)));
17200 
17201       /* LDMIA Ri!, {R-low-register-list}.  */
17202       current_stub_contents =
17203 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17204 			    create_instruction_ldmia
17205 			    (ri, /*wback=*/1, insn_low_registers));
17206 
17207       /* LDMIA Ri, {R-high-register-list}.  */
17208       current_stub_contents =
17209 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17210 			    create_instruction_ldmia
17211 			    (ri, /*wback=*/0, insn_high_registers));
17212     }
17213   else if (wback && restore_rn)
17214     {
17215       /* The assembler should not have accepted to encode this.  */
17216       BFD_ASSERT (0 && "Cannot patch an instruction that has an "
17217 	"undefined behavior.\n");
17218     }
17219 
17220   /* Fill the remaining of the stub with deterministic contents.  */
17221   current_stub_contents =
17222     stm32l4xx_fill_stub_udf (htab, output_bfd,
17223 			     base_stub_contents, current_stub_contents,
17224 			     base_stub_contents +
17225 			     STM32L4XX_ERRATUM_LDM_VENEER_SIZE);
17226 
17227 }
17228 
17229 static void
17230 stm32l4xx_create_replacing_stub_vldm (struct elf32_arm_link_hash_table * htab,
17231 				      bfd * output_bfd,
17232 				      const insn32 initial_insn,
17233 				      const bfd_byte *const initial_insn_addr,
17234 				      bfd_byte *const base_stub_contents)
17235 {
17236   int num_words = ((unsigned int) initial_insn << 24) >> 24;
17237   bfd_byte *current_stub_contents = base_stub_contents;
17238 
17239   BFD_ASSERT (is_thumb2_vldm (initial_insn));
17240 
17241   /* In BFD_ARM_STM32L4XX_FIX_ALL mode we may have to deal with
17242      smaller than 8 words load sequences that do not cause the
17243      hardware issue.  */
17244   if (num_words <= 8)
17245     {
17246       /* Untouched instruction.  */
17247       current_stub_contents =
17248 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17249 			    initial_insn);
17250 
17251       /* B initial_insn_addr+4.  */
17252       current_stub_contents =
17253 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17254 			    create_instruction_branch_absolute
17255 			    (initial_insn_addr - current_stub_contents));
17256     }
17257   else
17258     {
17259       bfd_boolean is_dp = /* DP encoding. */
17260 	(initial_insn & 0xfe100f00) == 0xec100b00;
17261       bfd_boolean is_ia_nobang = /* (IA without !).  */
17262 	(((initial_insn << 7) >> 28) & 0xd) == 0x4;
17263       bfd_boolean is_ia_bang = /* (IA with !) - includes VPOP.  */
17264 	(((initial_insn << 7) >> 28) & 0xd) == 0x5;
17265       bfd_boolean is_db_bang = /* (DB with !).  */
17266 	(((initial_insn << 7) >> 28) & 0xd) == 0x9;
17267       int base_reg = ((unsigned int) initial_insn << 12) >> 28;
17268       /* d = UInt (Vd:D);.  */
17269       int first_reg = ((((unsigned int) initial_insn << 16) >> 28) << 1)
17270 	| (((unsigned int)initial_insn << 9) >> 31);
17271 
17272       /* Compute the number of 8-words chunks needed to split.  */
17273       int chunks = (num_words % 8) ? (num_words / 8 + 1) : (num_words / 8);
17274       int chunk;
17275 
17276       /* The test coverage has been done assuming the following
17277 	 hypothesis that exactly one of the previous is_ predicates is
17278 	 true.  */
17279       BFD_ASSERT (    (is_ia_nobang ^ is_ia_bang ^ is_db_bang)
17280 		  && !(is_ia_nobang & is_ia_bang & is_db_bang));
17281 
17282       /* We treat the cutting of the words in one pass for all
17283 	 cases, then we emit the adjustments:
17284 
17285 	 vldm rx, {...}
17286 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
17287 	 -> sub rx, rx, #size (list)
17288 
17289 	 vldm rx!, {...}
17290 	 -> vldm rx!, {8_words_or_less} for each needed 8_word
17291 	 This also handles vpop instruction (when rx is sp)
17292 
17293 	 vldmd rx!, {...}
17294 	 -> vldmb rx!, {8_words_or_less} for each needed 8_word.  */
17295       for (chunk = 0; chunk < chunks; ++chunk)
17296 	{
17297 	  bfd_vma new_insn = 0;
17298 
17299 	  if (is_ia_nobang || is_ia_bang)
17300 	    {
17301 	      new_insn = create_instruction_vldmia
17302 		(base_reg,
17303 		 is_dp,
17304 		 /*wback= .  */1,
17305 		 chunks - (chunk + 1) ?
17306 		 8 : num_words - chunk * 8,
17307 		 first_reg + chunk * 8);
17308 	    }
17309 	  else if (is_db_bang)
17310 	    {
17311 	      new_insn = create_instruction_vldmdb
17312 		(base_reg,
17313 		 is_dp,
17314 		 chunks - (chunk + 1) ?
17315 		 8 : num_words - chunk * 8,
17316 		 first_reg + chunk * 8);
17317 	    }
17318 
17319 	  if (new_insn)
17320 	    current_stub_contents =
17321 	      push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17322 				  new_insn);
17323 	}
17324 
17325       /* Only this case requires the base register compensation
17326 	 subtract.  */
17327       if (is_ia_nobang)
17328 	{
17329 	  current_stub_contents =
17330 	    push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17331 				create_instruction_sub
17332 				(base_reg, base_reg, 4*num_words));
17333 	}
17334 
17335       /* B initial_insn_addr+4.  */
17336       current_stub_contents =
17337 	push_thumb2_insn32 (htab, output_bfd, current_stub_contents,
17338 			    create_instruction_branch_absolute
17339 			    (initial_insn_addr - current_stub_contents));
17340     }
17341 
17342   /* Fill the remaining of the stub with deterministic contents.  */
17343   current_stub_contents =
17344     stm32l4xx_fill_stub_udf (htab, output_bfd,
17345 			     base_stub_contents, current_stub_contents,
17346 			     base_stub_contents +
17347 			     STM32L4XX_ERRATUM_VLDM_VENEER_SIZE);
17348 }
17349 
17350 static void
17351 stm32l4xx_create_replacing_stub (struct elf32_arm_link_hash_table * htab,
17352 				 bfd * output_bfd,
17353 				 const insn32 wrong_insn,
17354 				 const bfd_byte *const wrong_insn_addr,
17355 				 bfd_byte *const stub_contents)
17356 {
17357   if (is_thumb2_ldmia (wrong_insn))
17358     stm32l4xx_create_replacing_stub_ldmia (htab, output_bfd,
17359 					   wrong_insn, wrong_insn_addr,
17360 					   stub_contents);
17361   else if (is_thumb2_ldmdb (wrong_insn))
17362     stm32l4xx_create_replacing_stub_ldmdb (htab, output_bfd,
17363 					   wrong_insn, wrong_insn_addr,
17364 					   stub_contents);
17365   else if (is_thumb2_vldm (wrong_insn))
17366     stm32l4xx_create_replacing_stub_vldm (htab, output_bfd,
17367 					  wrong_insn, wrong_insn_addr,
17368 					  stub_contents);
17369 }
17370 
17371 /* End of stm32l4xx work-around.  */
17372 
17373 
17374 static void
17375 elf32_arm_add_relocation (bfd *output_bfd, struct bfd_link_info *info,
17376 			  asection *output_sec, Elf_Internal_Rela *rel)
17377 {
17378   BFD_ASSERT (output_sec && rel);
17379   struct bfd_elf_section_reloc_data *output_reldata;
17380   struct elf32_arm_link_hash_table *htab;
17381   struct bfd_elf_section_data *oesd = elf_section_data (output_sec);
17382   Elf_Internal_Shdr *rel_hdr;
17383 
17384 
17385   if (oesd->rel.hdr)
17386     {
17387       rel_hdr = oesd->rel.hdr;
17388       output_reldata = &(oesd->rel);
17389     }
17390   else if (oesd->rela.hdr)
17391     {
17392       rel_hdr = oesd->rela.hdr;
17393       output_reldata = &(oesd->rela);
17394     }
17395   else
17396     {
17397       abort ();
17398     }
17399 
17400   bfd_byte *erel = rel_hdr->contents;
17401   erel += output_reldata->count * rel_hdr->sh_entsize;
17402   htab = elf32_arm_hash_table (info);
17403   SWAP_RELOC_OUT (htab) (output_bfd, rel, erel);
17404   output_reldata->count++;
17405 }
17406 
17407 /* Do code byteswapping.  Return FALSE afterwards so that the section is
17408    written out as normal.  */
17409 
17410 static bfd_boolean
17411 elf32_arm_write_section (bfd *output_bfd,
17412 			 struct bfd_link_info *link_info,
17413 			 asection *sec,
17414 			 bfd_byte *contents)
17415 {
17416   unsigned int mapcount, errcount;
17417   _arm_elf_section_data *arm_data;
17418   struct elf32_arm_link_hash_table *globals = elf32_arm_hash_table (link_info);
17419   elf32_arm_section_map *map;
17420   elf32_vfp11_erratum_list *errnode;
17421   elf32_stm32l4xx_erratum_list *stm32l4xx_errnode;
17422   bfd_vma ptr;
17423   bfd_vma end;
17424   bfd_vma offset = sec->output_section->vma + sec->output_offset;
17425   bfd_byte tmp;
17426   unsigned int i;
17427 
17428   if (globals == NULL)
17429     return FALSE;
17430 
17431   /* If this section has not been allocated an _arm_elf_section_data
17432      structure then we cannot record anything.  */
17433   arm_data = get_arm_elf_section_data (sec);
17434   if (arm_data == NULL)
17435     return FALSE;
17436 
17437   mapcount = arm_data->mapcount;
17438   map = arm_data->map;
17439   errcount = arm_data->erratumcount;
17440 
17441   if (errcount != 0)
17442     {
17443       unsigned int endianflip = bfd_big_endian (output_bfd) ? 3 : 0;
17444 
17445       for (errnode = arm_data->erratumlist; errnode != 0;
17446 	   errnode = errnode->next)
17447 	{
17448 	  bfd_vma target = errnode->vma - offset;
17449 
17450 	  switch (errnode->type)
17451 	    {
17452 	    case VFP11_ERRATUM_BRANCH_TO_ARM_VENEER:
17453 	      {
17454 		bfd_vma branch_to_veneer;
17455 		/* Original condition code of instruction, plus bit mask for
17456 		   ARM B instruction.  */
17457 		unsigned int insn = (errnode->u.b.vfp_insn & 0xf0000000)
17458 				  | 0x0a000000;
17459 
17460 		/* The instruction is before the label.  */
17461 		target -= 4;
17462 
17463 		/* Above offset included in -4 below.  */
17464 		branch_to_veneer = errnode->u.b.veneer->vma
17465 				   - errnode->vma - 4;
17466 
17467 		if ((signed) branch_to_veneer < -(1 << 25)
17468 		    || (signed) branch_to_veneer >= (1 << 25))
17469 		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17470 					   "range"), output_bfd);
17471 
17472 		insn |= (branch_to_veneer >> 2) & 0xffffff;
17473 		contents[endianflip ^ target] = insn & 0xff;
17474 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17475 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17476 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17477 	      }
17478 	      break;
17479 
17480 	    case VFP11_ERRATUM_ARM_VENEER:
17481 	      {
17482 		bfd_vma branch_from_veneer;
17483 		unsigned int insn;
17484 
17485 		/* Take size of veneer into account.  */
17486 		branch_from_veneer = errnode->u.v.branch->vma
17487 				     - errnode->vma - 12;
17488 
17489 		if ((signed) branch_from_veneer < -(1 << 25)
17490 		    || (signed) branch_from_veneer >= (1 << 25))
17491 		  (*_bfd_error_handler) (_("%B: error: VFP11 veneer out of "
17492 					   "range"), output_bfd);
17493 
17494 		/* Original instruction.  */
17495 		insn = errnode->u.v.branch->u.b.vfp_insn;
17496 		contents[endianflip ^ target] = insn & 0xff;
17497 		contents[endianflip ^ (target + 1)] = (insn >> 8) & 0xff;
17498 		contents[endianflip ^ (target + 2)] = (insn >> 16) & 0xff;
17499 		contents[endianflip ^ (target + 3)] = (insn >> 24) & 0xff;
17500 
17501 		/* Branch back to insn after original insn.  */
17502 		insn = 0xea000000 | ((branch_from_veneer >> 2) & 0xffffff);
17503 		contents[endianflip ^ (target + 4)] = insn & 0xff;
17504 		contents[endianflip ^ (target + 5)] = (insn >> 8) & 0xff;
17505 		contents[endianflip ^ (target + 6)] = (insn >> 16) & 0xff;
17506 		contents[endianflip ^ (target + 7)] = (insn >> 24) & 0xff;
17507 	      }
17508 	      break;
17509 
17510 	    default:
17511 	      abort ();
17512 	    }
17513 	}
17514     }
17515 
17516   if (arm_data->stm32l4xx_erratumcount != 0)
17517     {
17518       for (stm32l4xx_errnode = arm_data->stm32l4xx_erratumlist;
17519 	   stm32l4xx_errnode != 0;
17520 	   stm32l4xx_errnode = stm32l4xx_errnode->next)
17521 	{
17522 	  bfd_vma target = stm32l4xx_errnode->vma - offset;
17523 
17524 	  switch (stm32l4xx_errnode->type)
17525 	    {
17526 	    case STM32L4XX_ERRATUM_BRANCH_TO_VENEER:
17527 	      {
17528 		unsigned int insn;
17529 		bfd_vma branch_to_veneer =
17530 		  stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma;
17531 
17532 		if ((signed) branch_to_veneer < -(1 << 24)
17533 		    || (signed) branch_to_veneer >= (1 << 24))
17534 		  {
17535 		    bfd_vma out_of_range =
17536 		      ((signed) branch_to_veneer < -(1 << 24)) ?
17537 		      - branch_to_veneer - (1 << 24) :
17538 		      ((signed) branch_to_veneer >= (1 << 24)) ?
17539 		      branch_to_veneer - (1 << 24) : 0;
17540 
17541 		    (*_bfd_error_handler)
17542 		      (_("%B(%#x): error: Cannot create STM32L4XX veneer. "
17543 			 "Jump out of range by %ld bytes. "
17544 			 "Cannot encode branch instruction. "),
17545 		       output_bfd,
17546 		       (long) (stm32l4xx_errnode->vma - 4),
17547 		       out_of_range);
17548 		    continue;
17549 		  }
17550 
17551 		insn = create_instruction_branch_absolute
17552 		  (stm32l4xx_errnode->u.b.veneer->vma - stm32l4xx_errnode->vma);
17553 
17554 		/* The instruction is before the label.  */
17555 		target -= 4;
17556 
17557 		put_thumb2_insn (globals, output_bfd,
17558 				 (bfd_vma) insn, contents + target);
17559 	      }
17560 	      break;
17561 
17562 	    case STM32L4XX_ERRATUM_VENEER:
17563 	      {
17564 		bfd_byte * veneer;
17565 		bfd_byte * veneer_r;
17566 		unsigned int insn;
17567 
17568 		veneer = contents + target;
17569 		veneer_r = veneer
17570 		  + stm32l4xx_errnode->u.b.veneer->vma
17571 		  - stm32l4xx_errnode->vma - 4;
17572 
17573 		if ((signed) (veneer_r - veneer -
17574 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE >
17575 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE ?
17576 			      STM32L4XX_ERRATUM_VLDM_VENEER_SIZE :
17577 			      STM32L4XX_ERRATUM_LDM_VENEER_SIZE) < -(1 << 24)
17578 		    || (signed) (veneer_r - veneer) >= (1 << 24))
17579 		  {
17580 		    (*_bfd_error_handler) (_("%B: error: Cannot create STM32L4XX "
17581 					     "veneer."), output_bfd);
17582 		     continue;
17583 		  }
17584 
17585 		/* Original instruction.  */
17586 		insn = stm32l4xx_errnode->u.v.branch->u.b.insn;
17587 
17588 		stm32l4xx_create_replacing_stub
17589 		  (globals, output_bfd, insn, (void*)veneer_r, (void*)veneer);
17590 	      }
17591 	      break;
17592 
17593 	    default:
17594 	      abort ();
17595 	    }
17596 	}
17597     }
17598 
17599   if (arm_data->elf.this_hdr.sh_type == SHT_ARM_EXIDX)
17600     {
17601       arm_unwind_table_edit *edit_node
17602 	= arm_data->u.exidx.unwind_edit_list;
17603       /* Now, sec->size is the size of the section we will write.  The original
17604 	 size (before we merged duplicate entries and inserted EXIDX_CANTUNWIND
17605 	 markers) was sec->rawsize.  (This isn't the case if we perform no
17606 	 edits, then rawsize will be zero and we should use size).  */
17607       bfd_byte *edited_contents = (bfd_byte *) bfd_malloc (sec->size);
17608       unsigned int input_size = sec->rawsize ? sec->rawsize : sec->size;
17609       unsigned int in_index, out_index;
17610       bfd_vma add_to_offsets = 0;
17611 
17612       for (in_index = 0, out_index = 0; in_index * 8 < input_size || edit_node;)
17613 	{
17614 	  if (edit_node)
17615 	    {
17616 	      unsigned int edit_index = edit_node->index;
17617 
17618 	      if (in_index < edit_index && in_index * 8 < input_size)
17619 		{
17620 		  copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17621 				    contents + in_index * 8, add_to_offsets);
17622 		  out_index++;
17623 		  in_index++;
17624 		}
17625 	      else if (in_index == edit_index
17626 		       || (in_index * 8 >= input_size
17627 			   && edit_index == UINT_MAX))
17628 		{
17629 		  switch (edit_node->type)
17630 		    {
17631 		    case DELETE_EXIDX_ENTRY:
17632 		      in_index++;
17633 		      add_to_offsets += 8;
17634 		      break;
17635 
17636 		    case INSERT_EXIDX_CANTUNWIND_AT_END:
17637 		      {
17638 			asection *text_sec = edit_node->linked_section;
17639 			bfd_vma text_offset = text_sec->output_section->vma
17640 					      + text_sec->output_offset
17641 					      + text_sec->size;
17642 			bfd_vma exidx_offset = offset + out_index * 8;
17643 			unsigned long prel31_offset;
17644 
17645 			/* Note: this is meant to be equivalent to an
17646 			   R_ARM_PREL31 relocation.  These synthetic
17647 			   EXIDX_CANTUNWIND markers are not relocated by the
17648 			   usual BFD method.  */
17649 			prel31_offset = (text_offset - exidx_offset)
17650 					& 0x7ffffffful;
17651 			if (bfd_link_relocatable (link_info))
17652 			  {
17653 			    /* Here relocation for new EXIDX_CANTUNWIND is
17654 			       created, so there is no need to
17655 			       adjust offset by hand.  */
17656 			    prel31_offset = text_sec->output_offset
17657 					    + text_sec->size;
17658 
17659 			    /* New relocation entity.  */
17660 			    asection *text_out = text_sec->output_section;
17661 			    Elf_Internal_Rela rel;
17662 			    rel.r_addend = 0;
17663 			    rel.r_offset = exidx_offset;
17664 			    rel.r_info = ELF32_R_INFO (text_out->target_index,
17665 						       R_ARM_PREL31);
17666 
17667 			    elf32_arm_add_relocation (output_bfd, link_info,
17668 						      sec->output_section,
17669 						      &rel);
17670 			  }
17671 
17672 			/* First address we can't unwind.  */
17673 			bfd_put_32 (output_bfd, prel31_offset,
17674 				    &edited_contents[out_index * 8]);
17675 
17676 			/* Code for EXIDX_CANTUNWIND.  */
17677 			bfd_put_32 (output_bfd, 0x1,
17678 				    &edited_contents[out_index * 8 + 4]);
17679 
17680 			out_index++;
17681 			add_to_offsets -= 8;
17682 		      }
17683 		      break;
17684 		    }
17685 
17686 		  edit_node = edit_node->next;
17687 		}
17688 	    }
17689 	  else
17690 	    {
17691 	      /* No more edits, copy remaining entries verbatim.  */
17692 	      copy_exidx_entry (output_bfd, edited_contents + out_index * 8,
17693 				contents + in_index * 8, add_to_offsets);
17694 	      out_index++;
17695 	      in_index++;
17696 	    }
17697 	}
17698 
17699       if (!(sec->flags & SEC_EXCLUDE) && !(sec->flags & SEC_NEVER_LOAD))
17700 	bfd_set_section_contents (output_bfd, sec->output_section,
17701 				  edited_contents,
17702 				  (file_ptr) sec->output_offset, sec->size);
17703 
17704       return TRUE;
17705     }
17706 
17707   /* Fix code to point to Cortex-A8 erratum stubs.  */
17708   if (globals->fix_cortex_a8)
17709     {
17710       struct a8_branch_to_stub_data data;
17711 
17712       data.writing_section = sec;
17713       data.contents = contents;
17714 
17715       bfd_hash_traverse (& globals->stub_hash_table, make_branch_to_a8_stub,
17716 			 & data);
17717     }
17718 
17719   if (mapcount == 0)
17720     return FALSE;
17721 
17722   if (globals->byteswap_code)
17723     {
17724       qsort (map, mapcount, sizeof (* map), elf32_arm_compare_mapping);
17725 
17726       ptr = map[0].vma;
17727       for (i = 0; i < mapcount; i++)
17728 	{
17729 	  if (i == mapcount - 1)
17730 	    end = sec->size;
17731 	  else
17732 	    end = map[i + 1].vma;
17733 
17734 	  switch (map[i].type)
17735 	    {
17736 	    case 'a':
17737 	      /* Byte swap code words.  */
17738 	      while (ptr + 3 < end)
17739 		{
17740 		  tmp = contents[ptr];
17741 		  contents[ptr] = contents[ptr + 3];
17742 		  contents[ptr + 3] = tmp;
17743 		  tmp = contents[ptr + 1];
17744 		  contents[ptr + 1] = contents[ptr + 2];
17745 		  contents[ptr + 2] = tmp;
17746 		  ptr += 4;
17747 		}
17748 	      break;
17749 
17750 	    case 't':
17751 	      /* Byte swap code halfwords.  */
17752 	      while (ptr + 1 < end)
17753 		{
17754 		  tmp = contents[ptr];
17755 		  contents[ptr] = contents[ptr + 1];
17756 		  contents[ptr + 1] = tmp;
17757 		  ptr += 2;
17758 		}
17759 	      break;
17760 
17761 	    case 'd':
17762 	      /* Leave data alone.  */
17763 	      break;
17764 	    }
17765 	  ptr = end;
17766 	}
17767     }
17768 
17769   free (map);
17770   arm_data->mapcount = -1;
17771   arm_data->mapsize = 0;
17772   arm_data->map = NULL;
17773 
17774   return FALSE;
17775 }
17776 
17777 /* Mangle thumb function symbols as we read them in.  */
17778 
17779 static bfd_boolean
17780 elf32_arm_swap_symbol_in (bfd * abfd,
17781 			  const void *psrc,
17782 			  const void *pshn,
17783 			  Elf_Internal_Sym *dst)
17784 {
17785   if (!bfd_elf32_swap_symbol_in (abfd, psrc, pshn, dst))
17786     return FALSE;
17787   dst->st_target_internal = 0;
17788 
17789   /* New EABI objects mark thumb function symbols by setting the low bit of
17790      the address.  */
17791   if (ELF_ST_TYPE (dst->st_info) == STT_FUNC
17792       || ELF_ST_TYPE (dst->st_info) == STT_GNU_IFUNC)
17793     {
17794       if (dst->st_value & 1)
17795 	{
17796 	  dst->st_value &= ~(bfd_vma) 1;
17797 	  ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal,
17798 				   ST_BRANCH_TO_THUMB);
17799 	}
17800       else
17801 	ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_ARM);
17802     }
17803   else if (ELF_ST_TYPE (dst->st_info) == STT_ARM_TFUNC)
17804     {
17805       dst->st_info = ELF_ST_INFO (ELF_ST_BIND (dst->st_info), STT_FUNC);
17806       ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_TO_THUMB);
17807     }
17808   else if (ELF_ST_TYPE (dst->st_info) == STT_SECTION)
17809     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_LONG);
17810   else
17811     ARM_SET_SYM_BRANCH_TYPE (dst->st_target_internal, ST_BRANCH_UNKNOWN);
17812 
17813   return TRUE;
17814 }
17815 
17816 
17817 /* Mangle thumb function symbols as we write them out.  */
17818 
17819 static void
17820 elf32_arm_swap_symbol_out (bfd *abfd,
17821 			   const Elf_Internal_Sym *src,
17822 			   void *cdst,
17823 			   void *shndx)
17824 {
17825   Elf_Internal_Sym newsym;
17826 
17827   /* We convert STT_ARM_TFUNC symbols into STT_FUNC with the low bit
17828      of the address set, as per the new EABI.  We do this unconditionally
17829      because objcopy does not set the elf header flags until after
17830      it writes out the symbol table.  */
17831   if (ARM_GET_SYM_BRANCH_TYPE (src->st_target_internal) == ST_BRANCH_TO_THUMB)
17832     {
17833       newsym = *src;
17834       if (ELF_ST_TYPE (src->st_info) != STT_GNU_IFUNC)
17835 	newsym.st_info = ELF_ST_INFO (ELF_ST_BIND (src->st_info), STT_FUNC);
17836       if (newsym.st_shndx != SHN_UNDEF)
17837 	{
17838 	  /* Do this only for defined symbols. At link type, the static
17839 	     linker will simulate the work of dynamic linker of resolving
17840 	     symbols and will carry over the thumbness of found symbols to
17841 	     the output symbol table. It's not clear how it happens, but
17842 	     the thumbness of undefined symbols can well be different at
17843 	     runtime, and writing '1' for them will be confusing for users
17844 	     and possibly for dynamic linker itself.
17845 	  */
17846 	  newsym.st_value |= 1;
17847 	}
17848 
17849       src = &newsym;
17850     }
17851   bfd_elf32_swap_symbol_out (abfd, src, cdst, shndx);
17852 }
17853 
17854 /* Add the PT_ARM_EXIDX program header.  */
17855 
17856 static bfd_boolean
17857 elf32_arm_modify_segment_map (bfd *abfd,
17858 			      struct bfd_link_info *info ATTRIBUTE_UNUSED)
17859 {
17860   struct elf_segment_map *m;
17861   asection *sec;
17862 
17863   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17864   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17865     {
17866       /* If there is already a PT_ARM_EXIDX header, then we do not
17867 	 want to add another one.  This situation arises when running
17868 	 "strip"; the input binary already has the header.  */
17869       m = elf_seg_map (abfd);
17870       while (m && m->p_type != PT_ARM_EXIDX)
17871 	m = m->next;
17872       if (!m)
17873 	{
17874 	  m = (struct elf_segment_map *)
17875 	      bfd_zalloc (abfd, sizeof (struct elf_segment_map));
17876 	  if (m == NULL)
17877 	    return FALSE;
17878 	  m->p_type = PT_ARM_EXIDX;
17879 	  m->count = 1;
17880 	  m->sections[0] = sec;
17881 
17882 	  m->next = elf_seg_map (abfd);
17883 	  elf_seg_map (abfd) = m;
17884 	}
17885     }
17886 
17887   return TRUE;
17888 }
17889 
17890 /* We may add a PT_ARM_EXIDX program header.  */
17891 
17892 static int
17893 elf32_arm_additional_program_headers (bfd *abfd,
17894 				      struct bfd_link_info *info ATTRIBUTE_UNUSED)
17895 {
17896   asection *sec;
17897 
17898   sec = bfd_get_section_by_name (abfd, ".ARM.exidx");
17899   if (sec != NULL && (sec->flags & SEC_LOAD) != 0)
17900     return 1;
17901   else
17902     return 0;
17903 }
17904 
17905 /* Hook called by the linker routine which adds symbols from an object
17906    file.  */
17907 
17908 static bfd_boolean
17909 elf32_arm_add_symbol_hook (bfd *abfd, struct bfd_link_info *info,
17910 			   Elf_Internal_Sym *sym, const char **namep,
17911 			   flagword *flagsp, asection **secp, bfd_vma *valp)
17912 {
17913   if (ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC
17914       && (abfd->flags & DYNAMIC) == 0
17915       && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour)
17916     elf_tdata (info->output_bfd)->has_gnu_symbols |= elf_gnu_symbol_ifunc;
17917 
17918   if (elf32_arm_hash_table (info) == NULL)
17919     return FALSE;
17920 
17921   if (elf32_arm_hash_table (info)->vxworks_p
17922       && !elf_vxworks_add_symbol_hook (abfd, info, sym, namep,
17923 				       flagsp, secp, valp))
17924     return FALSE;
17925 
17926   return TRUE;
17927 }
17928 
17929 /* We use this to override swap_symbol_in and swap_symbol_out.  */
17930 const struct elf_size_info elf32_arm_size_info =
17931 {
17932   sizeof (Elf32_External_Ehdr),
17933   sizeof (Elf32_External_Phdr),
17934   sizeof (Elf32_External_Shdr),
17935   sizeof (Elf32_External_Rel),
17936   sizeof (Elf32_External_Rela),
17937   sizeof (Elf32_External_Sym),
17938   sizeof (Elf32_External_Dyn),
17939   sizeof (Elf_External_Note),
17940   4,
17941   1,
17942   32, 2,
17943   ELFCLASS32, EV_CURRENT,
17944   bfd_elf32_write_out_phdrs,
17945   bfd_elf32_write_shdrs_and_ehdr,
17946   bfd_elf32_checksum_contents,
17947   bfd_elf32_write_relocs,
17948   elf32_arm_swap_symbol_in,
17949   elf32_arm_swap_symbol_out,
17950   bfd_elf32_slurp_reloc_table,
17951   bfd_elf32_slurp_symbol_table,
17952   bfd_elf32_swap_dyn_in,
17953   bfd_elf32_swap_dyn_out,
17954   bfd_elf32_swap_reloc_in,
17955   bfd_elf32_swap_reloc_out,
17956   bfd_elf32_swap_reloca_in,
17957   bfd_elf32_swap_reloca_out
17958 };
17959 
17960 static bfd_vma
17961 read_code32 (const bfd *abfd, const bfd_byte *addr)
17962 {
17963   /* V7 BE8 code is always little endian.  */
17964   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17965     return bfd_getl32 (addr);
17966 
17967   return bfd_get_32 (abfd, addr);
17968 }
17969 
17970 static bfd_vma
17971 read_code16 (const bfd *abfd, const bfd_byte *addr)
17972 {
17973   /* V7 BE8 code is always little endian.  */
17974   if ((elf_elfheader (abfd)->e_flags & EF_ARM_BE8) != 0)
17975     return bfd_getl16 (addr);
17976 
17977   return bfd_get_16 (abfd, addr);
17978 }
17979 
17980 /* Return size of plt0 entry starting at ADDR
17981    or (bfd_vma) -1 if size can not be determined.  */
17982 
17983 static bfd_vma
17984 elf32_arm_plt0_size (const bfd *abfd, const bfd_byte *addr)
17985 {
17986   bfd_vma first_word;
17987   bfd_vma plt0_size;
17988 
17989   first_word = read_code32 (abfd, addr);
17990 
17991   if (first_word == elf32_arm_plt0_entry[0])
17992     plt0_size = 4 * ARRAY_SIZE (elf32_arm_plt0_entry);
17993   else if (first_word == elf32_thumb2_plt0_entry[0])
17994     plt0_size = 4 * ARRAY_SIZE (elf32_thumb2_plt0_entry);
17995   else
17996     /* We don't yet handle this PLT format.  */
17997     return (bfd_vma) -1;
17998 
17999   return plt0_size;
18000 }
18001 
18002 /* Return size of plt entry starting at offset OFFSET
18003    of plt section located at address START
18004    or (bfd_vma) -1 if size can not be determined.  */
18005 
18006 static bfd_vma
18007 elf32_arm_plt_size (const bfd *abfd, const bfd_byte *start, bfd_vma offset)
18008 {
18009   bfd_vma first_insn;
18010   bfd_vma plt_size = 0;
18011   const bfd_byte *addr = start + offset;
18012 
18013   /* PLT entry size if fixed on Thumb-only platforms.  */
18014   if (read_code32 (abfd, start) == elf32_thumb2_plt0_entry[0])
18015       return 4 * ARRAY_SIZE (elf32_thumb2_plt_entry);
18016 
18017   /* Respect Thumb stub if necessary.  */
18018   if (read_code16 (abfd, addr) == elf32_arm_plt_thumb_stub[0])
18019     {
18020       plt_size += 2 * ARRAY_SIZE(elf32_arm_plt_thumb_stub);
18021     }
18022 
18023   /* Strip immediate from first add.  */
18024   first_insn = read_code32 (abfd, addr + plt_size) & 0xffffff00;
18025 
18026 #ifdef FOUR_WORD_PLT
18027   if (first_insn == elf32_arm_plt_entry[0])
18028     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry);
18029 #else
18030   if (first_insn == elf32_arm_plt_entry_long[0])
18031     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_long);
18032   else if (first_insn == elf32_arm_plt_entry_short[0])
18033     plt_size += 4 * ARRAY_SIZE (elf32_arm_plt_entry_short);
18034 #endif
18035   else
18036     /* We don't yet handle this PLT format.  */
18037     return (bfd_vma) -1;
18038 
18039   return plt_size;
18040 }
18041 
18042 /* Implementation is shamelessly borrowed from _bfd_elf_get_synthetic_symtab.  */
18043 
18044 static long
18045 elf32_arm_get_synthetic_symtab (bfd *abfd,
18046 			       long symcount ATTRIBUTE_UNUSED,
18047 			       asymbol **syms ATTRIBUTE_UNUSED,
18048 			       long dynsymcount,
18049 			       asymbol **dynsyms,
18050 			       asymbol **ret)
18051 {
18052   asection *relplt;
18053   asymbol *s;
18054   arelent *p;
18055   long count, i, n;
18056   size_t size;
18057   Elf_Internal_Shdr *hdr;
18058   char *names;
18059   asection *plt;
18060   bfd_vma offset;
18061   bfd_byte *data;
18062 
18063   *ret = NULL;
18064 
18065   if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
18066     return 0;
18067 
18068   if (dynsymcount <= 0)
18069     return 0;
18070 
18071   relplt = bfd_get_section_by_name (abfd, ".rel.plt");
18072   if (relplt == NULL)
18073     return 0;
18074 
18075   hdr = &elf_section_data (relplt)->this_hdr;
18076   if (hdr->sh_link != elf_dynsymtab (abfd)
18077       || (hdr->sh_type != SHT_REL && hdr->sh_type != SHT_RELA))
18078     return 0;
18079 
18080   plt = bfd_get_section_by_name (abfd, ".plt");
18081   if (plt == NULL)
18082     return 0;
18083 
18084   if (!elf32_arm_size_info.slurp_reloc_table (abfd, relplt, dynsyms, TRUE))
18085     return -1;
18086 
18087   data = plt->contents;
18088   if (data == NULL)
18089     {
18090       if (!bfd_get_full_section_contents(abfd, (asection *) plt, &data) || data == NULL)
18091 	return -1;
18092       bfd_cache_section_contents((asection *) plt, data);
18093     }
18094 
18095   count = relplt->size / hdr->sh_entsize;
18096   size = count * sizeof (asymbol);
18097   p = relplt->relocation;
18098   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
18099     {
18100       size += strlen ((*p->sym_ptr_ptr)->name) + sizeof ("@plt");
18101       if (p->addend != 0)
18102 	size += sizeof ("+0x") - 1 + 8;
18103     }
18104 
18105   s = *ret = (asymbol *) bfd_malloc (size);
18106   if (s == NULL)
18107     return -1;
18108 
18109   offset = elf32_arm_plt0_size (abfd, data);
18110   if (offset == (bfd_vma) -1)
18111     return -1;
18112 
18113   names = (char *) (s + count);
18114   p = relplt->relocation;
18115   n = 0;
18116   for (i = 0; i < count; i++, p += elf32_arm_size_info.int_rels_per_ext_rel)
18117     {
18118       size_t len;
18119 
18120       bfd_vma plt_size = elf32_arm_plt_size (abfd, data, offset);
18121       if (plt_size == (bfd_vma) -1)
18122 	break;
18123 
18124       *s = **p->sym_ptr_ptr;
18125       /* Undefined syms won't have BSF_LOCAL or BSF_GLOBAL set.  Since
18126 	 we are defining a symbol, ensure one of them is set.  */
18127       if ((s->flags & BSF_LOCAL) == 0)
18128 	s->flags |= BSF_GLOBAL;
18129       s->flags |= BSF_SYNTHETIC;
18130       s->section = plt;
18131       s->value = offset;
18132       s->name = names;
18133       s->udata.p = NULL;
18134       len = strlen ((*p->sym_ptr_ptr)->name);
18135       memcpy (names, (*p->sym_ptr_ptr)->name, len);
18136       names += len;
18137       if (p->addend != 0)
18138 	{
18139 	  char buf[30], *a;
18140 
18141 	  memcpy (names, "+0x", sizeof ("+0x") - 1);
18142 	  names += sizeof ("+0x") - 1;
18143 	  bfd_sprintf_vma (abfd, buf, p->addend);
18144 	  for (a = buf; *a == '0'; ++a)
18145 	    ;
18146 	  len = strlen (a);
18147 	  memcpy (names, a, len);
18148 	  names += len;
18149 	}
18150       memcpy (names, "@plt", sizeof ("@plt"));
18151       names += sizeof ("@plt");
18152       ++s, ++n;
18153       offset += plt_size;
18154     }
18155 
18156   return n;
18157 }
18158 
18159 static bfd_boolean
18160 elf32_arm_section_flags (flagword *flags, const Elf_Internal_Shdr * hdr)
18161 {
18162   if (hdr->sh_flags & SHF_ARM_NOREAD)
18163     *flags |= SEC_ELF_NOREAD;
18164   return TRUE;
18165 }
18166 
18167 static flagword
18168 elf32_arm_lookup_section_flags (char *flag_name)
18169 {
18170   if (!strcmp (flag_name, "SHF_ARM_NOREAD"))
18171     return SHF_ARM_NOREAD;
18172 
18173   return SEC_NO_FLAGS;
18174 }
18175 
18176 static unsigned int
18177 elf32_arm_count_additional_relocs (asection *sec)
18178 {
18179   struct _arm_elf_section_data *arm_data;
18180   arm_data = get_arm_elf_section_data (sec);
18181   return arm_data->additional_reloc_count;
18182 }
18183 
18184 /* Called to set the sh_flags, sh_link and sh_info fields of OSECTION which
18185    has a type >= SHT_LOOS.  Returns TRUE if these fields were initialised
18186    FALSE otherwise.  ISECTION is the best guess matching section from the
18187    input bfd IBFD, but it might be NULL.  */
18188 
18189 static bfd_boolean
18190 elf32_arm_copy_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
18191 				       bfd *obfd ATTRIBUTE_UNUSED,
18192 				       const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
18193 				       Elf_Internal_Shdr *osection)
18194 {
18195   switch (osection->sh_type)
18196     {
18197     case SHT_ARM_EXIDX:
18198       {
18199 	Elf_Internal_Shdr **oheaders = elf_elfsections (obfd);
18200 	Elf_Internal_Shdr **iheaders = elf_elfsections (ibfd);
18201 	unsigned i = 0;
18202 
18203 	osection->sh_flags = SHF_ALLOC | SHF_LINK_ORDER;
18204 	osection->sh_info = 0;
18205 
18206 	/* The sh_link field must be set to the text section associated with
18207 	   this index section.  Unfortunately the ARM EHABI does not specify
18208 	   exactly how to determine this association.  Our caller does try
18209 	   to match up OSECTION with its corresponding input section however
18210 	   so that is a good first guess.  */
18211 	if (isection != NULL
18212 	    && osection->bfd_section != NULL
18213 	    && isection->bfd_section != NULL
18214 	    && isection->bfd_section->output_section != NULL
18215 	    && isection->bfd_section->output_section == osection->bfd_section
18216 	    && iheaders != NULL
18217 	    && isection->sh_link > 0
18218 	    && isection->sh_link < elf_numsections (ibfd)
18219 	    && iheaders[isection->sh_link]->bfd_section != NULL
18220 	    && iheaders[isection->sh_link]->bfd_section->output_section != NULL
18221 	    )
18222 	  {
18223 	    for (i = elf_numsections (obfd); i-- > 0;)
18224 	      if (oheaders[i]->bfd_section
18225 		  == iheaders[isection->sh_link]->bfd_section->output_section)
18226 		break;
18227 	  }
18228 
18229 	if (i == 0)
18230 	  {
18231 	    /* Failing that we have to find a matching section ourselves.  If
18232 	       we had the output section name available we could compare that
18233 	       with input section names.  Unfortunately we don't.  So instead
18234 	       we use a simple heuristic and look for the nearest executable
18235 	       section before this one.  */
18236 	    for (i = elf_numsections (obfd); i-- > 0;)
18237 	      if (oheaders[i] == osection)
18238 		break;
18239 	    if (i == 0)
18240 	      break;
18241 
18242 	    while (i-- > 0)
18243 	      if (oheaders[i]->sh_type == SHT_PROGBITS
18244 		  && (oheaders[i]->sh_flags & (SHF_ALLOC | SHF_EXECINSTR))
18245 		  == (SHF_ALLOC | SHF_EXECINSTR))
18246 		break;
18247 	  }
18248 
18249 	if (i)
18250 	  {
18251 	    osection->sh_link = i;
18252 	    /* If the text section was part of a group
18253 	       then the index section should be too.  */
18254 	    if (oheaders[i]->sh_flags & SHF_GROUP)
18255 	      osection->sh_flags |= SHF_GROUP;
18256 	    return TRUE;
18257 	  }
18258       }
18259       break;
18260 
18261     case SHT_ARM_PREEMPTMAP:
18262       osection->sh_flags = SHF_ALLOC;
18263       break;
18264 
18265     case SHT_ARM_ATTRIBUTES:
18266     case SHT_ARM_DEBUGOVERLAY:
18267     case SHT_ARM_OVERLAYSECTION:
18268     default:
18269       break;
18270     }
18271 
18272   return FALSE;
18273 }
18274 
18275 /* Returns TRUE if NAME is an ARM mapping symbol.
18276    Traditionally the symbols $a, $d and $t have been used.
18277    The ARM ELF standard also defines $x (for A64 code).  It also allows a
18278    period initiated suffix to be added to the symbol: "$[adtx]\.[:sym_char]+".
18279    Other tools might also produce $b (Thumb BL), $f, $p, $m and $v, but we do
18280    not support them here.  $t.x indicates the start of ThumbEE instructions.  */
18281 
18282 static bfd_boolean
18283 is_arm_mapping_symbol (const char * name)
18284 {
18285   return name != NULL /* Paranoia.  */
18286     && name[0] == '$' /* Note: if objcopy --prefix-symbols has been used then
18287 			 the mapping symbols could have acquired a prefix.
18288 			 We do not support this here, since such symbols no
18289 			 longer conform to the ARM ELF ABI.  */
18290     && (name[1] == 'a' || name[1] == 'd' || name[1] == 't' || name[1] == 'x')
18291     && (name[2] == 0 || name[2] == '.');
18292   /* FIXME: Strictly speaking the symbol is only a valid mapping symbol if
18293      any characters that follow the period are legal characters for the body
18294      of a symbol's name.  For now we just assume that this is the case.  */
18295 }
18296 
18297 /* Make sure that mapping symbols in object files are not removed via the
18298    "strip --strip-unneeded" tool.  These symbols are needed in order to
18299    correctly generate interworking veneers, and for byte swapping code
18300    regions.  Once an object file has been linked, it is safe to remove the
18301    symbols as they will no longer be needed.  */
18302 
18303 static void
18304 elf32_arm_backend_symbol_processing (bfd *abfd, asymbol *sym)
18305 {
18306   if (((abfd->flags & (EXEC_P | DYNAMIC)) == 0)
18307       && sym->section != bfd_abs_section_ptr
18308       && is_arm_mapping_symbol (sym->name))
18309     sym->flags |= BSF_KEEP;
18310 }
18311 
18312 #undef  elf_backend_copy_special_section_fields
18313 #define elf_backend_copy_special_section_fields elf32_arm_copy_special_section_fields
18314 
18315 #define ELF_ARCH			bfd_arch_arm
18316 #define ELF_TARGET_ID			ARM_ELF_DATA
18317 #define ELF_MACHINE_CODE		EM_ARM
18318 #ifdef __QNXTARGET__
18319 #define ELF_MAXPAGESIZE			0x1000
18320 #else
18321 #define ELF_MAXPAGESIZE			0x10000
18322 #endif
18323 #define ELF_MINPAGESIZE			0x1000
18324 #define ELF_COMMONPAGESIZE		0x1000
18325 
18326 #define bfd_elf32_mkobject		        elf32_arm_mkobject
18327 
18328 #define bfd_elf32_bfd_copy_private_bfd_data	elf32_arm_copy_private_bfd_data
18329 #define bfd_elf32_bfd_merge_private_bfd_data	elf32_arm_merge_private_bfd_data
18330 #define bfd_elf32_bfd_set_private_flags		elf32_arm_set_private_flags
18331 #define bfd_elf32_bfd_print_private_bfd_data	elf32_arm_print_private_bfd_data
18332 #define bfd_elf32_bfd_link_hash_table_create    elf32_arm_link_hash_table_create
18333 #define bfd_elf32_bfd_reloc_type_lookup		elf32_arm_reloc_type_lookup
18334 #define bfd_elf32_bfd_reloc_name_lookup		elf32_arm_reloc_name_lookup
18335 #define bfd_elf32_find_nearest_line	        elf32_arm_find_nearest_line
18336 #define bfd_elf32_find_inliner_info	        elf32_arm_find_inliner_info
18337 #define bfd_elf32_new_section_hook		elf32_arm_new_section_hook
18338 #define bfd_elf32_bfd_is_target_special_symbol	elf32_arm_is_target_special_symbol
18339 #define bfd_elf32_bfd_final_link		elf32_arm_final_link
18340 #define bfd_elf32_get_synthetic_symtab  elf32_arm_get_synthetic_symtab
18341 
18342 #define elf_backend_get_symbol_type             elf32_arm_get_symbol_type
18343 #define elf_backend_gc_mark_hook                elf32_arm_gc_mark_hook
18344 #define elf_backend_gc_mark_extra_sections	elf32_arm_gc_mark_extra_sections
18345 #define elf_backend_gc_sweep_hook               elf32_arm_gc_sweep_hook
18346 #define elf_backend_check_relocs                elf32_arm_check_relocs
18347 #define elf_backend_relocate_section		elf32_arm_relocate_section
18348 #define elf_backend_write_section		elf32_arm_write_section
18349 #define elf_backend_adjust_dynamic_symbol	elf32_arm_adjust_dynamic_symbol
18350 #define elf_backend_create_dynamic_sections     elf32_arm_create_dynamic_sections
18351 #define elf_backend_finish_dynamic_symbol	elf32_arm_finish_dynamic_symbol
18352 #define elf_backend_finish_dynamic_sections	elf32_arm_finish_dynamic_sections
18353 #define elf_backend_size_dynamic_sections	elf32_arm_size_dynamic_sections
18354 #define elf_backend_always_size_sections	elf32_arm_always_size_sections
18355 #define elf_backend_init_index_section		_bfd_elf_init_2_index_sections
18356 #define elf_backend_post_process_headers	elf32_arm_post_process_headers
18357 #define elf_backend_reloc_type_class		elf32_arm_reloc_type_class
18358 #define elf_backend_object_p			elf32_arm_object_p
18359 #define elf_backend_fake_sections  		elf32_arm_fake_sections
18360 #define elf_backend_section_from_shdr  		elf32_arm_section_from_shdr
18361 #define elf_backend_final_write_processing      elf32_arm_final_write_processing
18362 #define elf_backend_copy_indirect_symbol        elf32_arm_copy_indirect_symbol
18363 #define elf_backend_size_info			elf32_arm_size_info
18364 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
18365 #define elf_backend_additional_program_headers  elf32_arm_additional_program_headers
18366 #define elf_backend_output_arch_local_syms      elf32_arm_output_arch_local_syms
18367 #define elf_backend_begin_write_processing      elf32_arm_begin_write_processing
18368 #define elf_backend_add_symbol_hook		elf32_arm_add_symbol_hook
18369 #define elf_backend_count_additional_relocs	elf32_arm_count_additional_relocs
18370 #define elf_backend_symbol_processing		elf32_arm_backend_symbol_processing
18371 
18372 #define elf_backend_can_refcount       1
18373 #define elf_backend_can_gc_sections    1
18374 #define elf_backend_plt_readonly       1
18375 #define elf_backend_want_got_plt       1
18376 #define elf_backend_want_plt_sym       0
18377 #define elf_backend_may_use_rel_p      1
18378 #define elf_backend_may_use_rela_p     0
18379 #define elf_backend_default_use_rela_p 0
18380 
18381 #define elf_backend_got_header_size	12
18382 #define elf_backend_extern_protected_data 1
18383 
18384 #undef  elf_backend_obj_attrs_vendor
18385 #define elf_backend_obj_attrs_vendor		"aeabi"
18386 #undef  elf_backend_obj_attrs_section
18387 #define elf_backend_obj_attrs_section		".ARM.attributes"
18388 #undef  elf_backend_obj_attrs_arg_type
18389 #define elf_backend_obj_attrs_arg_type		elf32_arm_obj_attrs_arg_type
18390 #undef  elf_backend_obj_attrs_section_type
18391 #define elf_backend_obj_attrs_section_type	SHT_ARM_ATTRIBUTES
18392 #define elf_backend_obj_attrs_order		elf32_arm_obj_attrs_order
18393 #define elf_backend_obj_attrs_handle_unknown 	elf32_arm_obj_attrs_handle_unknown
18394 
18395 #undef elf_backend_section_flags
18396 #define elf_backend_section_flags		elf32_arm_section_flags
18397 #undef elf_backend_lookup_section_flags_hook
18398 #define elf_backend_lookup_section_flags_hook   elf32_arm_lookup_section_flags
18399 
18400 #include "elf32-target.h"
18401 
18402 /* Native Client targets.  */
18403 
18404 #undef	TARGET_LITTLE_SYM
18405 #define TARGET_LITTLE_SYM		arm_elf32_nacl_le_vec
18406 #undef	TARGET_LITTLE_NAME
18407 #define TARGET_LITTLE_NAME		"elf32-littlearm-nacl"
18408 #undef	TARGET_BIG_SYM
18409 #define TARGET_BIG_SYM			arm_elf32_nacl_be_vec
18410 #undef	TARGET_BIG_NAME
18411 #define TARGET_BIG_NAME			"elf32-bigarm-nacl"
18412 
18413 /* Like elf32_arm_link_hash_table_create -- but overrides
18414    appropriately for NaCl.  */
18415 
18416 static struct bfd_link_hash_table *
18417 elf32_arm_nacl_link_hash_table_create (bfd *abfd)
18418 {
18419   struct bfd_link_hash_table *ret;
18420 
18421   ret = elf32_arm_link_hash_table_create (abfd);
18422   if (ret)
18423     {
18424       struct elf32_arm_link_hash_table *htab
18425 	= (struct elf32_arm_link_hash_table *) ret;
18426 
18427       htab->nacl_p = 1;
18428 
18429       htab->plt_header_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt0_entry);
18430       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_nacl_plt_entry);
18431     }
18432   return ret;
18433 }
18434 
18435 /* Since NaCl doesn't use the ARM-specific unwind format, we don't
18436    really need to use elf32_arm_modify_segment_map.  But we do it
18437    anyway just to reduce gratuitous differences with the stock ARM backend.  */
18438 
18439 static bfd_boolean
18440 elf32_arm_nacl_modify_segment_map (bfd *abfd, struct bfd_link_info *info)
18441 {
18442   return (elf32_arm_modify_segment_map (abfd, info)
18443 	  && nacl_modify_segment_map (abfd, info));
18444 }
18445 
18446 static void
18447 elf32_arm_nacl_final_write_processing (bfd *abfd, bfd_boolean linker)
18448 {
18449   elf32_arm_final_write_processing (abfd, linker);
18450   nacl_final_write_processing (abfd, linker);
18451 }
18452 
18453 static bfd_vma
18454 elf32_arm_nacl_plt_sym_val (bfd_vma i, const asection *plt,
18455 			    const arelent *rel ATTRIBUTE_UNUSED)
18456 {
18457   return plt->vma
18458     + 4 * (ARRAY_SIZE (elf32_arm_nacl_plt0_entry) +
18459 	   i * ARRAY_SIZE (elf32_arm_nacl_plt_entry));
18460 }
18461 
18462 #undef	elf32_bed
18463 #define elf32_bed				elf32_arm_nacl_bed
18464 #undef  bfd_elf32_bfd_link_hash_table_create
18465 #define bfd_elf32_bfd_link_hash_table_create	\
18466   elf32_arm_nacl_link_hash_table_create
18467 #undef	elf_backend_plt_alignment
18468 #define elf_backend_plt_alignment		4
18469 #undef	elf_backend_modify_segment_map
18470 #define	elf_backend_modify_segment_map		elf32_arm_nacl_modify_segment_map
18471 #undef	elf_backend_modify_program_headers
18472 #define	elf_backend_modify_program_headers	nacl_modify_program_headers
18473 #undef  elf_backend_final_write_processing
18474 #define elf_backend_final_write_processing	elf32_arm_nacl_final_write_processing
18475 #undef bfd_elf32_get_synthetic_symtab
18476 #undef  elf_backend_plt_sym_val
18477 #define elf_backend_plt_sym_val			elf32_arm_nacl_plt_sym_val
18478 #undef  elf_backend_copy_special_section_fields
18479 
18480 #undef	ELF_MINPAGESIZE
18481 #undef	ELF_COMMONPAGESIZE
18482 
18483 
18484 #include "elf32-target.h"
18485 
18486 /* Reset to defaults.  */
18487 #undef	elf_backend_plt_alignment
18488 #undef	elf_backend_modify_segment_map
18489 #define elf_backend_modify_segment_map		elf32_arm_modify_segment_map
18490 #undef	elf_backend_modify_program_headers
18491 #undef  elf_backend_final_write_processing
18492 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
18493 #undef	ELF_MINPAGESIZE
18494 #define ELF_MINPAGESIZE			0x1000
18495 #undef	ELF_COMMONPAGESIZE
18496 #define ELF_COMMONPAGESIZE		0x1000
18497 
18498 
18499 /* VxWorks Targets.  */
18500 
18501 #undef  TARGET_LITTLE_SYM
18502 #define TARGET_LITTLE_SYM               arm_elf32_vxworks_le_vec
18503 #undef  TARGET_LITTLE_NAME
18504 #define TARGET_LITTLE_NAME              "elf32-littlearm-vxworks"
18505 #undef  TARGET_BIG_SYM
18506 #define TARGET_BIG_SYM                  arm_elf32_vxworks_be_vec
18507 #undef  TARGET_BIG_NAME
18508 #define TARGET_BIG_NAME                 "elf32-bigarm-vxworks"
18509 
18510 /* Like elf32_arm_link_hash_table_create -- but overrides
18511    appropriately for VxWorks.  */
18512 
18513 static struct bfd_link_hash_table *
18514 elf32_arm_vxworks_link_hash_table_create (bfd *abfd)
18515 {
18516   struct bfd_link_hash_table *ret;
18517 
18518   ret = elf32_arm_link_hash_table_create (abfd);
18519   if (ret)
18520     {
18521       struct elf32_arm_link_hash_table *htab
18522 	= (struct elf32_arm_link_hash_table *) ret;
18523       htab->use_rel = 0;
18524       htab->vxworks_p = 1;
18525     }
18526   return ret;
18527 }
18528 
18529 static void
18530 elf32_arm_vxworks_final_write_processing (bfd *abfd, bfd_boolean linker)
18531 {
18532   elf32_arm_final_write_processing (abfd, linker);
18533   elf_vxworks_final_write_processing (abfd, linker);
18534 }
18535 
18536 #undef  elf32_bed
18537 #define elf32_bed elf32_arm_vxworks_bed
18538 
18539 #undef  bfd_elf32_bfd_link_hash_table_create
18540 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_vxworks_link_hash_table_create
18541 #undef  elf_backend_final_write_processing
18542 #define elf_backend_final_write_processing	elf32_arm_vxworks_final_write_processing
18543 #undef  elf_backend_emit_relocs
18544 #define elf_backend_emit_relocs			elf_vxworks_emit_relocs
18545 
18546 #undef  elf_backend_may_use_rel_p
18547 #define elf_backend_may_use_rel_p	0
18548 #undef  elf_backend_may_use_rela_p
18549 #define elf_backend_may_use_rela_p	1
18550 #undef  elf_backend_default_use_rela_p
18551 #define elf_backend_default_use_rela_p	1
18552 #undef  elf_backend_want_plt_sym
18553 #define elf_backend_want_plt_sym	1
18554 #undef  ELF_MAXPAGESIZE
18555 #define ELF_MAXPAGESIZE			0x1000
18556 
18557 #include "elf32-target.h"
18558 
18559 
18560 /* Merge backend specific data from an object file to the output
18561    object file when linking.  */
18562 
18563 static bfd_boolean
18564 elf32_arm_merge_private_bfd_data (bfd * ibfd, bfd * obfd)
18565 {
18566   flagword out_flags;
18567   flagword in_flags;
18568   bfd_boolean flags_compatible = TRUE;
18569   asection *sec;
18570 
18571   /* Check if we have the same endianness.  */
18572   if (! _bfd_generic_verify_endian_match (ibfd, obfd))
18573     return FALSE;
18574 
18575   if (! is_arm_elf (ibfd) || ! is_arm_elf (obfd))
18576     return TRUE;
18577 
18578   if (!elf32_arm_merge_eabi_attributes (ibfd, obfd))
18579     return FALSE;
18580 
18581   /* The input BFD must have had its flags initialised.  */
18582   /* The following seems bogus to me -- The flags are initialized in
18583      the assembler but I don't think an elf_flags_init field is
18584      written into the object.  */
18585   /* BFD_ASSERT (elf_flags_init (ibfd)); */
18586 
18587   in_flags  = elf_elfheader (ibfd)->e_flags;
18588   out_flags = elf_elfheader (obfd)->e_flags;
18589 
18590   /* In theory there is no reason why we couldn't handle this.  However
18591      in practice it isn't even close to working and there is no real
18592      reason to want it.  */
18593   if (EF_ARM_EABI_VERSION (in_flags) >= EF_ARM_EABI_VER4
18594       && !(ibfd->flags & DYNAMIC)
18595       && (in_flags & EF_ARM_BE8))
18596     {
18597       _bfd_error_handler (_("error: %B is already in final BE8 format"),
18598 			  ibfd);
18599       return FALSE;
18600     }
18601 
18602   if (!elf_flags_init (obfd))
18603     {
18604       /* If the input is the default architecture and had the default
18605 	 flags then do not bother setting the flags for the output
18606 	 architecture, instead allow future merges to do this.  If no
18607 	 future merges ever set these flags then they will retain their
18608 	 uninitialised values, which surprise surprise, correspond
18609 	 to the default values.  */
18610       if (bfd_get_arch_info (ibfd)->the_default
18611 	  && elf_elfheader (ibfd)->e_flags == 0)
18612 	return TRUE;
18613 
18614       elf_flags_init (obfd) = TRUE;
18615       elf_elfheader (obfd)->e_flags = in_flags;
18616 
18617       if (bfd_get_arch (obfd) == bfd_get_arch (ibfd)
18618 	  && bfd_get_arch_info (obfd)->the_default)
18619 	return bfd_set_arch_mach (obfd, bfd_get_arch (ibfd), bfd_get_mach (ibfd));
18620 
18621       return TRUE;
18622     }
18623 
18624   /* Determine what should happen if the input ARM architecture
18625      does not match the output ARM architecture.  */
18626   if (! bfd_arm_merge_machines (ibfd, obfd))
18627     return FALSE;
18628 
18629   /* Identical flags must be compatible.  */
18630   if (in_flags == out_flags)
18631     return TRUE;
18632 
18633   /* Check to see if the input BFD actually contains any sections.  If
18634      not, its flags may not have been initialised either, but it
18635      cannot actually cause any incompatiblity.  Do not short-circuit
18636      dynamic objects; their section list may be emptied by
18637     elf_link_add_object_symbols.
18638 
18639     Also check to see if there are no code sections in the input.
18640     In this case there is no need to check for code specific flags.
18641     XXX - do we need to worry about floating-point format compatability
18642     in data sections ?  */
18643   if (!(ibfd->flags & DYNAMIC))
18644     {
18645       bfd_boolean null_input_bfd = TRUE;
18646       bfd_boolean only_data_sections = TRUE;
18647 
18648       for (sec = ibfd->sections; sec != NULL; sec = sec->next)
18649 	{
18650 	  /* Ignore synthetic glue sections.  */
18651 	  if (strcmp (sec->name, ".glue_7")
18652 	      && strcmp (sec->name, ".glue_7t"))
18653 	    {
18654 	      if ((bfd_get_section_flags (ibfd, sec)
18655 		   & (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18656 		  == (SEC_LOAD | SEC_CODE | SEC_HAS_CONTENTS))
18657 		only_data_sections = FALSE;
18658 
18659 	      null_input_bfd = FALSE;
18660 	      break;
18661 	    }
18662 	}
18663 
18664       if (null_input_bfd || only_data_sections)
18665 	return TRUE;
18666     }
18667 
18668   /* Complain about various flag mismatches.  */
18669   if (!elf32_arm_versions_compatible (EF_ARM_EABI_VERSION (in_flags),
18670 				      EF_ARM_EABI_VERSION (out_flags)))
18671     {
18672       _bfd_error_handler
18673 	(_("error: Source object %B has EABI version %d, but target %B has EABI version %d"),
18674 	 ibfd, obfd,
18675 	 (in_flags & EF_ARM_EABIMASK) >> 24,
18676 	 (out_flags & EF_ARM_EABIMASK) >> 24);
18677       return FALSE;
18678     }
18679 
18680   /* Not sure what needs to be checked for EABI versions >= 1.  */
18681   /* VxWorks libraries do not use these flags.  */
18682   if (get_elf_backend_data (obfd) != &elf32_arm_vxworks_bed
18683       && get_elf_backend_data (ibfd) != &elf32_arm_vxworks_bed
18684       && EF_ARM_EABI_VERSION (in_flags) == EF_ARM_EABI_UNKNOWN)
18685     {
18686       if ((in_flags & EF_ARM_APCS_26) != (out_flags & EF_ARM_APCS_26))
18687 	{
18688 	  _bfd_error_handler
18689 	    (_("error: %B is compiled for APCS-%d, whereas target %B uses APCS-%d"),
18690 	     ibfd, obfd,
18691 	     in_flags & EF_ARM_APCS_26 ? 26 : 32,
18692 	     out_flags & EF_ARM_APCS_26 ? 26 : 32);
18693 	  flags_compatible = FALSE;
18694 	}
18695 
18696       if ((in_flags & EF_ARM_APCS_FLOAT) != (out_flags & EF_ARM_APCS_FLOAT))
18697 	{
18698 	  if (in_flags & EF_ARM_APCS_FLOAT)
18699 	    _bfd_error_handler
18700 	      (_("error: %B passes floats in float registers, whereas %B passes them in integer registers"),
18701 	       ibfd, obfd);
18702 	  else
18703 	    _bfd_error_handler
18704 	      (_("error: %B passes floats in integer registers, whereas %B passes them in float registers"),
18705 	       ibfd, obfd);
18706 
18707 	  flags_compatible = FALSE;
18708 	}
18709 
18710       if ((in_flags & EF_ARM_VFP_FLOAT) != (out_flags & EF_ARM_VFP_FLOAT))
18711 	{
18712 	  if (in_flags & EF_ARM_VFP_FLOAT)
18713 	    _bfd_error_handler
18714 	      (_("error: %B uses VFP instructions, whereas %B does not"),
18715 	       ibfd, obfd);
18716 	  else
18717 	    _bfd_error_handler
18718 	      (_("error: %B uses FPA instructions, whereas %B does not"),
18719 	       ibfd, obfd);
18720 
18721 	  flags_compatible = FALSE;
18722 	}
18723 
18724       if ((in_flags & EF_ARM_MAVERICK_FLOAT) != (out_flags & EF_ARM_MAVERICK_FLOAT))
18725 	{
18726 	  if (in_flags & EF_ARM_MAVERICK_FLOAT)
18727 	    _bfd_error_handler
18728 	      (_("error: %B uses Maverick instructions, whereas %B does not"),
18729 	       ibfd, obfd);
18730 	  else
18731 	    _bfd_error_handler
18732 	      (_("error: %B does not use Maverick instructions, whereas %B does"),
18733 	       ibfd, obfd);
18734 
18735 	  flags_compatible = FALSE;
18736 	}
18737 
18738 #ifdef EF_ARM_SOFT_FLOAT
18739       if ((in_flags & EF_ARM_SOFT_FLOAT) != (out_flags & EF_ARM_SOFT_FLOAT))
18740 	{
18741 	  /* We can allow interworking between code that is VFP format
18742 	     layout, and uses either soft float or integer regs for
18743 	     passing floating point arguments and results.  We already
18744 	     know that the APCS_FLOAT flags match; similarly for VFP
18745 	     flags.  */
18746 	  if ((in_flags & EF_ARM_APCS_FLOAT) != 0
18747 	      || (in_flags & EF_ARM_VFP_FLOAT) == 0)
18748 	    {
18749 	      if (in_flags & EF_ARM_SOFT_FLOAT)
18750 		_bfd_error_handler
18751 		  (_("error: %B uses software FP, whereas %B uses hardware FP"),
18752 		   ibfd, obfd);
18753 	      else
18754 		_bfd_error_handler
18755 		  (_("error: %B uses hardware FP, whereas %B uses software FP"),
18756 		   ibfd, obfd);
18757 
18758 	      flags_compatible = FALSE;
18759 	    }
18760 	}
18761 #endif
18762 
18763       /* Interworking mismatch is only a warning.  */
18764       if ((in_flags & EF_ARM_INTERWORK) != (out_flags & EF_ARM_INTERWORK))
18765 	{
18766 	  if (in_flags & EF_ARM_INTERWORK)
18767 	    {
18768 	      _bfd_error_handler
18769 		(_("Warning: %B supports interworking, whereas %B does not"),
18770 		 ibfd, obfd);
18771 	    }
18772 	  else
18773 	    {
18774 	      _bfd_error_handler
18775 		(_("Warning: %B does not support interworking, whereas %B does"),
18776 		 ibfd, obfd);
18777 	    }
18778 	}
18779     }
18780 
18781   return flags_compatible;
18782 }
18783 
18784 
18785 /* Symbian OS Targets.  */
18786 
18787 #undef  TARGET_LITTLE_SYM
18788 #define TARGET_LITTLE_SYM               arm_elf32_symbian_le_vec
18789 #undef  TARGET_LITTLE_NAME
18790 #define TARGET_LITTLE_NAME              "elf32-littlearm-symbian"
18791 #undef  TARGET_BIG_SYM
18792 #define TARGET_BIG_SYM                  arm_elf32_symbian_be_vec
18793 #undef  TARGET_BIG_NAME
18794 #define TARGET_BIG_NAME                 "elf32-bigarm-symbian"
18795 
18796 /* Like elf32_arm_link_hash_table_create -- but overrides
18797    appropriately for Symbian OS.  */
18798 
18799 static struct bfd_link_hash_table *
18800 elf32_arm_symbian_link_hash_table_create (bfd *abfd)
18801 {
18802   struct bfd_link_hash_table *ret;
18803 
18804   ret = elf32_arm_link_hash_table_create (abfd);
18805   if (ret)
18806     {
18807       struct elf32_arm_link_hash_table *htab
18808 	= (struct elf32_arm_link_hash_table *)ret;
18809       /* There is no PLT header for Symbian OS.  */
18810       htab->plt_header_size = 0;
18811       /* The PLT entries are each one instruction and one word.  */
18812       htab->plt_entry_size = 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry);
18813       htab->symbian_p = 1;
18814       /* Symbian uses armv5t or above, so use_blx is always true.  */
18815       htab->use_blx = 1;
18816       htab->root.is_relocatable_executable = 1;
18817     }
18818   return ret;
18819 }
18820 
18821 static const struct bfd_elf_special_section
18822 elf32_arm_symbian_special_sections[] =
18823 {
18824   /* In a BPABI executable, the dynamic linking sections do not go in
18825      the loadable read-only segment.  The post-linker may wish to
18826      refer to these sections, but they are not part of the final
18827      program image.  */
18828   { STRING_COMMA_LEN (".dynamic"),       0, SHT_DYNAMIC,  0 },
18829   { STRING_COMMA_LEN (".dynstr"),        0, SHT_STRTAB,   0 },
18830   { STRING_COMMA_LEN (".dynsym"),        0, SHT_DYNSYM,   0 },
18831   { STRING_COMMA_LEN (".got"),           0, SHT_PROGBITS, 0 },
18832   { STRING_COMMA_LEN (".hash"),          0, SHT_HASH,     0 },
18833   /* These sections do not need to be writable as the SymbianOS
18834      postlinker will arrange things so that no dynamic relocation is
18835      required.  */
18836   { STRING_COMMA_LEN (".init_array"),    0, SHT_INIT_ARRAY,    SHF_ALLOC },
18837   { STRING_COMMA_LEN (".fini_array"),    0, SHT_FINI_ARRAY,    SHF_ALLOC },
18838   { STRING_COMMA_LEN (".preinit_array"), 0, SHT_PREINIT_ARRAY, SHF_ALLOC },
18839   { NULL,                             0, 0, 0,                 0 }
18840 };
18841 
18842 static void
18843 elf32_arm_symbian_begin_write_processing (bfd *abfd,
18844 					  struct bfd_link_info *link_info)
18845 {
18846   /* BPABI objects are never loaded directly by an OS kernel; they are
18847      processed by a postlinker first, into an OS-specific format.  If
18848      the D_PAGED bit is set on the file, BFD will align segments on
18849      page boundaries, so that an OS can directly map the file.  With
18850      BPABI objects, that just results in wasted space.  In addition,
18851      because we clear the D_PAGED bit, map_sections_to_segments will
18852      recognize that the program headers should not be mapped into any
18853      loadable segment.  */
18854   abfd->flags &= ~D_PAGED;
18855   elf32_arm_begin_write_processing (abfd, link_info);
18856 }
18857 
18858 static bfd_boolean
18859 elf32_arm_symbian_modify_segment_map (bfd *abfd,
18860 				      struct bfd_link_info *info)
18861 {
18862   struct elf_segment_map *m;
18863   asection *dynsec;
18864 
18865   /* BPABI shared libraries and executables should have a PT_DYNAMIC
18866      segment.  However, because the .dynamic section is not marked
18867      with SEC_LOAD, the generic ELF code will not create such a
18868      segment.  */
18869   dynsec = bfd_get_section_by_name (abfd, ".dynamic");
18870   if (dynsec)
18871     {
18872       for (m = elf_seg_map (abfd); m != NULL; m = m->next)
18873 	if (m->p_type == PT_DYNAMIC)
18874 	  break;
18875 
18876       if (m == NULL)
18877 	{
18878 	  m = _bfd_elf_make_dynamic_segment (abfd, dynsec);
18879 	  m->next = elf_seg_map (abfd);
18880 	  elf_seg_map (abfd) = m;
18881 	}
18882     }
18883 
18884   /* Also call the generic arm routine.  */
18885   return elf32_arm_modify_segment_map (abfd, info);
18886 }
18887 
18888 /* Return address for Ith PLT stub in section PLT, for relocation REL
18889    or (bfd_vma) -1 if it should not be included.  */
18890 
18891 static bfd_vma
18892 elf32_arm_symbian_plt_sym_val (bfd_vma i, const asection *plt,
18893 			       const arelent *rel ATTRIBUTE_UNUSED)
18894 {
18895   return plt->vma + 4 * ARRAY_SIZE (elf32_arm_symbian_plt_entry) * i;
18896 }
18897 
18898 #undef  elf32_bed
18899 #define elf32_bed elf32_arm_symbian_bed
18900 
18901 /* The dynamic sections are not allocated on SymbianOS; the postlinker
18902    will process them and then discard them.  */
18903 #undef  ELF_DYNAMIC_SEC_FLAGS
18904 #define ELF_DYNAMIC_SEC_FLAGS \
18905   (SEC_HAS_CONTENTS | SEC_IN_MEMORY | SEC_LINKER_CREATED)
18906 
18907 #undef elf_backend_emit_relocs
18908 
18909 #undef  bfd_elf32_bfd_link_hash_table_create
18910 #define bfd_elf32_bfd_link_hash_table_create	elf32_arm_symbian_link_hash_table_create
18911 #undef  elf_backend_special_sections
18912 #define elf_backend_special_sections 		elf32_arm_symbian_special_sections
18913 #undef  elf_backend_begin_write_processing
18914 #define elf_backend_begin_write_processing	elf32_arm_symbian_begin_write_processing
18915 #undef  elf_backend_final_write_processing
18916 #define elf_backend_final_write_processing	elf32_arm_final_write_processing
18917 
18918 #undef  elf_backend_modify_segment_map
18919 #define elf_backend_modify_segment_map elf32_arm_symbian_modify_segment_map
18920 
18921 /* There is no .got section for BPABI objects, and hence no header.  */
18922 #undef  elf_backend_got_header_size
18923 #define elf_backend_got_header_size 0
18924 
18925 /* Similarly, there is no .got.plt section.  */
18926 #undef  elf_backend_want_got_plt
18927 #define elf_backend_want_got_plt 0
18928 
18929 #undef  elf_backend_plt_sym_val
18930 #define elf_backend_plt_sym_val		elf32_arm_symbian_plt_sym_val
18931 
18932 #undef  elf_backend_may_use_rel_p
18933 #define elf_backend_may_use_rel_p	1
18934 #undef  elf_backend_may_use_rela_p
18935 #define elf_backend_may_use_rela_p	0
18936 #undef  elf_backend_default_use_rela_p
18937 #define elf_backend_default_use_rela_p	0
18938 #undef  elf_backend_want_plt_sym
18939 #define elf_backend_want_plt_sym	0
18940 #undef  ELF_MAXPAGESIZE
18941 #define ELF_MAXPAGESIZE			0x8000
18942 
18943 #include "elf32-target.h"
18944