1 /* X86-64 specific support for ELF
2 Copyright (C) 2000-2024 Free Software Foundation, Inc.
3 Contributed by Jan Hubicka <jh@suse.cz>.
4
5 This file is part of BFD, the Binary File Descriptor library.
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3 of the License, or
10 (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston,
20 MA 02110-1301, USA. */
21
22 #include "elfxx-x86.h"
23 #include "dwarf2.h"
24 #include "libiberty.h"
25 #include "sframe.h"
26
27 #include "opcode/i386.h"
28
29 #ifdef CORE_HEADER
30 #include <stdarg.h>
31 #include CORE_HEADER
32 #endif
33
34 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */
35 #define MINUS_ONE (~ (bfd_vma) 0)
36
37 /* Since both 32-bit and 64-bit x86-64 encode the relocation type in an
38 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get the
39 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE
40 since they are the same. */
41
42 /* The relocation "howto" table. Order of fields:
43 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow,
44 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */
45 static reloc_howto_type x86_64_elf_howto_table[] =
46 {
47 HOWTO(R_X86_64_NONE, 0, 0, 0, false, 0, complain_overflow_dont,
48 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0, 0x00000000,
49 false),
50 HOWTO(R_X86_64_64, 0, 8, 64, false, 0, complain_overflow_dont,
51 bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE,
52 false),
53 HOWTO(R_X86_64_PC32, 0, 4, 32, true, 0, complain_overflow_signed,
54 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff,
55 true),
56 HOWTO(R_X86_64_GOT32, 0, 4, 32, false, 0, complain_overflow_signed,
57 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff,
58 false),
59 HOWTO(R_X86_64_PLT32, 0, 4, 32, true, 0, complain_overflow_signed,
60 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff,
61 true),
62 HOWTO(R_X86_64_COPY, 0, 4, 32, false, 0, complain_overflow_bitfield,
63 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff,
64 false),
65 HOWTO(R_X86_64_GLOB_DAT, 0, 8, 64, false, 0, complain_overflow_dont,
66 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE,
67 false),
68 HOWTO(R_X86_64_JUMP_SLOT, 0, 8, 64, false, 0, complain_overflow_dont,
69 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE,
70 false),
71 HOWTO(R_X86_64_RELATIVE, 0, 8, 64, false, 0, complain_overflow_dont,
72 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE,
73 false),
74 HOWTO(R_X86_64_GOTPCREL, 0, 4, 32, true, 0, complain_overflow_signed,
75 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff,
76 true),
77 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_unsigned,
78 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
79 false),
80 HOWTO(R_X86_64_32S, 0, 4, 32, false, 0, complain_overflow_signed,
81 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff,
82 false),
83 HOWTO(R_X86_64_16, 0, 2, 16, false, 0, complain_overflow_bitfield,
84 bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false),
85 HOWTO(R_X86_64_PC16, 0, 2, 16, true, 0, complain_overflow_bitfield,
86 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true),
87 HOWTO(R_X86_64_8, 0, 1, 8, false, 0, complain_overflow_bitfield,
88 bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false),
89 HOWTO(R_X86_64_PC8, 0, 1, 8, true, 0, complain_overflow_signed,
90 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true),
91 HOWTO(R_X86_64_DTPMOD64, 0, 8, 64, false, 0, complain_overflow_dont,
92 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE,
93 false),
94 HOWTO(R_X86_64_DTPOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
95 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE,
96 false),
97 HOWTO(R_X86_64_TPOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
98 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE,
99 false),
100 HOWTO(R_X86_64_TLSGD, 0, 4, 32, true, 0, complain_overflow_signed,
101 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff,
102 true),
103 HOWTO(R_X86_64_TLSLD, 0, 4, 32, true, 0, complain_overflow_signed,
104 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff,
105 true),
106 HOWTO(R_X86_64_DTPOFF32, 0, 4, 32, false, 0, complain_overflow_signed,
107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff,
108 false),
109 HOWTO(R_X86_64_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed,
110 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff,
111 true),
112 HOWTO(R_X86_64_TPOFF32, 0, 4, 32, false, 0, complain_overflow_signed,
113 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff,
114 false),
115 HOWTO(R_X86_64_PC64, 0, 8, 64, true, 0, complain_overflow_dont,
116 bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE,
117 true),
118 HOWTO(R_X86_64_GOTOFF64, 0, 8, 64, false, 0, complain_overflow_dont,
119 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE,
120 false),
121 HOWTO(R_X86_64_GOTPC32, 0, 4, 32, true, 0, complain_overflow_signed,
122 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff,
123 true),
124 HOWTO(R_X86_64_GOT64, 0, 8, 64, false, 0, complain_overflow_signed,
125 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE,
126 false),
127 HOWTO(R_X86_64_GOTPCREL64, 0, 8, 64, true, 0, complain_overflow_signed,
128 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE,
129 true),
130 HOWTO(R_X86_64_GOTPC64, 0, 8, 64, true, 0, complain_overflow_signed,
131 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE,
132 true),
133 HOWTO(R_X86_64_GOTPLT64, 0, 8, 64, false, 0, complain_overflow_signed,
134 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE,
135 false),
136 HOWTO(R_X86_64_PLTOFF64, 0, 8, 64, false, 0, complain_overflow_signed,
137 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE,
138 false),
139 HOWTO(R_X86_64_SIZE32, 0, 4, 32, false, 0, complain_overflow_unsigned,
140 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff,
141 false),
142 HOWTO(R_X86_64_SIZE64, 0, 8, 64, false, 0, complain_overflow_dont,
143 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE,
144 false),
145 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
146 complain_overflow_bitfield, bfd_elf_generic_reloc,
147 "R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
148 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, false, 0,
149 complain_overflow_dont, bfd_elf_generic_reloc,
150 "R_X86_64_TLSDESC_CALL",
151 false, 0, 0, false),
152 HOWTO(R_X86_64_TLSDESC, 0, 8, 64, false, 0,
153 complain_overflow_dont, bfd_elf_generic_reloc,
154 "R_X86_64_TLSDESC", false, 0, MINUS_ONE, false),
155 HOWTO(R_X86_64_IRELATIVE, 0, 8, 64, false, 0, complain_overflow_dont,
156 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE,
157 false),
158 HOWTO(R_X86_64_RELATIVE64, 0, 8, 64, false, 0, complain_overflow_dont,
159 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE,
160 false),
161 HOWTO(R_X86_64_PC32_BND, 0, 4, 32, true, 0, complain_overflow_signed,
162 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff,
163 true),
164 HOWTO(R_X86_64_PLT32_BND, 0, 4, 32, true, 0, complain_overflow_signed,
165 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff,
166 true),
167 HOWTO(R_X86_64_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
168 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff,
169 true),
170 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
171 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff,
172 true),
173 HOWTO(R_X86_64_CODE_4_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed,
174 bfd_elf_generic_reloc, "R_X86_64_CODE_4_GOTPCRELX", false, 0, 0xffffffff,
175 true),
176 HOWTO(R_X86_64_CODE_4_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed,
177 bfd_elf_generic_reloc, "R_X86_64_CODE_4_GOTTPOFF", false, 0, 0xffffffff,
178 true),
179 HOWTO(R_X86_64_CODE_4_GOTPC32_TLSDESC, 0, 4, 32, true, 0,
180 complain_overflow_bitfield, bfd_elf_generic_reloc,
181 "R_X86_64_CODE_4_GOTPC32_TLSDESC", false, 0, 0xffffffff, true),
182
183 /* We have a gap in the reloc numbers here.
184 R_X86_64_standard counts the number up to this point, and
185 R_X86_64_vt_offset is the value to subtract from a reloc type of
186 R_X86_64_GNU_VT* to form an index into this table. */
187 #define R_X86_64_standard (R_X86_64_CODE_4_GOTPC32_TLSDESC + 1)
188 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard)
189
190 /* GNU extension to record C++ vtable hierarchy. */
191 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 8, 0, false, 0, complain_overflow_dont,
192 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false),
193
194 /* GNU extension to record C++ vtable member usage. */
195 HOWTO (R_X86_64_GNU_VTENTRY, 0, 8, 0, false, 0, complain_overflow_dont,
196 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0,
197 false),
198
199 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */
200 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_bitfield,
201 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff,
202 false)
203 };
204
205 /* Map BFD relocs to the x86_64 elf relocs. */
206 struct elf_reloc_map
207 {
208 bfd_reloc_code_real_type bfd_reloc_val;
209 unsigned char elf_reloc_val;
210 };
211
212 static const struct elf_reloc_map x86_64_reloc_map[] =
213 {
214 { BFD_RELOC_NONE, R_X86_64_NONE, },
215 { BFD_RELOC_64, R_X86_64_64, },
216 { BFD_RELOC_32_PCREL, R_X86_64_PC32, },
217 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,},
218 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,},
219 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, },
220 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, },
221 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, },
222 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, },
223 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, },
224 { BFD_RELOC_32, R_X86_64_32, },
225 { BFD_RELOC_X86_64_32S, R_X86_64_32S, },
226 { BFD_RELOC_16, R_X86_64_16, },
227 { BFD_RELOC_16_PCREL, R_X86_64_PC16, },
228 { BFD_RELOC_8, R_X86_64_8, },
229 { BFD_RELOC_8_PCREL, R_X86_64_PC8, },
230 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, },
231 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, },
232 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, },
233 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, },
234 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, },
235 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, },
236 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, },
237 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, },
238 { BFD_RELOC_64_PCREL, R_X86_64_PC64, },
239 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, },
240 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, },
241 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, },
242 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, },
243 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, },
244 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, },
245 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, },
246 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, },
247 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, },
248 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, },
249 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, },
250 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, },
251 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, },
252 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, },
253 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, },
254 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, },
255 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, },
256 { BFD_RELOC_X86_64_CODE_4_GOTPCRELX, R_X86_64_CODE_4_GOTPCRELX, },
257 { BFD_RELOC_X86_64_CODE_4_GOTTPOFF, R_X86_64_CODE_4_GOTTPOFF, },
258 { BFD_RELOC_X86_64_CODE_4_GOTPC32_TLSDESC, R_X86_64_CODE_4_GOTPC32_TLSDESC, },
259 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, },
260 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, },
261 };
262
263 static reloc_howto_type *
264 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
265 {
266 unsigned i;
267
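/* Map R_TYPE to an index into x86_64_elf_howto_table: the x32 flavour
   of R_X86_64_32 lives in the last slot, and the R_X86_64_GNU_VT*
   relocations are stored right after the standard entries, hence the
   R_X86_64_vt_offset adjustment. */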
268 if (r_type == (unsigned int) R_X86_64_32)
269 {
270 if (ABI_64_P (abfd))
271 i = r_type;
272 else
273 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
274 }
275 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
276 || r_type >= (unsigned int) R_X86_64_max)
277 {
278 if (r_type >= (unsigned int) R_X86_64_standard)
279 {
280 /* xgettext:c-format */
281 _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
282 abfd, r_type);
283 bfd_set_error (bfd_error_bad_value);
284 return NULL;
285 }
286 i = r_type;
287 }
288 else
289 i = r_type - (unsigned int) R_X86_64_vt_offset;
290 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
291 return &x86_64_elf_howto_table[i];
292 }
293
294 /* Given a BFD reloc type, return a HOWTO structure. */
295 static reloc_howto_type *
296 elf_x86_64_reloc_type_lookup (bfd *abfd,
297 bfd_reloc_code_real_type code)
298 {
299 unsigned int i;
300
301 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
302 i++)
303 {
304 if (x86_64_reloc_map[i].bfd_reloc_val == code)
305 return elf_x86_64_rtype_to_howto (abfd,
306 x86_64_reloc_map[i].elf_reloc_val);
307 }
308 return NULL;
309 }
310
311 static reloc_howto_type *
312 elf_x86_64_reloc_name_lookup (bfd *abfd,
313 const char *r_name)
314 {
315 unsigned int i;
316
317 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
318 {
319 /* Get x32 R_X86_64_32. */
320 reloc_howto_type *reloc
321 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
322 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
323 return reloc;
324 }
325
326 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
327 if (x86_64_elf_howto_table[i].name != NULL
328 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
329 return &x86_64_elf_howto_table[i];
330
331 return NULL;
332 }
333
334 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */
335
336 static bool
337 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
338 Elf_Internal_Rela *dst)
339 {
340 unsigned r_type;
341
342 r_type = ELF32_R_TYPE (dst->r_info);
343 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
344 if (cache_ptr->howto == NULL)
345 return false;
346 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
347 return true;
348 }
349
350 /* Support for core dump NOTE sections. */
351 static bool
352 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
353 {
354 int offset;
355 size_t size;
356
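  /* note->descsz distinguishes the x32 and x86-64 layouts of struct
     elf_prstatus; OFFSET and SIZE then locate the pr_reg block, whose
     216 bytes correspond to the x86-64 general-purpose register set. */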
357 switch (note->descsz)
358 {
359 default:
360 return false;
361
362 case 296: /* sizeof (struct elf_prstatus) on Linux/x32 */
363 /* pr_cursig */
364 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);
365
366 /* pr_pid */
367 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);
368
369 /* pr_reg */
370 offset = 72;
371 size = 216;
372
373 break;
374
375 case 336: /* sizeof (struct elf_prstatus) on Linux/x86_64 */
376 /* pr_cursig */
377 elf_tdata (abfd)->core->signal
378 = bfd_get_16 (abfd, note->descdata + 12);
379
380 /* pr_pid */
381 elf_tdata (abfd)->core->lwpid
382 = bfd_get_32 (abfd, note->descdata + 32);
383
384 /* pr_reg */
385 offset = 112;
386 size = 216;
387
388 break;
389 }
390
391 /* Make a ".reg/999" section. */
392 return _bfd_elfcore_make_pseudosection (abfd, ".reg",
393 size, note->descpos + offset);
394 }
395
396 static bool
397 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note)
398 {
399 switch (note->descsz)
400 {
401 default:
402 return false;
403
404 case 124:
405 /* sizeof (struct elf_external_linux_prpsinfo32_ugid16). */
406 elf_tdata (abfd)->core->pid
407 = bfd_get_32 (abfd, note->descdata + 12);
408 elf_tdata (abfd)->core->program
409 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16);
410 elf_tdata (abfd)->core->command
411 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80);
412 break;
413
414 case 128:
415 /* sizeof (struct elf_external_linux_prpsinfo32_ugid32). */
416 elf_tdata (abfd)->core->pid
417 = bfd_get_32 (abfd, note->descdata + 12);
418 elf_tdata (abfd)->core->program
419 = _bfd_elfcore_strndup (abfd, note->descdata + 32, 16);
420 elf_tdata (abfd)->core->command
421 = _bfd_elfcore_strndup (abfd, note->descdata + 48, 80);
422 break;
423
424 case 136:
425 /* sizeof (struct elf_prpsinfo) on Linux/x86_64. */
426 elf_tdata (abfd)->core->pid
427 = bfd_get_32 (abfd, note->descdata + 24);
428 elf_tdata (abfd)->core->program
429 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16);
430 elf_tdata (abfd)->core->command
431 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80);
432 }
433
434 /* Note that for some reason, a spurious space is tacked
435 onto the end of the args in some implementations (at least
436 one, anyway), so strip it off if it exists. */
437
438 {
439 char *command = elf_tdata (abfd)->core->command;
440 int n = strlen (command);
441
442 if (0 < n && command[n - 1] == ' ')
443 command[n - 1] = '\0';
444 }
445
446 return true;
447 }
448
449 #ifdef CORE_HEADER
450 # if GCC_VERSION >= 8000
451 # pragma GCC diagnostic push
452 # pragma GCC diagnostic ignored "-Wstringop-truncation"
453 # endif
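/* The strncpy calls below intentionally fill the fixed-size pr_fname and
   pr_psargs fields, possibly without NUL termination, which is exactly
   what -Wstringop-truncation would warn about. */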
454 static char *
455 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz,
456 int note_type, ...)
457 {
458 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
459 va_list ap;
460 const char *fname, *psargs;
461 long pid;
462 int cursig;
463 const void *gregs;
464
465 switch (note_type)
466 {
467 default:
468 return NULL;
469
470 case NT_PRPSINFO:
471 va_start (ap, note_type);
472 fname = va_arg (ap, const char *);
473 psargs = va_arg (ap, const char *);
474 va_end (ap);
475
476 if (bed->s->elfclass == ELFCLASS32)
477 {
478 prpsinfo32_t data;
479 memset (&data, 0, sizeof (data));
480 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
481 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
482 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
483 &data, sizeof (data));
484 }
485 else
486 {
487 prpsinfo64_t data;
488 memset (&data, 0, sizeof (data));
489 strncpy (data.pr_fname, fname, sizeof (data.pr_fname));
490 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs));
491 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
492 &data, sizeof (data));
493 }
494 /* NOTREACHED */
495
496 case NT_PRSTATUS:
497 va_start (ap, note_type);
498 pid = va_arg (ap, long);
499 cursig = va_arg (ap, int);
500 gregs = va_arg (ap, const void *);
501 va_end (ap);
502
503 if (bed->s->elfclass == ELFCLASS32)
504 {
505 if (bed->elf_machine_code == EM_X86_64)
506 {
507 prstatusx32_t prstat;
508 memset (&prstat, 0, sizeof (prstat));
509 prstat.pr_pid = pid;
510 prstat.pr_cursig = cursig;
511 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
512 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
513 &prstat, sizeof (prstat));
514 }
515 else
516 {
517 prstatus32_t prstat;
518 memset (&prstat, 0, sizeof (prstat));
519 prstat.pr_pid = pid;
520 prstat.pr_cursig = cursig;
521 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
522 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
523 &prstat, sizeof (prstat));
524 }
525 }
526 else
527 {
528 prstatus64_t prstat;
529 memset (&prstat, 0, sizeof (prstat));
530 prstat.pr_pid = pid;
531 prstat.pr_cursig = cursig;
532 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg));
533 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type,
534 &prstat, sizeof (prstat));
535 }
536 }
537 /* NOTREACHED */
538 }
539 # if GCC_VERSION >= 8000
540 # pragma GCC diagnostic pop
541 # endif
542 #endif
543
544 /* Functions for the x86-64 ELF linker. */
545
546 /* The size in bytes of an entry in the global offset table. */
547
548 #define GOT_ENTRY_SIZE 8
549
550 /* The size in bytes of an entry in the lazy procedure linkage table. */
551
552 #define LAZY_PLT_ENTRY_SIZE 16
553
554 /* The size in bytes of an entry in the non-lazy procedure linkage
555 table. */
556
557 #define NON_LAZY_PLT_ENTRY_SIZE 8
558
559 /* The first entry in a lazy procedure linkage table looks like this.
560 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this
561 works. */
562
563 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
564 {
565 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
566 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */
567 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */
568 };
569
570 /* Subsequent entries in a lazy procedure linkage table look like this. */
571
572 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] =
573 {
574 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
575 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
576 0x68, /* pushq immediate */
577 0, 0, 0, 0, /* replaced with index into relocation table. */
578 0xe9, /* jmp relative */
579 0, 0, 0, 0 /* replaced with offset to start of .plt0. */
580 };
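/* In the entry template above, byte 2 holds the GOTPCREL displacement,
   byte 7 the relocation index and byte 12 the branch back to PLT0; these
   match the plt_got_offset, plt_reloc_offset and plt_plt_offset fields
   of elf_x86_64_lazy_plt below. */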
581
582 /* The first entry in a lazy procedure linkage table with BND prefix
583 looks like this. */
584
585 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
586 {
587 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
588 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */
589 0x0f, 0x1f, 0 /* nopl (%rax) */
590 };
591
592 /* Subsequent entries for branches with BND prefix in a lazy procedure
593 linkage table look like this. */
594
595 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
596 {
597 0x68, 0, 0, 0, 0, /* pushq immediate */
598 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
599 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */
600 };
601
602 /* The first entry in the IBT-enabled lazy procedure linkage table is
603 the same as the lazy PLT with BND prefix so that bound registers are
604 preserved when control is passed to the dynamic linker. Subsequent
605 entries for an IBT-enabled lazy procedure linkage table look like
606 this. */
607
608 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
609 {
610 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
611 0x68, 0, 0, 0, 0, /* pushq immediate */
612 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */
613 0x90 /* nop */
614 };
615
616 /* The first entry in the x32 IBT-enabled lazy procedure linkage table
617 is the same as the normal lazy PLT. Subsequent entries for an
618 x32 IBT-enabled lazy procedure linkage table look like this. */
619
620 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
621 {
622 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
623 0x68, 0, 0, 0, 0, /* pushq immediate */
624 0xe9, 0, 0, 0, 0, /* jmpq relative */
625 0x66, 0x90 /* xchg %ax,%ax */
626 };
627
628 /* Entries in the non-lazy procedure linkage table look like this. */
629
630 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
631 {
632 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
633 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
634 0x66, 0x90 /* xchg %ax,%ax */
635 };
636
637 /* Entries for branches with BND prefix in the non-lazy procedure
638 linkage table look like this. */
639
640 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
641 {
642 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
643 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
644 0x90 /* nop */
645 };
646
647 /* Entries for branches with IBT enabled in the non-lazy procedure
648 linkage table look like this. They have the same size as the lazy
649 PLT entry. */
650
651 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
652 {
653 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
654 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */
655 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
656 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */
657 };
658
659 /* Entries for branches with IBT enabled in the x32 non-lazy procedure
660 linkage table look like this. They have the same size as the lazy
661 PLT entry. */
662
663 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
664 {
665 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
666 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */
667 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */
668 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */
669 };
670
671 /* The TLSDESC entry in a lazy procedure linkage table. */
672 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] =
673 {
674 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */
675 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */
676 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */
677 };
678
679 /* .eh_frame covering the lazy .plt section. */
680
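/* The DW_CFA_def_cfa_expression below computes the CFA as %rsp + 8, plus
   another 8 bytes once the pushq inside a 16-byte PLT entry has executed;
   the "(%rip & 15) >= 11" test tells how far into the current PLT slot
   the return address points.  The BND and IBT variants that follow use
   the same expression with a different threshold constant. */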
681 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] =
682 {
683 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
684 0, 0, 0, 0, /* CIE ID */
685 1, /* CIE version */
686 'z', 'R', 0, /* Augmentation string */
687 1, /* Code alignment factor */
688 0x78, /* Data alignment factor */
689 16, /* Return address column */
690 1, /* Augmentation size */
691 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
692 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
693 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
694 DW_CFA_nop, DW_CFA_nop,
695
696 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
697 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
698 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
699 0, 0, 0, 0, /* .plt size goes here */
700 0, /* Augmentation size */
701 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
702 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
703 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
704 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
705 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
706 11, /* Block length */
707 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
708 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
709 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge,
710 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
711 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
712 };
713
714 /* .eh_frame covering the lazy BND .plt section. */
715
716 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] =
717 {
718 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
719 0, 0, 0, 0, /* CIE ID */
720 1, /* CIE version */
721 'z', 'R', 0, /* Augmentation string */
722 1, /* Code alignment factor */
723 0x78, /* Data alignment factor */
724 16, /* Return address column */
725 1, /* Augmentation size */
726 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
727 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
728 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
729 DW_CFA_nop, DW_CFA_nop,
730
731 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
732 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
733 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
734 0, 0, 0, 0, /* .plt size goes here */
735 0, /* Augmentation size */
736 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
737 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
738 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
739 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
740 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
741 11, /* Block length */
742 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
743 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
744 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge,
745 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
746 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
747 };
748
749 /* .eh_frame covering the lazy .plt section with IBT enabled. */
750
751 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] =
752 {
753 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
754 0, 0, 0, 0, /* CIE ID */
755 1, /* CIE version */
756 'z', 'R', 0, /* Augmentation string */
757 1, /* Code alignment factor */
758 0x78, /* Data alignment factor */
759 16, /* Return address column */
760 1, /* Augmentation size */
761 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
762 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
763 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
764 DW_CFA_nop, DW_CFA_nop,
765
766 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
767 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
768 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
769 0, 0, 0, 0, /* .plt size goes here */
770 0, /* Augmentation size */
771 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
772 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
773 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
774 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
775 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
776 11, /* Block length */
777 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
778 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
779 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge,
780 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
781 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
782 };
783
784 /* .eh_frame covering the x32 lazy .plt section with IBT enabled. */
785
786 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] =
787 {
788 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
789 0, 0, 0, 0, /* CIE ID */
790 1, /* CIE version */
791 'z', 'R', 0, /* Augmentation string */
792 1, /* Code alignment factor */
793 0x78, /* Data alignment factor */
794 16, /* Return address column */
795 1, /* Augmentation size */
796 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
797 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
798 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
799 DW_CFA_nop, DW_CFA_nop,
800
801 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */
802 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
803 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */
804 0, 0, 0, 0, /* .plt size goes here */
805 0, /* Augmentation size */
806 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */
807 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */
808 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */
809 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */
810 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */
811 11, /* Block length */
812 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */
813 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */
814 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge,
815 DW_OP_lit3, DW_OP_shl, DW_OP_plus,
816 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
817 };
818
819 /* .eh_frame covering the non-lazy .plt section. */
820
821 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] =
822 {
823 #define PLT_GOT_FDE_LENGTH 20
824 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */
825 0, 0, 0, 0, /* CIE ID */
826 1, /* CIE version */
827 'z', 'R', 0, /* Augmentation string */
828 1, /* Code alignment factor */
829 0x78, /* Data alignment factor */
830 16, /* Return address column */
831 1, /* Augmentation size */
832 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */
833 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */
834 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */
835 DW_CFA_nop, DW_CFA_nop,
836
837 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */
838 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */
839 0, 0, 0, 0, /* the start of non-lazy .plt goes here */
840 0, 0, 0, 0, /* non-lazy .plt size goes here */
841 0, /* Augmentation size */
842 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop,
843 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop
844 };
845
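/* SFrame frame row entries for the PLTs.  The first byte of each offsets
   array below is the CFA offset from %rsp (SFRAME_BASE_REG_SP), mirroring
   the DWARF CFI above: 16 on entry to PLT0, 24 after its pushq, 8 on
   entry to a PLT slot and 16 after that slot's pushq. */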
846 static const sframe_frame_row_entry elf_x86_64_sframe_null_fre =
847 {
848 0,
849 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
850 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
851 };
852
853 /* .sframe FRE covering the .plt section entry. */
854 static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre1 =
855 {
856 0, /* SFrame FRE start address. */
857 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
858 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
859 };
860
861 /* .sframe FRE covering the .plt section entry. */
862 static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre2 =
863 {
864 6, /* SFrame FRE start address. */
865 {24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
866 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
867 };
868
869 /* .sframe FRE covering the .plt section entry. */
870 static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre1 =
871 {
872 0, /* SFrame FRE start address. */
873 {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
874 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
875 };
876
877 /* .sframe FRE covering the .plt section entry. */
878 static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre2 =
879 {
880 11, /* SFrame FRE start address. */
881 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
882 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
883 };
884
885 /* .sframe FRE covering the second .plt section entry. */
886 static const sframe_frame_row_entry elf_x86_64_sframe_sec_pltn_fre1 =
887 {
888 0, /* SFrame FRE start address. */
889 {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */
890 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */
891 };
892
893 /* SFrame helper object for non-lazy PLT. Also used for IBT enabled PLT. */
894 static const struct elf_x86_sframe_plt elf_x86_64_sframe_non_lazy_plt =
895 {
896 LAZY_PLT_ENTRY_SIZE,
897 2, /* Number of FREs for PLT0. */
898 /* Array of SFrame FREs for plt0. */
899 { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
900 LAZY_PLT_ENTRY_SIZE,
901 1, /* Number of FREs for PLTn. */
902 /* Array of SFrame FREs for plt. */
903 { &elf_x86_64_sframe_sec_pltn_fre1, &elf_x86_64_sframe_null_fre },
904 0,
905 0, /* There is no second PLT necessary. */
906 { &elf_x86_64_sframe_null_fre }
907 };
908
909 /* SFrame helper object for lazy PLT. Also used for IBT enabled PLT. */
910 static const struct elf_x86_sframe_plt elf_x86_64_sframe_plt =
911 {
912 LAZY_PLT_ENTRY_SIZE,
913 2, /* Number of FREs for PLT0. */
914 /* Array of SFrame FREs for plt0. */
915 { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 },
916 LAZY_PLT_ENTRY_SIZE,
917 2, /* Number of FREs for PLTn. */
918 /* Array of SFrame FREs for plt. */
919 { &elf_x86_64_sframe_pltn_fre1, &elf_x86_64_sframe_pltn_fre2 },
920 NON_LAZY_PLT_ENTRY_SIZE,
921 1, /* Number of FREs for PLTn for second PLT. */
922 /* FREs for second plt (stack trace info for .plt.got is
923 identical). Used when IBT or non-lazy PLT is in effect. */
924 { &elf_x86_64_sframe_sec_pltn_fre1 }
925 };
926
927 /* These are the standard parameters. */
928 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt =
929 {
930 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
931 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
932 elf_x86_64_lazy_plt_entry, /* plt_entry */
933 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
934 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
935 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
936 6, /* plt_tlsdesc_got1_offset */
937 12, /* plt_tlsdesc_got2_offset */
938 10, /* plt_tlsdesc_got1_insn_end */
939 16, /* plt_tlsdesc_got2_insn_end */
940 2, /* plt0_got1_offset */
941 8, /* plt0_got2_offset */
942 12, /* plt0_got2_insn_end */
943 2, /* plt_got_offset */
944 7, /* plt_reloc_offset */
945 12, /* plt_plt_offset */
946 6, /* plt_got_insn_size */
947 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */
948 6, /* plt_lazy_offset */
949 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
950 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */
951 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */
952 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */
953 };
954
955 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt =
956 {
957 elf_x86_64_non_lazy_plt_entry, /* plt_entry */
958 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */
959 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
960 2, /* plt_got_offset */
961 6, /* plt_got_insn_size */
962 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
963 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
964 };
965
966 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt =
967 {
968 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
969 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
970 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */
971 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
972 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
973 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
974 6, /* plt_tlsdesc_got1_offset */
975 12, /* plt_tlsdesc_got2_offset */
976 10, /* plt_tlsdesc_got1_insn_end */
977 16, /* plt_tlsdesc_got2_insn_end */
978 2, /* plt0_got1_offset */
979 1+8, /* plt0_got2_offset */
980 1+12, /* plt0_got2_insn_end */
981 1+2, /* plt_got_offset */
982 1, /* plt_reloc_offset */
983 7, /* plt_plt_offset */
984 1+6, /* plt_got_insn_size */
985 11, /* plt_plt_insn_end */
986 0, /* plt_lazy_offset */
987 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
988 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */
989 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */
990 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */
991 };
992
993 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt =
994 {
995 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */
996 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */
997 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
998 1+2, /* plt_got_offset */
999 1+6, /* plt_got_insn_size */
1000 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
1001 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
1002 };
1003
1004 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt =
1005 {
1006 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */
1007 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
1008 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */
1009 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1010 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
1011 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
1012 6, /* plt_tlsdesc_got1_offset */
1013 12, /* plt_tlsdesc_got2_offset */
1014 10, /* plt_tlsdesc_got1_insn_end */
1015 16, /* plt_tlsdesc_got2_insn_end */
1016 2, /* plt0_got1_offset */
1017 1+8, /* plt0_got2_offset */
1018 1+12, /* plt0_got2_insn_end */
1019 4+1+2, /* plt_got_offset */
1020 4+1, /* plt_reloc_offset */
1021 4+1+6, /* plt_plt_offset */
1022 4+1+6, /* plt_got_insn_size */
1023 4+1+5+5, /* plt_plt_insn_end */
1024 0, /* plt_lazy_offset */
1025 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */
1026 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */
1027 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
1028 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
1029 };
1030
1031 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt =
1032 {
1033 elf_x86_64_lazy_plt0_entry, /* plt0_entry */
1034 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */
1035 elf_x32_lazy_ibt_plt_entry, /* plt_entry */
1036 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1037 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */
1038 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */
1039 6, /* plt_tlsdesc_got1_offset */
1040 12, /* plt_tlsdesc_got2_offset */
1041 10, /* plt_tlsdesc_got1_insn_end */
1042 16, /* plt_tlsdesc_got2_insn_end */
1043 2, /* plt0_got1_offset */
1044 8, /* plt0_got2_offset */
1045 12, /* plt0_got2_insn_end */
1046 4+2, /* plt_got_offset */
1047 4+1, /* plt_reloc_offset */
1048 4+6, /* plt_plt_offset */
1049 4+6, /* plt_got_insn_size */
1050 4+5+5, /* plt_plt_insn_end */
1051 0, /* plt_lazy_offset */
1052 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */
1053 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */
1054 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */
1055 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */
1056 };
1057
1058 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt =
1059 {
1060 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */
1061 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */
1062 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1063 4+1+2, /* plt_got_offset */
1064 4+1+6, /* plt_got_insn_size */
1065 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
1066 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
1067 };
1068
1069 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt =
1070 {
1071 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */
1072 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */
1073 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */
1074 4+2, /* plt_got_offset */
1075 4+6, /* plt_got_insn_size */
1076 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */
1077 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */
1078 };
1079
1080 static bool
1081 elf64_x86_64_elf_object_p (bfd *abfd)
1082 {
1083 /* Set the right machine number for an x86-64 elf64 file. */
1084 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64);
1085 return true;
1086 }
1087
1088 static bool
1089 elf32_x86_64_elf_object_p (bfd *abfd)
1090 {
1091 /* Set the right machine number for an x86-64 elf32 file. */
1092 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32);
1093 return true;
1094 }
1095
1096 /* Return TRUE if the TLS access code sequence supports transition
1097 from R_TYPE. */
1098
1099 static bool
1100 elf_x86_64_check_tls_transition (bfd *abfd,
1101 struct bfd_link_info *info,
1102 asection *sec,
1103 bfd_byte *contents,
1104 Elf_Internal_Shdr *symtab_hdr,
1105 struct elf_link_hash_entry **sym_hashes,
1106 unsigned int r_type,
1107 const Elf_Internal_Rela *rel,
1108 const Elf_Internal_Rela *relend)
1109 {
1110 unsigned int val;
1111 unsigned long r_symndx;
1112 bool largepic = false;
1113 struct elf_link_hash_entry *h;
1114 bfd_vma offset;
1115 struct elf_x86_link_hash_table *htab;
1116 bfd_byte *call;
1117 bool indirect_call;
1118
1119 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1120 offset = rel->r_offset;
1121 switch (r_type)
1122 {
1123 case R_X86_64_TLSGD:
1124 case R_X86_64_TLSLD:
1125 if ((rel + 1) >= relend)
1126 return false;
1127
1128 if (r_type == R_X86_64_TLSGD)
1129 {
1130 /* Check transition from GD access model. For 64bit, only
1131 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1132 .word 0x6666; rex64; call __tls_get_addr@PLT
1133 or
1134 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
1135 .byte 0x66; rex64
1136 call *__tls_get_addr@GOTPCREL(%rip)
1137 which may be converted to
1138 addr32 call __tls_get_addr
1139 can transition to a different access model. For 32bit, only
1140 leaq foo@tlsgd(%rip), %rdi
1141 .word 0x6666; rex64; call __tls_get_addr@PLT
1142 or
1143 leaq foo@tlsgd(%rip), %rdi
1144 .byte 0x66; rex64
1145 call *__tls_get_addr@GOTPCREL(%rip)
1146 which may be converted to
1147 addr32 call __tls_get_addr
1148 can transition to a different access model. For largepic,
1149 we also support:
1150 leaq foo@tlsgd(%rip), %rdi
1151 movabsq $__tls_get_addr@pltoff, %rax
1152 addq $r15, %rax
1153 call *%rax
1154 or
1155 leaq foo@tlsgd(%rip), %rdi
1156 movabsq $__tls_get_addr@pltoff, %rax
1157 addq $rbx, %rax
1158 call *%rax */
1159
1160 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d };
1161
1162 if ((offset + 12) > sec->size)
1163 return false;
1164
1165 call = contents + offset + 4;
1166 if (call[0] != 0x66
1167 || !((call[1] == 0x48
1168 && call[2] == 0xff
1169 && call[3] == 0x15)
1170 || (call[1] == 0x48
1171 && call[2] == 0x67
1172 && call[3] == 0xe8)
1173 || (call[1] == 0x66
1174 && call[2] == 0x48
1175 && call[3] == 0xe8)))
1176 {
1177 if (!ABI_64_P (abfd)
1178 || (offset + 19) > sec->size
1179 || offset < 3
1180 || memcmp (call - 7, leaq + 1, 3) != 0
1181 || memcmp (call, "\x48\xb8", 2) != 0
1182 || call[11] != 0x01
1183 || call[13] != 0xff
1184 || call[14] != 0xd0
1185 || !((call[10] == 0x48 && call[12] == 0xd8)
1186 || (call[10] == 0x4c && call[12] == 0xf8)))
1187 return false;
1188 largepic = true;
1189 }
1190 else if (ABI_64_P (abfd))
1191 {
1192 if (offset < 4
1193 || memcmp (contents + offset - 4, leaq, 4) != 0)
1194 return false;
1195 }
1196 else
1197 {
1198 if (offset < 3
1199 || memcmp (contents + offset - 3, leaq + 1, 3) != 0)
1200 return false;
1201 }
1202 indirect_call = call[2] == 0xff;
1203 }
1204 else
1205 {
1206 /* Check transition from LD access model. Only
1207 leaq foo@tlsld(%rip), %rdi;
1208 call __tls_get_addr@PLT
1209 or
1210 leaq foo@tlsld(%rip), %rdi;
1211 call *__tls_get_addr@GOTPCREL(%rip)
1212 which may be converted to
1213 addr32 call __tls_get_addr
1214 can transition to a different access model. For largepic
1215 we also support:
1216 leaq foo@tlsld(%rip), %rdi
1217 movabsq $__tls_get_addr@pltoff, %rax
1218 addq $r15, %rax
1219 call *%rax
1220 or
1221 leaq foo@tlsld(%rip), %rdi
1222 movabsq $__tls_get_addr@pltoff, %rax
1223 addq $rbx, %rax
1224 call *%rax */
1225
1226 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d };
1227
1228 if (offset < 3 || (offset + 9) > sec->size)
1229 return false;
1230
1231 if (memcmp (contents + offset - 3, lea, 3) != 0)
1232 return false;
1233
1234 call = contents + offset + 4;
1235 if (!(call[0] == 0xe8
1236 || (call[0] == 0xff && call[1] == 0x15)
1237 || (call[0] == 0x67 && call[1] == 0xe8)))
1238 {
1239 if (!ABI_64_P (abfd)
1240 || (offset + 19) > sec->size
1241 || memcmp (call, "\x48\xb8", 2) != 0
1242 || call[11] != 0x01
1243 || call[13] != 0xff
1244 || call[14] != 0xd0
1245 || !((call[10] == 0x48 && call[12] == 0xd8)
1246 || (call[10] == 0x4c && call[12] == 0xf8)))
1247 return false;
1248 largepic = true;
1249 }
1250 indirect_call = call[0] == 0xff;
1251 }
1252
1253 r_symndx = htab->r_sym (rel[1].r_info);
1254 if (r_symndx < symtab_hdr->sh_info)
1255 return false;
1256
1257 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
1258 if (h == NULL
1259 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr)
1260 return false;
1261 else
1262 {
1263 r_type = (ELF32_R_TYPE (rel[1].r_info)
1264 & ~R_X86_64_converted_reloc_bit);
1265 if (largepic)
1266 return r_type == R_X86_64_PLTOFF64;
1267 else if (indirect_call)
1268 return (r_type == R_X86_64_GOTPCRELX || r_type == R_X86_64_GOTPCREL);
1269 else
1270 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32);
1271 }
1272
1273 case R_X86_64_CODE_4_GOTTPOFF:
1274 /* Check transition from IE access model:
1275 mov foo@gottpoff(%rip), %reg
1276 add foo@gottpoff(%rip), %reg
1277 where reg is one of r16 to r31. */
1278
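/* R_X86_64_CODE_4_* relocations are used with a two-byte REX2 prefix,
   so the byte four bytes before the displacement must be the REX2
   prefix byte 0xd5. */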
1279 if (offset < 4
1280 || (offset + 4) > sec->size
1281 || contents[offset - 4] != 0xd5)
1282 return false;
1283
1284 goto check_gottpoff;
1285
1286 case R_X86_64_GOTTPOFF:
1287 /* Check transition from IE access model:
1288 mov foo@gottpoff(%rip), %reg
1289 add foo@gottpoff(%rip), %reg
1290 */
1291
1292 /* Check REX prefix first. */
1293 if (offset >= 3 && (offset + 4) <= sec->size)
1294 {
1295 val = bfd_get_8 (abfd, contents + offset - 3);
1296 if (val != 0x48 && val != 0x4c)
1297 {
1298 /* X32 may have 0x44 REX prefix or no REX prefix. */
1299 if (ABI_64_P (abfd))
1300 return false;
1301 }
1302 }
1303 else
1304 {
1305 /* X32 may not have any REX prefix. */
1306 if (ABI_64_P (abfd))
1307 return false;
1308 if (offset < 2 || (offset + 3) > sec->size)
1309 return false;
1310 }
1311
1312 check_gottpoff:
1313 val = bfd_get_8 (abfd, contents + offset - 2);
1314 if (val != 0x8b && val != 0x03)
1315 return false;
1316
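/* The ModRM byte must select RIP-relative addressing: mod == 0,
   r/m == 5, i.e. disp32(%rip). */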
1317 val = bfd_get_8 (abfd, contents + offset - 1);
1318 return (val & 0xc7) == 5;
1319
1320 case R_X86_64_CODE_4_GOTPC32_TLSDESC:
1321 /* Check transition from GDesc access model:
1322 lea x@tlsdesc(%rip), %reg
1323 where reg is one of r16 to r31. */
1324
1325 if (offset < 4
1326 || (offset + 4) > sec->size
1327 || contents[offset - 4] != 0xd5)
1328 return false;
1329
1330 goto check_tlsdesc;
1331
1332 case R_X86_64_GOTPC32_TLSDESC:
1333 /* Check transition from GDesc access model:
1334 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
1335 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
1336
1337 Make sure it's a leaq adding rip to a 32-bit offset
1338 into any register, although it's probably almost always
1339 going to be rax. */
1340
1341 if (offset < 3 || (offset + 4) > sec->size)
1342 return false;
1343
1344 val = bfd_get_8 (abfd, contents + offset - 3);
1345 val &= 0xfb;
1346 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40))
1347 return false;
1348
1349 check_tlsdesc:
1350 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d)
1351 return false;
1352
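/* 0x8d is the lea opcode; the ModRM byte must again encode a
   RIP-relative operand (mod == 0, r/m == 5). */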
1353 val = bfd_get_8 (abfd, contents + offset - 1);
1354 return (val & 0xc7) == 0x05;
1355
1356 case R_X86_64_TLSDESC_CALL:
1357 /* Check transition from GDesc access model:
1358 call *x@tlsdesc(%rax) <--- LP64 mode.
1359 call *x@tlsdesc(%eax) <--- X32 mode.
1360 */
1361 if (offset + 2 <= sec->size)
1362 {
1363 unsigned int prefix;
1364 call = contents + offset;
1365 prefix = 0;
1366 if (!ABI_64_P (abfd))
1367 {
1368 /* Check for call *x@tlsdesc(%eax). */
1369 if (call[0] == 0x67)
1370 {
1371 prefix = 1;
1372 if (offset + 3 > sec->size)
1373 return false;
1374 }
1375 }
1376 /* Make sure that it's a call *x@tlsdesc(%rax). */
1377 return call[prefix] == 0xff && call[1 + prefix] == 0x10;
1378 }
1379
1380 return false;
1381
1382 default:
1383 abort ();
1384 }
1385 }
1386
1387 /* Return TRUE if the TLS access transition is OK or no transition
1388 will be performed. Update R_TYPE if there is a transition. */
1389
1390 static bool
1391 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd,
1392 asection *sec, bfd_byte *contents,
1393 Elf_Internal_Shdr *symtab_hdr,
1394 struct elf_link_hash_entry **sym_hashes,
1395 unsigned int *r_type, int tls_type,
1396 const Elf_Internal_Rela *rel,
1397 const Elf_Internal_Rela *relend,
1398 struct elf_link_hash_entry *h,
1399 unsigned long r_symndx,
1400 bool from_relocate_section)
1401 {
1402 unsigned int from_type = *r_type;
1403 unsigned int to_type = from_type;
1404 bool check = true;
1405
1406 /* Skip TLS transition for functions. */
1407 if (h != NULL
1408 && (h->type == STT_FUNC
1409 || h->type == STT_GNU_IFUNC))
1410 return true;
1411
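/* In an executable, GD, GDesc and IE accesses can be relaxed: to LE
   (R_X86_64_TPOFF32) when the symbol is local, otherwise to IE
   (R_X86_64_GOTTPOFF).  When called from elf_x86_64_relocate_section
   the choice is refined further below using the final TLS_TYPE. */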
1412 switch (from_type)
1413 {
1414 case R_X86_64_TLSGD:
1415 case R_X86_64_GOTPC32_TLSDESC:
1416 case R_X86_64_CODE_4_GOTPC32_TLSDESC:
1417 case R_X86_64_TLSDESC_CALL:
1418 case R_X86_64_GOTTPOFF:
1419 case R_X86_64_CODE_4_GOTTPOFF:
1420 if (bfd_link_executable (info))
1421 {
1422 if (h == NULL)
1423 to_type = R_X86_64_TPOFF32;
1424 else
1425 to_type = R_X86_64_GOTTPOFF;
1426 }
1427
1428 /* When we are called from elf_x86_64_relocate_section, there may
1429 be additional transitions based on TLS_TYPE. */
1430 if (from_relocate_section)
1431 {
1432 unsigned int new_to_type = to_type;
1433
1434 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type))
1435 new_to_type = R_X86_64_TPOFF32;
1436
1437 if (to_type == R_X86_64_TLSGD
1438 || to_type == R_X86_64_GOTPC32_TLSDESC
1439 || to_type == R_X86_64_CODE_4_GOTPC32_TLSDESC
1440 || to_type == R_X86_64_TLSDESC_CALL)
1441 {
1442 if (tls_type == GOT_TLS_IE)
1443 new_to_type = R_X86_64_GOTTPOFF;
1444 }
1445
1446 /* We checked the transition before when we were called from
1447 elf_x86_64_scan_relocs. We only want to check the new
1448 transition which hasn't been checked before. */
1449 check = new_to_type != to_type && from_type == to_type;
1450 to_type = new_to_type;
1451 }
1452
1453 break;
1454
1455 case R_X86_64_TLSLD:
1456 if (bfd_link_executable (info))
1457 to_type = R_X86_64_TPOFF32;
1458 break;
1459
1460 default:
1461 return true;
1462 }
1463
1464 /* Return TRUE if there is no transition. */
1465 if (from_type == to_type
1466 || (from_type == R_X86_64_CODE_4_GOTTPOFF
1467 && to_type == R_X86_64_GOTTPOFF))
1468 return true;
1469
1470 /* Check if the transition can be performed. */
1471 if (check
1472 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents,
1473 symtab_hdr, sym_hashes,
1474 from_type, rel, relend))
1475 {
1476 reloc_howto_type *from, *to;
1477 const char *name;
1478
1479 from = elf_x86_64_rtype_to_howto (abfd, from_type);
1480 to = elf_x86_64_rtype_to_howto (abfd, to_type);
1481
1482 if (from == NULL || to == NULL)
1483 return false;
1484
1485 if (h)
1486 name = h->root.root.string;
1487 else
1488 {
1489 struct elf_x86_link_hash_table *htab;
1490
1491 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
1492 if (htab == NULL)
1493 name = "*unknown*";
1494 else
1495 {
1496 Elf_Internal_Sym *isym;
1497
1498 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
1499 abfd, r_symndx);
1500 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL);
1501 }
1502 }
1503
1504 _bfd_error_handler
1505 /* xgettext:c-format */
1506 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64
1507 " in section `%pA' failed"),
1508 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec);
1509 bfd_set_error (bfd_error_bad_value);
1510 return false;
1511 }
1512
1513 *r_type = to_type;
1514 return true;
1515 }
1516
1517 static bool
1518 elf_x86_64_need_pic (struct bfd_link_info *info,
1519 bfd *input_bfd, asection *sec,
1520 struct elf_link_hash_entry *h,
1521 Elf_Internal_Shdr *symtab_hdr,
1522 Elf_Internal_Sym *isym,
1523 reloc_howto_type *howto)
1524 {
1525 const char *v = "";
1526 const char *und = "";
1527 const char *pic = "";
1528 const char *object;
1529
1530 const char *name;
1531 if (h)
1532 {
1533 name = h->root.root.string;
1534 switch (ELF_ST_VISIBILITY (h->other))
1535 {
1536 case STV_HIDDEN:
1537 v = _("hidden symbol ");
1538 break;
1539 case STV_INTERNAL:
1540 v = _("internal symbol ");
1541 break;
1542 case STV_PROTECTED:
1543 v = _("protected symbol ");
1544 break;
1545 default:
1546 if (((struct elf_x86_link_hash_entry *) h)->def_protected)
1547 v = _("protected symbol ");
1548 else
1549 v = _("symbol ");
1550 pic = NULL;
1551 break;
1552 }
1553
1554 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic)
1555 und = _("undefined ");
1556 }
1557 else
1558 {
1559 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL);
1560 pic = NULL;
1561 }
1562
1563 if (bfd_link_dll (info))
1564 {
1565 object = _("a shared object");
1566 if (!pic)
1567 pic = _("; recompile with -fPIC");
1568 }
1569 else
1570 {
1571 if (bfd_link_pie (info))
1572 object = _("a PIE object");
1573 else
1574 object = _("a PDE object");
1575 if (!pic)
1576 pic = _("; recompile with -fPIE");
1577 }
1578
1579 /* xgettext:c-format */
1580 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can "
1581 "not be used when making %s%s"),
1582 input_bfd, howto->name, und, v, name,
1583 object, pic);
1584 bfd_set_error (bfd_error_bad_value);
1585 sec->check_relocs_failed = 1;
1586 return false;
1587 }
1588
1589 /* With the local symbol, foo, we convert
1590 mov foo@GOTPCREL(%rip), %reg
1591 to
1592 lea foo(%rip), %reg
1593 and convert
1594 call/jmp *foo@GOTPCREL(%rip)
1595 to
1596 nop call foo/jmp foo nop
1597 When PIC is false, convert
1598 test %reg, foo@GOTPCREL(%rip)
1599 to
1600 test $foo, %reg
1601 and convert
1602 binop foo@GOTPCREL(%rip), %reg
1603 to
1604 binop $foo, %reg
1605 where binop is one of adc, add, and, cmp, or, sbb, sub, xor
1606 instructions. */
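/* For illustration (a sketch of the typical byte-level rewrites,
   assuming a 64-bit destination such as %rax; <rel32> is the 4-byte
   field covered by the relocation):
       mov foo@GOTPCREL(%rip), %rax   48 8b 05 <rel32>
     becomes
       lea foo(%rip), %rax            48 8d 05 <rel32>
   and
       jmp *foo@GOTPCREL(%rip)        ff 25 <rel32>
     becomes
       jmp foo; nop                   e9 <rel32> 90
   while for an indirect call the nop (or addr32) byte goes before or
   after the direct call, e.g. with -z call-nop=prefix-nop:
       call *foo@GOTPCREL(%rip)       ff 15 <rel32>
     becomes
       nop; call foo                  90 e8 <rel32>  */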
1607
1608 static bool
1609 elf_x86_64_convert_load_reloc (bfd *abfd,
1610 bfd_byte *contents,
1611 unsigned int *r_type_p,
1612 Elf_Internal_Rela *irel,
1613 struct elf_link_hash_entry *h,
1614 bool *converted,
1615 struct bfd_link_info *link_info)
1616 {
1617 struct elf_x86_link_hash_table *htab;
1618 bool is_pic;
1619 bool no_overflow;
1620 bool relocx;
1621 bool to_reloc_pc32;
1622 bool abs_symbol;
1623 bool local_ref;
1624 asection *tsec;
1625 bfd_signed_vma raddend;
1626 unsigned int opcode;
1627 unsigned int modrm;
1628 unsigned int r_type = *r_type_p;
1629 unsigned int r_symndx;
1630 bfd_vma roff = irel->r_offset;
1631 bfd_vma abs_relocation;
1632
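/* Layout of the instruction relative to ROFF (the start of the 4-byte
   displacement the relocation covers): the ModRM byte is at roff - 1
   and the primary opcode at roff - 2; R_X86_64_REX_GOTPCRELX has a REX
   prefix at roff - 3, and for R_X86_64_CODE_4_GOTPCRELX the instruction
   starts at roff - 4 (only the REX2 form, 0xd5 prefix at roff - 4 with
   its payload at roff - 3, is handled here).  The check below just
   makes sure those leading bytes exist before ROFF.  */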
1633 if (roff < (r_type == R_X86_64_CODE_4_GOTPCRELX
1634 ? 4 : (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2)))
1635 return true;
1636
1637 raddend = irel->r_addend;
1638 /* The addend for a 32-bit PC-relative relocation must be -4: the disp32 field is the last 4 bytes of the instruction, so the relocated place is 4 bytes before the PC (the next instruction) that the displacement is measured from. */
1639 if (raddend != -4)
1640 return true;
1641
1642 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA);
1643 is_pic = bfd_link_pic (link_info);
1644
1645 if (r_type == R_X86_64_CODE_4_GOTPCRELX)
1646 {
1647 /* Skip if this isn't a REX2 instruction. */
1648 opcode = bfd_get_8 (abfd, contents + roff - 4);
1649 if (opcode != 0xd5)
1650 return true;
1651
1652 relocx = true;
1653 }
1654 else
1655 relocx = (r_type == R_X86_64_GOTPCRELX
1656 || r_type == R_X86_64_REX_GOTPCRELX);
1657
1658 /* TRUE if --no-relax is used. */
1659 no_overflow = link_info->disable_target_specific_optimizations > 1;
1660
1661 r_symndx = htab->r_sym (irel->r_info);
1662
1663 opcode = bfd_get_8 (abfd, contents + roff - 2);
1664
1665 /* mov is converted even for plain R_X86_64_GOTPCREL, since the mov to lea conversion has been supported for a long time; other opcodes require one of the GOTPCRELX relocations, checked below. */
1666 if (opcode != 0x8b)
1667 {
1668 /* Only convert R_X86_64_GOTPCRELX, R_X86_64_REX_GOTPCRELX
1669 and R_X86_64_CODE_4_GOTPCRELX for call, jmp or one of adc,
1670 add, and, cmp, or, sbb, sub, test, xor instructions. */
1671 if (!relocx)
1672 return true;
1673 }
1674
1675 /* We convert only to R_X86_64_PC32:
1676 1. Branch.
1677 2. R_X86_64_GOTPCREL since we can't modify REX byte.
1678 3. no_overflow is true.
1679 4. PIC.
1680 */
1681 to_reloc_pc32 = (opcode == 0xff
1682 || !relocx
1683 || no_overflow
1684 || is_pic);
1685
1686 abs_symbol = false;
1687 abs_relocation = 0;
1688
1689 /* Get the symbol referred to by the reloc. */
1690 if (h == NULL)
1691 {
1692 Elf_Internal_Sym *isym
1693 = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx);
1694
1695 /* Skip relocation against undefined symbols. */
1696 if (isym->st_shndx == SHN_UNDEF)
1697 return true;
1698
1699 local_ref = true;
1700 if (isym->st_shndx == SHN_ABS)
1701 {
1702 tsec = bfd_abs_section_ptr;
1703 abs_symbol = true;
1704 abs_relocation = isym->st_value;
1705 }
1706 else if (isym->st_shndx == SHN_COMMON)
1707 tsec = bfd_com_section_ptr;
1708 else if (isym->st_shndx == SHN_X86_64_LCOMMON)
1709 tsec = &_bfd_elf_large_com_section;
1710 else
1711 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx);
1712 }
1713 else
1714 {
1715 /* An undefined weak symbol is only bound locally in an executable
1716 and its reference is resolved as 0 without relocation
1717 overflow. We can only perform this optimization for
1718 GOTPCRELX relocations since we need to modify REX byte.
1719 It is OK to convert mov with R_X86_64_GOTPCREL to
1720 R_X86_64_PC32. */
1721 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h);
1722
1723 abs_symbol = ABS_SYMBOL_P (h);
1724 abs_relocation = h->root.u.def.value;
1725
1726 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */
1727 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h);
1728 if ((relocx || opcode == 0x8b)
1729 && (h->root.type == bfd_link_hash_undefweak
1730 && !eh->linker_def
1731 && local_ref))
1732 {
1733 if (opcode == 0xff)
1734 {
1735 /* Skip for branch instructions since R_X86_64_PC32
1736 may overflow. */
1737 if (no_overflow)
1738 return true;
1739 }
1740 else if (relocx)
1741 {
1742 /* For non-branch instructions, we can convert to
1743 R_X86_64_32/R_X86_64_32S since we know if there
1744 is a REX byte. */
1745 to_reloc_pc32 = false;
1746 }
1747
1748 /* Since we don't know the current PC when PIC is true,
1749 we can't convert to R_X86_64_PC32. */
1750 if (to_reloc_pc32 && is_pic)
1751 return true;
1752
1753 goto convert;
1754 }
1755 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since
1756 ld.so may use its link-time address. */
1757 else if (h->start_stop
1758 || eh->linker_def
1759 || ((h->def_regular
1760 || h->root.type == bfd_link_hash_defined
1761 || h->root.type == bfd_link_hash_defweak)
1762 && h != htab->elf.hdynamic
1763 && local_ref))
1764 {
1765 /* bfd_link_hash_new or bfd_link_hash_undefined is
1766 set by an assignment in a linker script in
1767 bfd_elf_record_link_assignment. start_stop is set
1768 on __start_SECNAME/__stop_SECNAME which mark section
1769 SECNAME. */
1770 if (h->start_stop
1771 || eh->linker_def
1772 || (h->def_regular
1773 && (h->root.type == bfd_link_hash_new
1774 || h->root.type == bfd_link_hash_undefined
1775 || ((h->root.type == bfd_link_hash_defined
1776 || h->root.type == bfd_link_hash_defweak)
1777 && h->root.u.def.section == bfd_und_section_ptr))))
1778 {
1779 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */
1780 if (no_overflow)
1781 return true;
1782 goto convert;
1783 }
1784 tsec = h->root.u.def.section;
1785 }
1786 else
1787 return true;
1788 }
1789
1790 /* Don't convert GOTPCREL relocation against large section. */
1791 if (elf_section_data (tsec) != NULL
1792 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0)
1793 return true;
1794
1795 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */
1796 if (no_overflow)
1797 return true;
1798
1799 convert:
1800 if (opcode == 0xff)
1801 {
1802 /* We have "call/jmp *foo@GOTPCREL(%rip)". */
1803 unsigned int nop;
1804 unsigned int disp;
1805 bfd_vma nop_offset;
1806
1807 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to
1808 R_X86_64_PC32. */
1809 modrm = bfd_get_8 (abfd, contents + roff - 1);
1810 if (modrm == 0x25)
1811 {
1812 /* Convert to "jmp foo nop". */
1813 modrm = 0xe9;
1814 nop = NOP_OPCODE;
1815 nop_offset = irel->r_offset + 3;
1816 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1817 irel->r_offset -= 1;
1818 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1819 }
1820 else
1821 {
1822 struct elf_x86_link_hash_entry *eh
1823 = (struct elf_x86_link_hash_entry *) h;
1824
1825 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE
1826 is a nop prefix. */
1827 modrm = 0xe8;
1828 /* To support TLS optimization, always use addr32 prefix for
1829 "call *__tls_get_addr@GOTPCREL(%rip)". */
1830 if (eh && eh->tls_get_addr)
1831 {
1832 nop = 0x67;
1833 nop_offset = irel->r_offset - 2;
1834 }
1835 else
1836 {
1837 nop = htab->params->call_nop_byte;
1838 if (htab->params->call_nop_as_suffix)
1839 {
1840 nop_offset = irel->r_offset + 3;
1841 disp = bfd_get_32 (abfd, contents + irel->r_offset);
1842 irel->r_offset -= 1;
1843 bfd_put_32 (abfd, disp, contents + irel->r_offset);
1844 }
1845 else
1846 nop_offset = irel->r_offset - 2;
1847 }
1848 }
1849 bfd_put_8 (abfd, nop, contents + nop_offset);
1850 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1);
1851 r_type = R_X86_64_PC32;
1852 }
1853 else
1854 {
1855 unsigned int rex = 0;
1856 unsigned int rex_mask = REX_R;
1857 unsigned int rex2 = 0;
1858 unsigned int rex2_mask = REX_R | REX_R << 4;
1859 bool rex_w = false;
1860
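/* In a REX2 payload the R bit appears twice: R3 at the classic REX_R
   position and R4 four bits higher, which is why both REX_R and
   REX_R << 4 are masked and moved together below.  */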
1861 if (r_type == R_X86_64_CODE_4_GOTPCRELX)
1862 {
1863 rex2 = bfd_get_8 (abfd, contents + roff - 3);
1864 rex_w = (rex2 & REX_W) != 0;
1865 }
1866 else if (r_type == R_X86_64_REX_GOTPCRELX)
1867 {
1868 rex = bfd_get_8 (abfd, contents + roff - 3);
1869 rex_w = (rex & REX_W) != 0;
1870 }
1871
1872 if (opcode == 0x8b)
1873 {
1874 if (abs_symbol && local_ref && relocx)
1875 to_reloc_pc32 = false;
1876
1877 if (to_reloc_pc32)
1878 {
1879 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1880 "lea foo(%rip), %reg". */
1881 opcode = 0x8d;
1882 r_type = R_X86_64_PC32;
1883 }
1884 else
1885 {
1886 /* Convert "mov foo@GOTPCREL(%rip), %reg" to
1887 "mov $foo, %reg". */
1888 opcode = 0xc7;
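/* 0xc7 /0 is "mov imm32, r/m"; the destination register therefore
   moves from the ModRM reg field (bits 3-5) into the r/m field
   (bits 0-2), with mod set to 11 (register-direct).  */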
1889 modrm = bfd_get_8 (abfd, contents + roff - 1);
1890 modrm = 0xc0 | (modrm & 0x38) >> 3;
1891 if (rex_w && ABI_64_P (link_info->output_bfd))
1892 {
1893 /* Keep the REX_W bit in REX byte for LP64. */
1894 r_type = R_X86_64_32S;
1895 goto rewrite_modrm_rex;
1896 }
1897 else
1898 {
1899 /* If the REX_W bit in REX byte isn't needed,
1900 use R_X86_64_32 and clear the W bit to avoid
1901 sign-extend imm32 to imm64. */
1902 r_type = R_X86_64_32;
1903 /* Clear the W bit in REX byte and REX2 payload. */
1904 rex_mask |= REX_W;
1905 rex2_mask |= REX_W;
1906 goto rewrite_modrm_rex;
1907 }
1908 }
1909 }
1910 else
1911 {
1912 /* R_X86_64_PC32 isn't supported. */
1913 if (to_reloc_pc32)
1914 return true;
1915
1916 modrm = bfd_get_8 (abfd, contents + roff - 1);
1917 if (opcode == 0x85)
1918 {
1919 /* Convert "test %reg, foo@GOTPCREL(%rip)" to
1920 "test $foo, %reg". */
1921 modrm = 0xc0 | (modrm & 0x38) >> 3;
1922 opcode = 0xf7;
1923 }
1924 else
1925 {
1926 /* Convert "binop foo@GOTPCREL(%rip), %reg" to
1927 "binop $foo, %reg". */
1928 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c);
1929 opcode = 0x81;
1930 }
1931
1932 /* Use R_X86_64_32 with 32-bit operand to avoid relocation
1933 overflow when sign-extending imm32 to imm64. */
1934 r_type = rex_w ? R_X86_64_32S : R_X86_64_32;
1935
1936 rewrite_modrm_rex:
1937 if (abs_relocation)
1938 {
1939 /* Check if R_X86_64_32S/R_X86_64_32 fits. */
1940 if (r_type == R_X86_64_32S)
1941 {
1942 if ((abs_relocation + 0x80000000) > 0xffffffff)
1943 return true;
1944 }
1945 else
1946 {
1947 if (abs_relocation > 0xffffffff)
1948 return true;
1949 }
1950 }
1951
1952 bfd_put_8 (abfd, modrm, contents + roff - 1);
1953
1954 if (rex)
1955 {
1956 /* Move the R bit to the B bit in REX byte. */
1957 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2;
1958 bfd_put_8 (abfd, rex, contents + roff - 3);
1959 }
1960 else if (rex2)
1961 {
1962 /* Move the R bits to the B bits in REX2 payload byte. */
1963 rex2 = ((rex2 & ~rex2_mask)
1964 | (rex2 & (REX_R | REX_R << 4)) >> 2);
1965 bfd_put_8 (abfd, rex2, contents + roff - 3);
1966 }
1967
1968 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */
1969 irel->r_addend = 0;
1970 }
1971
1972 bfd_put_8 (abfd, opcode, contents + roff - 2);
1973 }
1974
1975 *r_type_p = r_type;
1976 irel->r_info = htab->r_info (r_symndx,
1977 r_type | R_X86_64_converted_reloc_bit);
1978
1979 *converted = true;
1980
1981 return true;
1982 }
1983
1984 /* Look through the relocs for a section during the first phase, and
1985 calculate needed space in the global offset table, and procedure
1986 linkage table. */
1987
1988 static bool
1989 elf_x86_64_scan_relocs (bfd *abfd, struct bfd_link_info *info,
1990 asection *sec,
1991 const Elf_Internal_Rela *relocs)
1992 {
1993 struct elf_x86_link_hash_table *htab;
1994 Elf_Internal_Shdr *symtab_hdr;
1995 struct elf_link_hash_entry **sym_hashes;
1996 const Elf_Internal_Rela *rel;
1997 const Elf_Internal_Rela *rel_end;
1998 bfd_byte *contents;
1999 bool converted;
2000
2001 if (bfd_link_relocatable (info))
2002 return true;
2003
2004 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2005 if (htab == NULL)
2006 {
2007 sec->check_relocs_failed = 1;
2008 return false;
2009 }
2010
2011 BFD_ASSERT (is_x86_elf (abfd, htab));
2012
2013 /* Get the section contents. */
2014 if (elf_section_data (sec)->this_hdr.contents != NULL)
2015 contents = elf_section_data (sec)->this_hdr.contents;
2016 else if (!bfd_malloc_and_get_section (abfd, sec, &contents))
2017 {
2018 sec->check_relocs_failed = 1;
2019 return false;
2020 }
2021
2022 symtab_hdr = &elf_symtab_hdr (abfd);
2023 sym_hashes = elf_sym_hashes (abfd);
2024
2025 converted = false;
2026
2027 rel_end = relocs + sec->reloc_count;
2028 for (rel = relocs; rel < rel_end; rel++)
2029 {
2030 unsigned int r_type;
2031 unsigned int r_symndx;
2032 struct elf_link_hash_entry *h;
2033 struct elf_x86_link_hash_entry *eh;
2034 Elf_Internal_Sym *isym;
2035 const char *name;
2036 bool size_reloc;
2037 bool converted_reloc;
2038 bool no_dynreloc;
2039
2040 r_symndx = htab->r_sym (rel->r_info);
2041 r_type = ELF32_R_TYPE (rel->r_info);
2042
2043 /* Don't check R_X86_64_NONE. */
2044 if (r_type == R_X86_64_NONE)
2045 continue;
2046
2047 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr))
2048 {
2049 /* xgettext:c-format */
2050 _bfd_error_handler (_("%pB: bad symbol index: %d"),
2051 abfd, r_symndx);
2052 goto error_return;
2053 }
2054
2055 if (r_symndx < symtab_hdr->sh_info)
2056 {
2057 /* A local symbol. */
2058 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2059 abfd, r_symndx);
2060 if (isym == NULL)
2061 goto error_return;
2062
2063 /* Check relocation against local STT_GNU_IFUNC symbol. */
2064 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC)
2065 {
2066 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel,
2067 true);
2068 if (h == NULL)
2069 goto error_return;
2070
2071 /* Fake a STT_GNU_IFUNC symbol. */
2072 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr,
2073 isym, NULL);
2074 h->type = STT_GNU_IFUNC;
2075 h->def_regular = 1;
2076 h->ref_regular = 1;
2077 h->forced_local = 1;
2078 h->root.type = bfd_link_hash_defined;
2079 }
2080 else
2081 h = NULL;
2082 }
2083 else
2084 {
2085 isym = NULL;
2086 h = sym_hashes[r_symndx - symtab_hdr->sh_info];
2087 while (h->root.type == bfd_link_hash_indirect
2088 || h->root.type == bfd_link_hash_warning)
2089 h = (struct elf_link_hash_entry *) h->root.u.i.link;
2090 }
2091
2092 /* Check invalid x32 relocations. */
2093 if (!ABI_64_P (abfd))
2094 switch (r_type)
2095 {
2096 default:
2097 break;
2098
2099 case R_X86_64_DTPOFF64:
2100 case R_X86_64_TPOFF64:
2101 case R_X86_64_PC64:
2102 case R_X86_64_GOTOFF64:
2103 case R_X86_64_GOT64:
2104 case R_X86_64_GOTPCREL64:
2105 case R_X86_64_GOTPC64:
2106 case R_X86_64_GOTPLT64:
2107 case R_X86_64_PLTOFF64:
2108 {
2109 if (h)
2110 name = h->root.root.string;
2111 else
2112 name = bfd_elf_sym_name (abfd, symtab_hdr, isym,
2113 NULL);
2114 _bfd_error_handler
2115 /* xgettext:c-format */
2116 (_("%pB: relocation %s against symbol `%s' isn't "
2117 "supported in x32 mode"), abfd,
2118 x86_64_elf_howto_table[r_type].name, name);
2119 bfd_set_error (bfd_error_bad_value);
2120 goto error_return;
2121 }
2122 break;
2123 }
2124
2125 eh = (struct elf_x86_link_hash_entry *) h;
2126
2127 if (h != NULL)
2128 {
2129 /* It is referenced by a non-shared object. */
2130 h->ref_regular = 1;
2131 }
2132
2133 converted_reloc = false;
2134 if ((r_type == R_X86_64_GOTPCREL
2135 || r_type == R_X86_64_GOTPCRELX
2136 || r_type == R_X86_64_REX_GOTPCRELX
2137 || r_type == R_X86_64_CODE_4_GOTPCRELX)
2138 && (h == NULL || h->type != STT_GNU_IFUNC))
2139 {
2140 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel;
2141 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type,
2142 irel, h, &converted_reloc,
2143 info))
2144 goto error_return;
2145
2146 if (converted_reloc)
2147 converted = true;
2148 }
2149
2150 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym,
2151 symtab_hdr, &no_dynreloc))
2152 return false;
2153
2154 if (! elf_x86_64_tls_transition (info, abfd, sec, contents,
2155 symtab_hdr, sym_hashes,
2156 &r_type, GOT_UNKNOWN,
2157 rel, rel_end, h, r_symndx, false))
2158 goto error_return;
2159
2160 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */
2161 if (h == htab->elf.hgot)
2162 htab->got_referenced = true;
2163
2164 switch (r_type)
2165 {
2166 case R_X86_64_TLSLD:
2167 htab->tls_ld_or_ldm_got.refcount = 1;
2168 goto create_got;
2169
2170 case R_X86_64_TPOFF32:
2171 if (!bfd_link_executable (info) && ABI_64_P (abfd))
2172 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2173 &x86_64_elf_howto_table[r_type]);
2174 if (eh != NULL)
2175 eh->zero_undefweak &= 0x2;
2176 break;
2177
2178 case R_X86_64_GOTTPOFF:
2179 case R_X86_64_CODE_4_GOTTPOFF:
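/* An IE-model access assumes the variable lives in the static TLS
   block, so a shared object containing such an access must be
   flagged with DF_STATIC_TLS for the dynamic loader.  */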
2180 if (!bfd_link_executable (info))
2181 info->flags |= DF_STATIC_TLS;
2182 /* Fall through */
2183
2184 case R_X86_64_GOT32:
2185 case R_X86_64_GOTPCREL:
2186 case R_X86_64_GOTPCRELX:
2187 case R_X86_64_REX_GOTPCRELX:
2188 case R_X86_64_CODE_4_GOTPCRELX:
2189 case R_X86_64_TLSGD:
2190 case R_X86_64_GOT64:
2191 case R_X86_64_GOTPCREL64:
2192 case R_X86_64_GOTPLT64:
2193 case R_X86_64_GOTPC32_TLSDESC:
2194 case R_X86_64_CODE_4_GOTPC32_TLSDESC:
2195 case R_X86_64_TLSDESC_CALL:
2196 /* This symbol requires a global offset table entry. */
2197 {
2198 int tls_type, old_tls_type;
2199
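/* Classify the TLS access model: TLSGD -> general dynamic,
   GOTTPOFF -> initial exec, the TLSDESC relocations -> TLS
   descriptors, anything else -> a normal GOT entry (GOT_ABS
   for absolute symbols).  */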
2200 switch (r_type)
2201 {
2202 default:
2203 tls_type = GOT_NORMAL;
2204 if (h)
2205 {
2206 if (ABS_SYMBOL_P (h))
2207 tls_type = GOT_ABS;
2208 }
2209 else if (isym->st_shndx == SHN_ABS)
2210 tls_type = GOT_ABS;
2211 break;
2212 case R_X86_64_TLSGD:
2213 tls_type = GOT_TLS_GD;
2214 break;
2215 case R_X86_64_GOTTPOFF:
2216 case R_X86_64_CODE_4_GOTTPOFF:
2217 tls_type = GOT_TLS_IE;
2218 break;
2219 case R_X86_64_GOTPC32_TLSDESC:
2220 case R_X86_64_CODE_4_GOTPC32_TLSDESC:
2221 case R_X86_64_TLSDESC_CALL:
2222 tls_type = GOT_TLS_GDESC;
2223 break;
2224 }
2225
2226 if (h != NULL)
2227 {
2228 h->got.refcount = 1;
2229 old_tls_type = eh->tls_type;
2230 }
2231 else
2232 {
2233 bfd_signed_vma *local_got_refcounts;
2234
2235 if (!elf_x86_allocate_local_got_info (abfd,
2236 symtab_hdr->sh_info))
2237 goto error_return;
2238
2239 /* This is a global offset table entry for a local symbol. */
2240 local_got_refcounts = elf_local_got_refcounts (abfd);
2241 local_got_refcounts[r_symndx] = 1;
2242 old_tls_type
2243 = elf_x86_local_got_tls_type (abfd) [r_symndx];
2244 }
2245
2246 /* If a TLS symbol is accessed using IE at least once,
2247 there is no point in using a dynamic model for it. */
2248 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN
2249 && (! GOT_TLS_GD_ANY_P (old_tls_type)
2250 || tls_type != GOT_TLS_IE))
2251 {
2252 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type))
2253 tls_type = old_tls_type;
2254 else if (GOT_TLS_GD_ANY_P (old_tls_type)
2255 && GOT_TLS_GD_ANY_P (tls_type))
2256 tls_type |= old_tls_type;
2257 else
2258 {
2259 if (h)
2260 name = h->root.root.string;
2261 else
2262 name = bfd_elf_sym_name (abfd, symtab_hdr,
2263 isym, NULL);
2264 _bfd_error_handler
2265 /* xgettext:c-format */
2266 (_("%pB: '%s' accessed both as normal and"
2267 " thread local symbol"),
2268 abfd, name);
2269 bfd_set_error (bfd_error_bad_value);
2270 goto error_return;
2271 }
2272 }
2273
2274 if (old_tls_type != tls_type)
2275 {
2276 if (eh != NULL)
2277 eh->tls_type = tls_type;
2278 else
2279 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type;
2280 }
2281 }
2282 /* Fall through */
2283
2284 case R_X86_64_GOTOFF64:
2285 case R_X86_64_GOTPC32:
2286 case R_X86_64_GOTPC64:
2287 create_got:
2288 if (eh != NULL)
2289 eh->zero_undefweak &= 0x2;
2290 break;
2291
2292 case R_X86_64_PLT32:
2293 /* This symbol requires a procedure linkage table entry. We
2294 actually build the entry in adjust_dynamic_symbol,
2295 because this might be a case of linking PIC code which is
2296 never referenced by a dynamic object, in which case we
2297 don't need to generate a procedure linkage table entry
2298 after all. */
2299
2300 /* If this is a local symbol, we resolve it directly without
2301 creating a procedure linkage table entry. */
2302 if (h == NULL)
2303 continue;
2304
2305 eh->zero_undefweak &= 0x2;
2306 h->needs_plt = 1;
2307 h->plt.refcount = 1;
2308 break;
2309
2310 case R_X86_64_PLTOFF64:
2311 /* This tries to form the 'address' of a function relative
2312 to GOT. For global symbols we need a PLT entry. */
2313 if (h != NULL)
2314 {
2315 h->needs_plt = 1;
2316 h->plt.refcount = 1;
2317 }
2318 goto create_got;
2319
2320 case R_X86_64_SIZE32:
2321 case R_X86_64_SIZE64:
2322 size_reloc = true;
2323 goto do_size;
2324
2325 case R_X86_64_32:
2326 if (!ABI_64_P (abfd))
2327 goto pointer;
2328 /* Fall through. */
2329 case R_X86_64_8:
2330 case R_X86_64_16:
2331 case R_X86_64_32S:
2332 /* Check relocation overflow as these relocs may lead to
2333 run-time relocation overflow. Don't error out for
2334 sections we don't care about, such as debug sections or
2335 when relocation overflow check is disabled. */
2336 if (!htab->params->no_reloc_overflow_check
2337 && !converted_reloc
2338 && (bfd_link_pic (info)
2339 || (bfd_link_executable (info)
2340 && h != NULL
2341 && !h->def_regular
2342 && h->def_dynamic
2343 && (sec->flags & SEC_READONLY) == 0)))
2344 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym,
2345 &x86_64_elf_howto_table[r_type]);
2346 /* Fall through. */
2347
2348 case R_X86_64_PC8:
2349 case R_X86_64_PC16:
2350 case R_X86_64_PC32:
2351 case R_X86_64_PC64:
2352 case R_X86_64_64:
2353 pointer:
2354 if (eh != NULL && (sec->flags & SEC_CODE) != 0)
2355 eh->zero_undefweak |= 0x2;
2356 /* We are called after all symbols have been resolved. Only
2357 relocation against STT_GNU_IFUNC symbol must go through
2358 PLT. */
2359 if (h != NULL
2360 && (bfd_link_executable (info)
2361 || h->type == STT_GNU_IFUNC))
2362 {
2363 bool func_pointer_ref = false;
2364
2365 if (r_type == R_X86_64_PC32)
2366 {
2367 /* Since something like ".long foo - ." may be used
2368 as a pointer, make sure that the PLT is used if foo is
2369 a function defined in a shared library. */
2370 if ((sec->flags & SEC_CODE) == 0)
2371 {
2372 h->pointer_equality_needed = 1;
2373 if (bfd_link_pie (info)
2374 && h->type == STT_FUNC
2375 && !h->def_regular
2376 && h->def_dynamic)
2377 {
2378 h->needs_plt = 1;
2379 h->plt.refcount = 1;
2380 }
2381 }
2382 }
2383 else if (r_type != R_X86_64_PC64)
2384 {
2385 /* At run-time, R_X86_64_64 can be resolved for both
2386 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S
2387 can only be resolved for x32. Function pointer
2388 reference doesn't need PLT for pointer equality. */
2389 if ((sec->flags & SEC_READONLY) == 0
2390 && (r_type == R_X86_64_64
2391 || (!ABI_64_P (abfd)
2392 && (r_type == R_X86_64_32
2393 || r_type == R_X86_64_32S))))
2394 func_pointer_ref = true;
2395
2396 /* IFUNC symbol needs pointer equality in PDE so that
2397 function pointer reference will be resolved to its
2398 PLT entry directly. */
2399 if (!func_pointer_ref
2400 || (bfd_link_pde (info)
2401 && h->type == STT_GNU_IFUNC))
2402 h->pointer_equality_needed = 1;
2403 }
2404
2405 if (!func_pointer_ref)
2406 {
2407 /* If this reloc is in a read-only section, we might
2408 need a copy reloc. We can't check reliably at this
2409 stage whether the section is read-only, as input
2410 sections have not yet been mapped to output sections.
2411 Tentatively set the flag for now, and correct in
2412 adjust_dynamic_symbol. */
2413 h->non_got_ref = 1;
2414
2415 if (!elf_has_indirect_extern_access (sec->owner))
2416 eh->non_got_ref_without_indirect_extern_access = 1;
2417
2418 /* We may need a .plt entry if the symbol is a function
2419 defined in a shared lib or is a function referenced
2420 from the code or read-only section. */
2421 if (!h->def_regular
2422 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0)
2423 h->plt.refcount = 1;
2424
2425 if (htab->elf.target_os != is_solaris
2426 && h->pointer_equality_needed
2427 && h->type == STT_FUNC
2428 && eh->def_protected
2429 && !SYMBOL_DEFINED_NON_SHARED_P (h)
2430 && h->def_dynamic)
2431 {
2432 /* Disallow non-canonical reference to canonical
2433 protected function. */
2434 _bfd_error_handler
2435 /* xgettext:c-format */
2436 (_("%pB: non-canonical reference to canonical "
2437 "protected function `%s' in %pB"),
2438 abfd, h->root.root.string,
2439 h->root.u.def.section->owner);
2440 bfd_set_error (bfd_error_bad_value);
2441 goto error_return;
2442 }
2443 }
2444 }
2445
2446 size_reloc = false;
2447 do_size:
2448 if (!no_dynreloc
2449 && NEED_DYNAMIC_RELOCATION_P (true, info, true, h, sec,
2450 r_type,
2451 htab->pointer_r_type))
2452 {
2453 struct elf_dyn_relocs *p;
2454 struct elf_dyn_relocs **head;
2455
2456 /* If this is a global symbol, we count the number of
2457 relocations we need for this symbol. */
2458 if (h != NULL)
2459 head = &h->dyn_relocs;
2460 else
2461 {
2462 /* Track dynamic relocs needed for local syms too.
2463 We really need local syms available to do this
2464 easily. Oh well. */
2465 asection *s;
2466 void **vpp;
2467
2468 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache,
2469 abfd, r_symndx);
2470 if (isym == NULL)
2471 goto error_return;
2472
2473 s = bfd_section_from_elf_index (abfd, isym->st_shndx);
2474 if (s == NULL)
2475 s = sec;
2476
2477 /* Beware of type punned pointers vs strict aliasing
2478 rules. */
2479 vpp = &(elf_section_data (s)->local_dynrel);
2480 head = (struct elf_dyn_relocs **)vpp;
2481 }
2482
2483 p = *head;
2484 if (p == NULL || p->sec != sec)
2485 {
2486 size_t amt = sizeof *p;
2487
2488 p = ((struct elf_dyn_relocs *)
2489 bfd_alloc (htab->elf.dynobj, amt));
2490 if (p == NULL)
2491 goto error_return;
2492 p->next = *head;
2493 *head = p;
2494 p->sec = sec;
2495 p->count = 0;
2496 p->pc_count = 0;
2497 }
2498
2499 p->count += 1;
2500 /* Count size relocation as PC-relative relocation. */
2501 if (X86_PCREL_TYPE_P (true, r_type) || size_reloc)
2502 p->pc_count += 1;
2503 }
2504 break;
2505
2506 /* This relocation describes the C++ object vtable hierarchy.
2507 Reconstruct it for later use during GC. */
2508 case R_X86_64_GNU_VTINHERIT:
2509 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset))
2510 goto error_return;
2511 break;
2512
2513 /* This relocation describes which C++ vtable entries are actually
2514 used. Record for later use during GC. */
2515 case R_X86_64_GNU_VTENTRY:
2516 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend))
2517 goto error_return;
2518 break;
2519
2520 default:
2521 break;
2522 }
2523 }
2524
2525 if (elf_section_data (sec)->this_hdr.contents != contents)
2526 {
2527 if (!converted && !_bfd_link_keep_memory (info))
2528 free (contents);
2529 else
2530 {
2531 /* Cache the section contents for elf_link_input_bfd if any
2532 load is converted or --no-keep-memory isn't used. */
2533 elf_section_data (sec)->this_hdr.contents = contents;
2534 info->cache_size += sec->size;
2535 }
2536 }
2537
2538 /* Cache relocations if any load is converted. */
2539 if (elf_section_data (sec)->relocs != relocs && converted)
2540 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs;
2541
2542 return true;
2543
2544 error_return:
2545 if (elf_section_data (sec)->this_hdr.contents != contents)
2546 free (contents);
2547 sec->check_relocs_failed = 1;
2548 return false;
2549 }
2550
2551 static bool
2552 elf_x86_64_always_size_sections (bfd *output_bfd,
2553 struct bfd_link_info *info)
2554 {
2555 bfd *abfd;
2556
2557 /* Scan relocations after rel_from_abs has been set on __ehdr_start. */
2558 for (abfd = info->input_bfds;
2559 abfd != (bfd *) NULL;
2560 abfd = abfd->link.next)
2561 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour
2562 && !_bfd_elf_link_iterate_on_relocs (abfd, info,
2563 elf_x86_64_scan_relocs))
2564 return false;
2565
2566 return _bfd_x86_elf_always_size_sections (output_bfd, info);
2567 }
2568
2569 /* Return the relocation value for @tpoff relocation
2570 if STT_TLS virtual address is ADDRESS. */
2571
2572 static bfd_vma
2573 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address)
2574 {
2575 struct elf_link_hash_table *htab = elf_hash_table (info);
2576 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd);
2577 bfd_vma static_tls_size;
2578
2579 /* If tls_sec is NULL, we should have signalled an error already. */
2580 if (htab->tls_sec == NULL)
2581 return 0;
2582
2583 /* Consider special static TLS alignment requirements. */
2584 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment);
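/* The x86-64 thread pointer addresses the end of the static TLS
   block, so @tpoff values come out negative.  E.g. (made-up numbers)
   with tls_sec->vma == 0x601000, an aligned static_tls_size of 0x20
   and ADDRESS == 0x601008, the result is 0x8 - 0x20 == -0x18.  */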
2585 return address - static_tls_size - htab->tls_sec->vma;
2586 }
2587
2588 /* Relocate an x86_64 ELF section. */
2589
2590 static int
2591 elf_x86_64_relocate_section (bfd *output_bfd,
2592 struct bfd_link_info *info,
2593 bfd *input_bfd,
2594 asection *input_section,
2595 bfd_byte *contents,
2596 Elf_Internal_Rela *relocs,
2597 Elf_Internal_Sym *local_syms,
2598 asection **local_sections)
2599 {
2600 struct elf_x86_link_hash_table *htab;
2601 Elf_Internal_Shdr *symtab_hdr;
2602 struct elf_link_hash_entry **sym_hashes;
2603 bfd_vma *local_got_offsets;
2604 bfd_vma *local_tlsdesc_gotents;
2605 Elf_Internal_Rela *rel;
2606 Elf_Internal_Rela *wrel;
2607 Elf_Internal_Rela *relend;
2608 unsigned int plt_entry_size;
2609 bool status;
2610
2611 /* Skip if check_relocs or scan_relocs failed. */
2612 if (input_section->check_relocs_failed)
2613 return false;
2614
2615 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
2616 if (htab == NULL)
2617 return false;
2618
2619 if (!is_x86_elf (input_bfd, htab))
2620 {
2621 bfd_set_error (bfd_error_wrong_format);
2622 return false;
2623 }
2624
2625 plt_entry_size = htab->plt.plt_entry_size;
2626 symtab_hdr = &elf_symtab_hdr (input_bfd);
2627 sym_hashes = elf_sym_hashes (input_bfd);
2628 local_got_offsets = elf_local_got_offsets (input_bfd);
2629 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd);
2630
2631 _bfd_x86_elf_set_tls_module_base (info);
2632
2633 status = true;
2634 rel = wrel = relocs;
2635 relend = relocs + input_section->reloc_count;
2636 for (; rel < relend; wrel++, rel++)
2637 {
2638 unsigned int r_type, r_type_tls;
2639 reloc_howto_type *howto;
2640 unsigned long r_symndx;
2641 struct elf_link_hash_entry *h;
2642 struct elf_x86_link_hash_entry *eh;
2643 Elf_Internal_Sym *sym;
2644 asection *sec;
2645 bfd_vma off, offplt, plt_offset;
2646 bfd_vma relocation;
2647 bool unresolved_reloc;
2648 bfd_reloc_status_type r;
2649 int tls_type;
2650 asection *base_got, *resolved_plt;
2651 bfd_vma st_size;
2652 bool resolved_to_zero;
2653 bool relative_reloc;
2654 bool converted_reloc;
2655 bool need_copy_reloc_in_pie;
2656 bool no_copyreloc_p;
2657
2658 r_type = ELF32_R_TYPE (rel->r_info);
2659 if (r_type == (int) R_X86_64_GNU_VTINHERIT
2660 || r_type == (int) R_X86_64_GNU_VTENTRY)
2661 {
2662 if (wrel != rel)
2663 *wrel = *rel;
2664 continue;
2665 }
2666
2667 r_symndx = htab->r_sym (rel->r_info);
2668 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0;
2669 if (converted_reloc)
2670 {
2671 r_type &= ~R_X86_64_converted_reloc_bit;
2672 rel->r_info = htab->r_info (r_symndx, r_type);
2673 }
2674
2675 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type);
2676 if (howto == NULL)
2677 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type);
2678
2679 h = NULL;
2680 sym = NULL;
2681 sec = NULL;
2682 unresolved_reloc = false;
2683 if (r_symndx < symtab_hdr->sh_info)
2684 {
2685 sym = local_syms + r_symndx;
2686 sec = local_sections[r_symndx];
2687
2688 relocation = _bfd_elf_rela_local_sym (output_bfd, sym,
2689 &sec, rel);
2690 st_size = sym->st_size;
2691
2692 /* Relocate against local STT_GNU_IFUNC symbol. */
2693 if (!bfd_link_relocatable (info)
2694 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC)
2695 {
2696 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd,
2697 rel, false);
2698 if (h == NULL)
2699 abort ();
2700
2701 /* Set STT_GNU_IFUNC symbol value. */
2702 h->root.u.def.value = sym->st_value;
2703 h->root.u.def.section = sec;
2704 }
2705 }
2706 else
2707 {
2708 bool warned ATTRIBUTE_UNUSED;
2709 bool ignored ATTRIBUTE_UNUSED;
2710
2711 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel,
2712 r_symndx, symtab_hdr, sym_hashes,
2713 h, sec, relocation,
2714 unresolved_reloc, warned, ignored);
2715 st_size = h->size;
2716 }
2717
2718 if (sec != NULL && discarded_section (sec))
2719 {
2720 _bfd_clear_contents (howto, input_bfd, input_section,
2721 contents, rel->r_offset);
2722 wrel->r_offset = rel->r_offset;
2723 wrel->r_info = 0;
2724 wrel->r_addend = 0;
2725
2726 /* For ld -r, remove relocations in debug sections against
2727 sections defined in discarded sections. Not done for
2728 .eh_frame, whose editing code expects these relocs to be present. */
2729 if (bfd_link_relocatable (info)
2730 && (input_section->flags & SEC_DEBUGGING))
2731 wrel--;
2732
2733 continue;
2734 }
2735
2736 if (bfd_link_relocatable (info))
2737 {
2738 if (wrel != rel)
2739 *wrel = *rel;
2740 continue;
2741 }
2742
2743 if (rel->r_addend == 0 && !ABI_64_P (output_bfd))
2744 {
2745 if (r_type == R_X86_64_64)
2746 {
2747 /* For x32, treat R_X86_64_64 like R_X86_64_32 and
2748 zero-extend it to 64bit if addend is zero. */
2749 r_type = R_X86_64_32;
2750 memset (contents + rel->r_offset + 4, 0, 4);
2751 }
2752 else if (r_type == R_X86_64_SIZE64)
2753 {
2754 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and
2755 zero-extend it to 64bit if addend is zero. */
2756 r_type = R_X86_64_SIZE32;
2757 memset (contents + rel->r_offset + 4, 0, 4);
2758 }
2759 }
2760
2761 eh = (struct elf_x86_link_hash_entry *) h;
2762
2763 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle
2764 it here if it is defined in a non-shared object. */
2765 if (h != NULL
2766 && h->type == STT_GNU_IFUNC
2767 && h->def_regular)
2768 {
2769 bfd_vma plt_index;
2770 const char *name;
2771
2772 if ((input_section->flags & SEC_ALLOC) == 0)
2773 {
2774 /* If this is a SHT_NOTE section without SHF_ALLOC, treat
2775 STT_GNU_IFUNC symbol as STT_FUNC. */
2776 if (elf_section_type (input_section) == SHT_NOTE)
2777 goto skip_ifunc;
2778 /* Dynamic relocs are not propagated for SEC_DEBUGGING
2779 sections because such sections are not SEC_ALLOC and
2780 thus ld.so will not process them. */
2781 if ((input_section->flags & SEC_DEBUGGING) != 0)
2782 continue;
2783 abort ();
2784 }
2785
2786 switch (r_type)
2787 {
2788 default:
2789 break;
2790
2791 case R_X86_64_GOTPCREL:
2792 case R_X86_64_GOTPCRELX:
2793 case R_X86_64_REX_GOTPCRELX:
2794 case R_X86_64_CODE_4_GOTPCRELX:
2795 case R_X86_64_GOTPCREL64:
2796 base_got = htab->elf.sgot;
2797 off = h->got.offset;
2798
2799 if (base_got == NULL)
2800 abort ();
2801
2802 if (off == (bfd_vma) -1)
2803 {
2804 /* We can't use h->got.offset here to save state, or
2805 even just remember the offset, as finish_dynamic_symbol
2806 would use that as offset into .got. */
2807
2808 if (h->plt.offset == (bfd_vma) -1)
2809 abort ();
2810
2811 if (htab->elf.splt != NULL)
2812 {
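/* The first three .got.plt entries are reserved (_DYNAMIC, the
   link map and the lazy resolver address), hence the "+ 3" when
   computing the slot for this PLT index.  */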
2813 plt_index = (h->plt.offset / plt_entry_size
2814 - htab->plt.has_plt0);
2815 off = (plt_index + 3) * GOT_ENTRY_SIZE;
2816 base_got = htab->elf.sgotplt;
2817 }
2818 else
2819 {
2820 plt_index = h->plt.offset / plt_entry_size;
2821 off = plt_index * GOT_ENTRY_SIZE;
2822 base_got = htab->elf.igotplt;
2823 }
2824
2825 if (h->dynindx == -1
2826 || h->forced_local
2827 || info->symbolic)
2828 {
2829 /* This references the local definition. We must
2830 initialize this entry in the global offset table.
2831 Since the offset must always be a multiple of 8,
2832 we use the least significant bit to record
2833 whether we have initialized it already.
2834
2835 When doing a dynamic link, we create a .rela.got
2836 relocation entry to initialize the value. This
2837 is done in the finish_dynamic_symbol routine. */
2838 if ((off & 1) != 0)
2839 off &= ~1;
2840 else
2841 {
2842 bfd_put_64 (output_bfd, relocation,
2843 base_got->contents + off);
2844 /* Note that this is harmless for the GOTPLT64
2845 case, as -1 | 1 still is -1. */
2846 h->got.offset |= 1;
2847 }
2848 }
2849 }
2850
2851 relocation = (base_got->output_section->vma
2852 + base_got->output_offset + off);
2853
2854 goto do_relocation;
2855 }
2856
2857 if (h->plt.offset == (bfd_vma) -1)
2858 {
2859 /* Handle static pointers of STT_GNU_IFUNC symbols. */
2860 if (r_type == htab->pointer_r_type
2861 && (input_section->flags & SEC_CODE) == 0)
2862 goto do_ifunc_pointer;
2863 goto bad_ifunc_reloc;
2864 }
2865
2866 /* STT_GNU_IFUNC symbol must go through PLT. */
2867 if (htab->elf.splt != NULL)
2868 {
2869 if (htab->plt_second != NULL)
2870 {
2871 resolved_plt = htab->plt_second;
2872 plt_offset = eh->plt_second.offset;
2873 }
2874 else
2875 {
2876 resolved_plt = htab->elf.splt;
2877 plt_offset = h->plt.offset;
2878 }
2879 }
2880 else
2881 {
2882 resolved_plt = htab->elf.iplt;
2883 plt_offset = h->plt.offset;
2884 }
2885
2886 relocation = (resolved_plt->output_section->vma
2887 + resolved_plt->output_offset + plt_offset);
2888
2889 switch (r_type)
2890 {
2891 default:
2892 bad_ifunc_reloc:
2893 if (h->root.root.string)
2894 name = h->root.root.string;
2895 else
2896 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym,
2897 NULL);
2898 _bfd_error_handler
2899 /* xgettext:c-format */
2900 (_("%pB: relocation %s against STT_GNU_IFUNC "
2901 "symbol `%s' isn't supported"), input_bfd,
2902 howto->name, name);
2903 bfd_set_error (bfd_error_bad_value);
2904 return false;
2905
2906 case R_X86_64_32S:
2907 if (bfd_link_pic (info))
2908 abort ();
2909 goto do_relocation;
2910
2911 case R_X86_64_32:
2912 if (ABI_64_P (output_bfd))
2913 goto do_relocation;
2914 /* FALLTHROUGH */
2915 case R_X86_64_64:
2916 do_ifunc_pointer:
2917 if (rel->r_addend != 0)
2918 {
2919 if (h->root.root.string)
2920 name = h->root.root.string;
2921 else
2922 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
2923 sym, NULL);
2924 _bfd_error_handler
2925 /* xgettext:c-format */
2926 (_("%pB: relocation %s against STT_GNU_IFUNC "
2927 "symbol `%s' has non-zero addend: %" PRId64),
2928 input_bfd, howto->name, name, (int64_t) rel->r_addend);
2929 bfd_set_error (bfd_error_bad_value);
2930 return false;
2931 }
2932
2933 /* Generate a dynamic relocation only when there is a
2934 non-GOT reference in a shared object or there is no
2935 PLT. */
2936 if ((bfd_link_pic (info) && h->non_got_ref)
2937 || h->plt.offset == (bfd_vma) -1)
2938 {
2939 Elf_Internal_Rela outrel;
2940 asection *sreloc;
2941
2942 /* Need a dynamic relocation to get the real function
2943 address. */
2944 outrel.r_offset = _bfd_elf_section_offset (output_bfd,
2945 info,
2946 input_section,
2947 rel->r_offset);
2948 if (outrel.r_offset == (bfd_vma) -1
2949 || outrel.r_offset == (bfd_vma) -2)
2950 abort ();
2951
2952 outrel.r_offset += (input_section->output_section->vma
2953 + input_section->output_offset);
2954
2955 if (POINTER_LOCAL_IFUNC_P (info, h))
2956 {
2957 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
2958 h->root.root.string,
2959 h->root.u.def.section->owner);
2960
2961 /* This symbol is resolved locally. */
2962 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
2963 outrel.r_addend = (h->root.u.def.value
2964 + h->root.u.def.section->output_section->vma
2965 + h->root.u.def.section->output_offset);
2966
2967 if (htab->params->report_relative_reloc)
2968 _bfd_x86_elf_link_report_relative_reloc
2969 (info, input_section, h, sym,
2970 "R_X86_64_IRELATIVE", &outrel);
2971 }
2972 else
2973 {
2974 outrel.r_info = htab->r_info (h->dynindx, r_type);
2975 outrel.r_addend = 0;
2976 }
2977
2978 /* Dynamic relocations are stored in
2979 1. .rela.ifunc section in PIC object.
2980 2. .rela.got section in dynamic executable.
2981 3. .rela.iplt section in static executable. */
2982 if (bfd_link_pic (info))
2983 sreloc = htab->elf.irelifunc;
2984 else if (htab->elf.splt != NULL)
2985 sreloc = htab->elf.srelgot;
2986 else
2987 sreloc = htab->elf.irelplt;
2988 elf_append_rela (output_bfd, sreloc, &outrel);
2989
2990 /* If this reloc is against an external symbol, we
2991 do not want to fiddle with the addend. Otherwise,
2992 we need to include the symbol value so that it
2993 becomes an addend for the dynamic reloc. For an
2994 internal symbol, we have updated addend. */
2995 continue;
2996 }
2997 /* FALLTHROUGH */
2998 case R_X86_64_PC32:
2999 case R_X86_64_PC64:
3000 case R_X86_64_PLT32:
3001 goto do_relocation;
3002 }
3003 }
3004
3005 skip_ifunc:
3006 resolved_to_zero = (eh != NULL
3007 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh));
3008
3009 /* When generating a shared object, the relocations handled here are
3010 copied into the output file to be resolved at run time. */
3011 switch (r_type)
3012 {
3013 case R_X86_64_GOT32:
3014 case R_X86_64_GOT64:
3015 /* Relocation is to the entry for this symbol in the global
3016 offset table. */
3017 case R_X86_64_GOTPCREL:
3018 case R_X86_64_GOTPCRELX:
3019 case R_X86_64_REX_GOTPCRELX:
3020 case R_X86_64_CODE_4_GOTPCRELX:
3021 case R_X86_64_GOTPCREL64:
3022 /* Use global offset table entry as symbol value. */
3023 case R_X86_64_GOTPLT64:
3024 /* This is obsolete and treated the same as GOT64. */
3025 base_got = htab->elf.sgot;
3026
3027 if (htab->elf.sgot == NULL)
3028 abort ();
3029
3030 relative_reloc = false;
3031 if (h != NULL)
3032 {
3033 off = h->got.offset;
3034 if (h->needs_plt
3035 && h->plt.offset != (bfd_vma)-1
3036 && off == (bfd_vma)-1)
3037 {
3038 /* We can't use h->got.offset here to save
3039 state, or even just remember the offset, as
3040 finish_dynamic_symbol would use that as offset into
3041 .got. */
3042 bfd_vma plt_index = (h->plt.offset / plt_entry_size
3043 - htab->plt.has_plt0);
3044 off = (plt_index + 3) * GOT_ENTRY_SIZE;
3045 base_got = htab->elf.sgotplt;
3046 }
3047
3048 if (RESOLVED_LOCALLY_P (info, h, htab))
3049 {
3050 /* We must initialize this entry in the global offset
3051 table. Since the offset must always be a multiple
3052 of 8, we use the least significant bit to record
3053 whether we have initialized it already.
3054
3055 When doing a dynamic link, we create a .rela.got
3056 relocation entry to initialize the value. This is
3057 done in the finish_dynamic_symbol routine. */
3058 if ((off & 1) != 0)
3059 off &= ~1;
3060 else
3061 {
3062 bfd_put_64 (output_bfd, relocation,
3063 base_got->contents + off);
3064 /* Note that this is harmless for the GOTPLT64 case,
3065 as -1 | 1 still is -1. */
3066 h->got.offset |= 1;
3067
3068 /* NB: Don't generate relative relocation here if
3069 it has been generated by DT_RELR. */
3070 if (!info->enable_dt_relr
3071 && GENERATE_RELATIVE_RELOC_P (info, h))
3072 {
3073 /* If this symbol isn't dynamic in PIC,
3074 generate R_X86_64_RELATIVE here. */
3075 eh->no_finish_dynamic_symbol = 1;
3076 relative_reloc = true;
3077 }
3078 }
3079 }
3080 else
3081 unresolved_reloc = false;
3082 }
3083 else
3084 {
3085 if (local_got_offsets == NULL)
3086 abort ();
3087
3088 off = local_got_offsets[r_symndx];
3089
3090 /* The offset must always be a multiple of 8. We use
3091 the least significant bit to record whether we have
3092 already generated the necessary reloc. */
3093 if ((off & 1) != 0)
3094 off &= ~1;
3095 else
3096 {
3097 bfd_put_64 (output_bfd, relocation,
3098 base_got->contents + off);
3099 local_got_offsets[r_symndx] |= 1;
3100
3101 /* NB: GOTPCREL relocations against local absolute
3102 symbol store relocation value in the GOT slot
3103 without relative relocation. Don't generate
3104 relative relocation here if it has been generated
3105 by DT_RELR. */
3106 if (!info->enable_dt_relr
3107 && bfd_link_pic (info)
3108 && !(sym->st_shndx == SHN_ABS
3109 && (r_type == R_X86_64_GOTPCREL
3110 || r_type == R_X86_64_GOTPCRELX
3111 || r_type == R_X86_64_REX_GOTPCRELX
3112 || r_type == R_X86_64_CODE_4_GOTPCRELX)))
3113 relative_reloc = true;
3114 }
3115 }
3116
3117 if (relative_reloc)
3118 {
3119 asection *s;
3120 Elf_Internal_Rela outrel;
3121
3122 /* We need to generate a R_X86_64_RELATIVE reloc
3123 for the dynamic linker. */
3124 s = htab->elf.srelgot;
3125 if (s == NULL)
3126 abort ();
3127
3128 outrel.r_offset = (base_got->output_section->vma
3129 + base_got->output_offset
3130 + off);
3131 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE);
3132 outrel.r_addend = relocation;
3133
3134 if (htab->params->report_relative_reloc)
3135 _bfd_x86_elf_link_report_relative_reloc
3136 (info, input_section, h, sym, "R_X86_64_RELATIVE",
3137 &outrel);
3138
3139 elf_append_rela (output_bfd, s, &outrel);
3140 }
3141
3142 if (off >= (bfd_vma) -2)
3143 abort ();
3144
3145 relocation = base_got->output_section->vma
3146 + base_got->output_offset + off;
3147 if (r_type != R_X86_64_GOTPCREL
3148 && r_type != R_X86_64_GOTPCRELX
3149 && r_type != R_X86_64_REX_GOTPCRELX
3150 && r_type != R_X86_64_CODE_4_GOTPCRELX
3151 && r_type != R_X86_64_GOTPCREL64)
3152 relocation -= htab->elf.sgotplt->output_section->vma
3153 - htab->elf.sgotplt->output_offset;
3154
3155 break;
3156
3157 case R_X86_64_GOTOFF64:
3158 /* Relocation is relative to the start of the global offset
3159 table. */
3160
3161 /* Check that this isn't a protected function or data
3162 symbol in a shared library, since it may not be local when
3163 used as a function address or with a copy relocation. We also
3164 need to make sure that the symbol is referenced locally. */
3165 if (bfd_link_pic (info) && h)
3166 {
3167 if (!h->def_regular)
3168 {
3169 const char *v;
3170
3171 switch (ELF_ST_VISIBILITY (h->other))
3172 {
3173 case STV_HIDDEN:
3174 v = _("hidden symbol");
3175 break;
3176 case STV_INTERNAL:
3177 v = _("internal symbol");
3178 break;
3179 case STV_PROTECTED:
3180 v = _("protected symbol");
3181 break;
3182 default:
3183 v = _("symbol");
3184 break;
3185 }
3186
3187 _bfd_error_handler
3188 /* xgettext:c-format */
3189 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s"
3190 " `%s' can not be used when making a shared object"),
3191 input_bfd, v, h->root.root.string);
3192 bfd_set_error (bfd_error_bad_value);
3193 return false;
3194 }
3195 else if (!bfd_link_executable (info)
3196 && !SYMBOL_REFERENCES_LOCAL_P (info, h)
3197 && (h->type == STT_FUNC
3198 || h->type == STT_OBJECT)
3199 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED)
3200 {
3201 _bfd_error_handler
3202 /* xgettext:c-format */
3203 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s"
3204 " `%s' can not be used when making a shared object"),
3205 input_bfd,
3206 h->type == STT_FUNC ? "function" : "data",
3207 h->root.root.string);
3208 bfd_set_error (bfd_error_bad_value);
3209 return false;
3210 }
3211 }
3212
3213 /* Note that sgot is not involved in this
3214 calculation. We always want the start of .got.plt. If we
3215 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is
3216 permitted by the ABI, we might have to change this
3217 calculation. */
3218 relocation -= htab->elf.sgotplt->output_section->vma
3219 + htab->elf.sgotplt->output_offset;
3220 break;
3221
3222 case R_X86_64_GOTPC32:
3223 case R_X86_64_GOTPC64:
3224 /* Use global offset table as symbol value. */
3225 relocation = htab->elf.sgotplt->output_section->vma
3226 + htab->elf.sgotplt->output_offset;
3227 unresolved_reloc = false;
3228 break;
3229
3230 case R_X86_64_PLTOFF64:
3231 /* Relocation is PLT entry relative to GOT. For local
3232 symbols it's the symbol itself relative to GOT. */
3233 if (h != NULL
3234 /* See PLT32 handling. */
3235 && (h->plt.offset != (bfd_vma) -1
3236 || eh->plt_got.offset != (bfd_vma) -1)
3237 && htab->elf.splt != NULL)
3238 {
3239 if (eh->plt_got.offset != (bfd_vma) -1)
3240 {
3241 /* Use the GOT PLT. */
3242 resolved_plt = htab->plt_got;
3243 plt_offset = eh->plt_got.offset;
3244 }
3245 else if (htab->plt_second != NULL)
3246 {
3247 resolved_plt = htab->plt_second;
3248 plt_offset = eh->plt_second.offset;
3249 }
3250 else
3251 {
3252 resolved_plt = htab->elf.splt;
3253 plt_offset = h->plt.offset;
3254 }
3255
3256 relocation = (resolved_plt->output_section->vma
3257 + resolved_plt->output_offset
3258 + plt_offset);
3259 unresolved_reloc = false;
3260 }
3261
3262 relocation -= htab->elf.sgotplt->output_section->vma
3263 + htab->elf.sgotplt->output_offset;
3264 break;
3265
3266 case R_X86_64_PLT32:
3267 /* Relocation is to the entry for this symbol in the
3268 procedure linkage table. */
3269
3270 /* Resolve a PLT32 reloc against a local symbol directly,
3271 without using the procedure linkage table. */
3272 if (h == NULL)
3273 break;
3274
3275 if ((h->plt.offset == (bfd_vma) -1
3276 && eh->plt_got.offset == (bfd_vma) -1)
3277 || htab->elf.splt == NULL)
3278 {
3279 /* We didn't make a PLT entry for this symbol. This
3280 happens when statically linking PIC code, or when
3281 using -Bsymbolic. */
3282 break;
3283 }
3284
3285 use_plt:
3286 if (h->plt.offset != (bfd_vma) -1)
3287 {
3288 if (htab->plt_second != NULL)
3289 {
3290 resolved_plt = htab->plt_second;
3291 plt_offset = eh->plt_second.offset;
3292 }
3293 else
3294 {
3295 resolved_plt = htab->elf.splt;
3296 plt_offset = h->plt.offset;
3297 }
3298 }
3299 else
3300 {
3301 /* Use the GOT PLT. */
3302 resolved_plt = htab->plt_got;
3303 plt_offset = eh->plt_got.offset;
3304 }
3305
3306 relocation = (resolved_plt->output_section->vma
3307 + resolved_plt->output_offset
3308 + plt_offset);
3309 unresolved_reloc = false;
3310 break;
3311
3312 case R_X86_64_SIZE32:
3313 case R_X86_64_SIZE64:
3314 /* Set to symbol size. */
3315 relocation = st_size;
3316 goto direct;
3317
3318 case R_X86_64_PC8:
3319 case R_X86_64_PC16:
3320 case R_X86_64_PC32:
3321 /* Don't complain about -fPIC if the symbol is undefined when
3322 building an executable, unless it is an unresolved weak symbol,
3323 references a dynamic definition in a PIE, or -z nocopyreloc
3324 is used. */
3325 no_copyreloc_p
3326 = (info->nocopyreloc
3327 || (h != NULL
3328 && !h->root.linker_def
3329 && !h->root.ldscript_def
3330 && eh->def_protected));
3331
3332 if ((input_section->flags & SEC_ALLOC) != 0
3333 && (input_section->flags & SEC_READONLY) != 0
3334 && h != NULL
3335 && ((bfd_link_executable (info)
3336 && ((h->root.type == bfd_link_hash_undefweak
3337 && (eh == NULL
3338 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info,
3339 eh)))
3340 || (bfd_link_pie (info)
3341 && !SYMBOL_DEFINED_NON_SHARED_P (h)
3342 && h->def_dynamic)
3343 || (no_copyreloc_p
3344 && h->def_dynamic
3345 && !(h->root.u.def.section->flags & SEC_CODE))))
3346 || (bfd_link_pie (info)
3347 && h->root.type == bfd_link_hash_undefweak)
3348 || bfd_link_dll (info)))
3349 {
3350 bool fail = false;
3351 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
3352 {
3353 /* Symbol is referenced locally. Make sure it is
3354 defined locally. */
3355 fail = !SYMBOL_DEFINED_NON_SHARED_P (h);
3356 }
3357 else if (bfd_link_pie (info))
3358 {
3359 /* We can only use PC-relative relocations in PIE
3360 from non-code sections. */
3361 if (h->root.type == bfd_link_hash_undefweak
3362 || (h->type == STT_FUNC
3363 && (sec->flags & SEC_CODE) != 0))
3364 fail = true;
3365 }
3366 else if (no_copyreloc_p || bfd_link_dll (info))
3367 {
3368 /* Symbol doesn't need copy reloc and isn't
3369 referenced locally. Don't allow PC-relative
3370 relocations against default and protected
3371 symbols since address of protected function
3372 and location of protected data may not be in
3373 the shared object. */
3374 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT
3375 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED);
3376 }
3377
3378 if (fail)
3379 return elf_x86_64_need_pic (info, input_bfd, input_section,
3380 h, NULL, NULL, howto);
3381 }
3382 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE
3383 as function address. */
3384 else if (h != NULL
3385 && (input_section->flags & SEC_CODE) == 0
3386 && bfd_link_pie (info)
3387 && h->type == STT_FUNC
3388 && !h->def_regular
3389 && h->def_dynamic)
3390 goto use_plt;
3391 /* Fall through. */
3392
3393 case R_X86_64_8:
3394 case R_X86_64_16:
3395 case R_X86_64_32:
3396 case R_X86_64_PC64:
3397 case R_X86_64_64:
3398 /* FIXME: The ABI says the linker should make sure the value is
3399 the same when it is zero-extended to 64 bits. */
3400
3401 direct:
3402 if ((input_section->flags & SEC_ALLOC) == 0)
3403 break;
3404
3405 need_copy_reloc_in_pie = (bfd_link_pie (info)
3406 && h != NULL
3407 && (h->needs_copy
3408 || eh->needs_copy
3409 || (h->root.type
3410 == bfd_link_hash_undefined))
3411 && (X86_PCREL_TYPE_P (true, r_type)
3412 || X86_SIZE_TYPE_P (true,
3413 r_type)));
3414
3415 if (GENERATE_DYNAMIC_RELOCATION_P (true, info, eh, r_type, sec,
3416 need_copy_reloc_in_pie,
3417 resolved_to_zero, false))
3418 {
3419 Elf_Internal_Rela outrel;
3420 bool skip, relocate;
3421 bool generate_dynamic_reloc = true;
3422 asection *sreloc;
3423 const char *relative_reloc_name = NULL;
3424
3425 /* When generating a shared object, these relocations
3426 are copied into the output file to be resolved at run
3427 time. */
3428 skip = false;
3429 relocate = false;
3430
3431 outrel.r_offset =
3432 _bfd_elf_section_offset (output_bfd, info, input_section,
3433 rel->r_offset);
3434 if (outrel.r_offset == (bfd_vma) -1)
3435 skip = true;
3436 else if (outrel.r_offset == (bfd_vma) -2)
3437 skip = true, relocate = true;
3438
3439 outrel.r_offset += (input_section->output_section->vma
3440 + input_section->output_offset);
3441
3442 if (skip)
3443 memset (&outrel, 0, sizeof outrel);
3444
3445 else if (COPY_INPUT_RELOC_P (true, info, h, r_type))
3446 {
3447 outrel.r_info = htab->r_info (h->dynindx, r_type);
3448 outrel.r_addend = rel->r_addend;
3449 }
3450 else
3451 {
3452 /* This symbol is local, or marked to become local.
3453 When relocation overflow check is disabled, we
3454 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */
3455 if (r_type == htab->pointer_r_type
3456 || (r_type == R_X86_64_32
3457 && htab->params->no_reloc_overflow_check))
3458 {
3459 relocate = true;
3460 /* NB: Don't generate relative relocation here if
3461 it has been generated by DT_RELR. */
3462 if (info->enable_dt_relr)
3463 generate_dynamic_reloc = false;
3464 else
3465 {
3466 outrel.r_info =
3467 htab->r_info (0, R_X86_64_RELATIVE);
3468 outrel.r_addend = relocation + rel->r_addend;
3469 relative_reloc_name = "R_X86_64_RELATIVE";
3470 }
3471 }
3472 else if (r_type == R_X86_64_64
3473 && !ABI_64_P (output_bfd))
3474 {
3475 relocate = true;
3476 outrel.r_info = htab->r_info (0,
3477 R_X86_64_RELATIVE64);
3478 outrel.r_addend = relocation + rel->r_addend;
3479 relative_reloc_name = "R_X86_64_RELATIVE64";
3480 /* Check addend overflow. */
3481 if ((outrel.r_addend & 0x80000000)
3482 != (rel->r_addend & 0x80000000))
3483 {
3484 const char *name;
3485 int addend = rel->r_addend;
3486 if (h && h->root.root.string)
3487 name = h->root.root.string;
3488 else
3489 name = bfd_elf_sym_name (input_bfd, symtab_hdr,
3490 sym, NULL);
3491 _bfd_error_handler
3492 /* xgettext:c-format */
3493 (_("%pB: addend %s%#x in relocation %s against "
3494 "symbol `%s' at %#" PRIx64
3495 " in section `%pA' is out of range"),
3496 input_bfd, addend < 0 ? "-" : "", addend,
3497 howto->name, name, (uint64_t) rel->r_offset,
3498 input_section);
3499 bfd_set_error (bfd_error_bad_value);
3500 return false;
3501 }
3502 }
3503 else
3504 {
3505 long sindx;
3506
3507 if (bfd_is_abs_section (sec))
3508 sindx = 0;
3509 else if (sec == NULL || sec->owner == NULL)
3510 {
3511 bfd_set_error (bfd_error_bad_value);
3512 return false;
3513 }
3514 else
3515 {
3516 asection *osec;
3517
3518 /* We are turning this relocation into one
3519 against a section symbol. It would be
3520 proper to subtract the symbol's value,
3521 osec->vma, from the emitted reloc addend,
3522 but ld.so expects buggy relocs. */
3523 osec = sec->output_section;
3524 sindx = elf_section_data (osec)->dynindx;
3525 if (sindx == 0)
3526 {
3527 asection *oi = htab->elf.text_index_section;
3528 sindx = elf_section_data (oi)->dynindx;
3529 }
3530 BFD_ASSERT (sindx != 0);
3531 }
3532
3533 outrel.r_info = htab->r_info (sindx, r_type);
3534 outrel.r_addend = relocation + rel->r_addend;
3535 }
3536 }
3537
3538 if (generate_dynamic_reloc)
3539 {
3540 sreloc = elf_section_data (input_section)->sreloc;
3541
3542 if (sreloc == NULL || sreloc->contents == NULL)
3543 {
3544 r = bfd_reloc_notsupported;
3545 goto check_relocation_error;
3546 }
3547
3548 if (relative_reloc_name
3549 && htab->params->report_relative_reloc)
3550 _bfd_x86_elf_link_report_relative_reloc
3551 (info, input_section, h, sym,
3552 relative_reloc_name, &outrel);
3553
3554 elf_append_rela (output_bfd, sreloc, &outrel);
3555 }
3556
3557 /* If this reloc is against an external symbol, we do
3558 not want to fiddle with the addend. Otherwise, we
3559 need to include the symbol value so that it becomes
3560 an addend for the dynamic reloc. */
3561 if (! relocate)
3562 continue;
3563 }
3564
3565 break;
3566
3567 case R_X86_64_TLSGD:
3568 case R_X86_64_GOTPC32_TLSDESC:
3569 case R_X86_64_CODE_4_GOTPC32_TLSDESC:
3570 case R_X86_64_TLSDESC_CALL:
3571 case R_X86_64_GOTTPOFF:
3572 case R_X86_64_CODE_4_GOTTPOFF:
3573 tls_type = GOT_UNKNOWN;
3574 if (h == NULL && local_got_offsets)
3575 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx];
3576 else if (h != NULL)
3577 tls_type = elf_x86_hash_entry (h)->tls_type;
3578
3579 r_type_tls = r_type;
3580 if (! elf_x86_64_tls_transition (info, input_bfd,
3581 input_section, contents,
3582 symtab_hdr, sym_hashes,
3583 &r_type_tls, tls_type, rel,
3584 relend, h, r_symndx, true))
3585 return false;
3586
3587 if (r_type_tls == R_X86_64_TPOFF32)
3588 {
3589 bfd_vma roff = rel->r_offset;
3590
3591 if (roff >= input_section->size)
3592 goto corrupt_input;
3593
3594 BFD_ASSERT (! unresolved_reloc);
3595
3596 if (r_type == R_X86_64_TLSGD)
3597 {
3598 /* GD->LE transition. For 64bit, change
3599 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3600 .word 0x6666; rex64; call __tls_get_addr@PLT
3601 or
3602 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
3603 .byte 0x66; rex64
3604 call *__tls_get_addr@GOTPCREL(%rip)
3605 which may be converted to
3606 addr32 call __tls_get_addr
3607 into:
3608 movq %fs:0, %rax
3609 leaq foo@tpoff(%rax), %rax
3610 For 32bit, change
3611 leaq foo@tlsgd(%rip), %rdi
3612 .word 0x6666; rex64; call __tls_get_addr@PLT
3613 or
3614 leaq foo@tlsgd(%rip), %rdi
3615 .byte 0x66; rex64
3616 call *__tls_get_addr@GOTPCREL(%rip)
3617 which may be converted to
3618 addr32 call __tls_get_addr
3619 into:
3620 movl %fs:0, %eax
3621 leaq foo@tpoff(%rax), %rax
3622 For largepic, change:
3623 leaq foo@tlsgd(%rip), %rdi
3624 movabsq $__tls_get_addr@pltoff, %rax
3625 addq %r15, %rax
3626 call *%rax
3627 into:
3628 movq %fs:0, %rax
3629 leaq foo@tpoff(%rax), %rax
3630 nopw 0x0(%rax,%rax,1) */
3631 int largepic = 0;
3632 if (ABI_64_P (output_bfd))
3633 {
3634 if (roff + 5 >= input_section->size)
3635 goto corrupt_input;
3636 if (contents[roff + 5] == 0xb8)
3637 {
3638 if (roff < 3
3639 || (roff - 3 + 22) > input_section->size)
3640 {
3641 corrupt_input:
3642 info->callbacks->einfo
3643 (_("%F%P: corrupt input: %pB\n"),
3644 input_bfd);
3645 return false;
3646 }
3647 memcpy (contents + roff - 3,
3648 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80"
3649 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
3650 largepic = 1;
3651 }
3652 else
3653 {
3654 if (roff < 4
3655 || (roff - 4 + 16) > input_section->size)
3656 goto corrupt_input;
3657 memcpy (contents + roff - 4,
3658 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3659 16);
3660 }
3661 }
3662 else
3663 {
3664 if (roff < 3
3665 || (roff - 3 + 15) > input_section->size)
3666 goto corrupt_input;
3667 memcpy (contents + roff - 3,
3668 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0",
3669 15);
3670 }
3671
3672 if (roff + 8 + largepic >= input_section->size)
3673 goto corrupt_input;
3674
3675 bfd_put_32 (output_bfd,
3676 elf_x86_64_tpoff (info, relocation),
3677 contents + roff + 8 + largepic);
3678 /* Skip R_X86_64_PC32, R_X86_64_PLT32,
3679 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */
3680 rel++;
3681 wrel++;
3682 continue;
3683 }
3684 else if (r_type == R_X86_64_GOTPC32_TLSDESC)
3685 {
3686 /* GDesc -> LE transition.
3687 It's originally something like:
3688 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
3689 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
3690
3691 Change it to:
3692 movq $x@tpoff, %rax <--- LP64 mode.
3693 rex movl $x@tpoff, %eax <--- X32 mode.
3694 */
3695
3696 unsigned int val, type;
3697
3698 if (roff < 3)
3699 goto corrupt_input;
3700 type = bfd_get_8 (input_bfd, contents + roff - 3);
3701 val = bfd_get_8 (input_bfd, contents + roff - 1);
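/* Keep REX.W and move REX.R down into REX.B: the destination register
moves from the ModRM reg field of the lea to the rm field of the
mov $imm32 (0xc7 /0) form.  */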
3702 bfd_put_8 (output_bfd,
3703 (type & 0x48) | ((type >> 2) & 1),
3704 contents + roff - 3);
3705 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3706 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3707 contents + roff - 1);
3708 bfd_put_32 (output_bfd,
3709 elf_x86_64_tpoff (info, relocation),
3710 contents + roff);
3711 continue;
3712 }
3713 else if (r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC)
3714 {
3715 /* GDesc -> LE transition.
3716 It's originally something like:
3717 lea x@tlsdesc(%rip), %reg
3718
3719 Change it to:
3720 mov $x@tpoff, %reg
3721 where reg is one of r16 to r31. */
3722
3723 unsigned int val, rex2;
3724 unsigned int rex2_mask = REX_R | REX_R << 4;
3725
3726 if (roff < 4)
3727 goto corrupt_input;
3728 rex2 = bfd_get_8 (input_bfd, contents + roff - 3);
3729 val = bfd_get_8 (input_bfd, contents + roff - 1);
3730 /* Move the R bits to the B bits in REX2 payload
3731 byte. */
3732 bfd_put_8 (output_bfd,
3733 ((rex2 & ~rex2_mask)
3734 | (rex2 & rex2_mask) >> 2),
3735 contents + roff - 3);
3736 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2);
3737 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7),
3738 contents + roff - 1);
3739 bfd_put_32 (output_bfd,
3740 elf_x86_64_tpoff (info, relocation),
3741 contents + roff);
3742 continue;
3743 }
3744 else if (r_type == R_X86_64_TLSDESC_CALL)
3745 {
3746 /* GDesc -> LE transition.
3747 It's originally:
3748 call *(%rax) <--- LP64 mode.
3749 call *(%eax) <--- X32 mode.
3750 Turn it into:
3751 xchg %ax,%ax <-- LP64 mode.
3752 nopl (%rax) <-- X32 mode.
3753 */
3754 unsigned int prefix = 0;
3755 if (!ABI_64_P (input_bfd))
3756 {
3757 /* Check for call *x@tlsdesc(%eax). */
3758 if (contents[roff] == 0x67)
3759 prefix = 1;
3760 }
3761 if (prefix)
3762 {
3763 if (roff + 2 >= input_section->size)
3764 goto corrupt_input;
3765
3766 bfd_put_8 (output_bfd, 0x0f, contents + roff);
3767 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
3768 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
3769 }
3770 else
3771 {
3772 if (roff + 1 >= input_section->size)
3773 goto corrupt_input;
3774
3775 bfd_put_8 (output_bfd, 0x66, contents + roff);
3776 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
3777 }
3778 continue;
3779 }
3780 else if (r_type == R_X86_64_GOTTPOFF)
3781 {
3782 /* IE->LE transition:
3783 For 64bit, originally it can be one of:
3784 movq foo@gottpoff(%rip), %reg
3785 addq foo@gottpoff(%rip), %reg
3786 We change it into:
3787 movq $foo, %reg
3788 leaq foo(%reg), %reg
3789 addq $foo, %reg.
3790 For 32bit, originally it can be one of:
3791 movq foo@gottpoff(%rip), %reg
3792 addl foo@gottpoff(%rip), %reg
3793 We change it into:
3794 movq $foo, %reg
3795 leal foo(%reg), %reg
3796 addl $foo, %reg. */
3797
3798 unsigned int val, type, reg;
3799
3800 if (roff >= 3)
3801 val = bfd_get_8 (input_bfd, contents + roff - 3);
3802 else
3803 {
3804 if (roff < 2)
3805 goto corrupt_input;
3806 val = 0;
3807 }
3808 type = bfd_get_8 (input_bfd, contents + roff - 2);
3809 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3810 reg >>= 3;
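/* VAL is the REX prefix byte (zero when absent), TYPE the opcode byte
(0x8b for the mov form, otherwise add) and REG the destination
register taken from the ModRM reg field.  */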
3811 if (type == 0x8b)
3812 {
3813 /* movq */
3814 if (val == 0x4c)
3815 {
3816 if (roff < 3)
3817 goto corrupt_input;
3818 bfd_put_8 (output_bfd, 0x49,
3819 contents + roff - 3);
3820 }
3821 else if (!ABI_64_P (output_bfd) && val == 0x44)
3822 {
3823 if (roff < 3)
3824 goto corrupt_input;
3825 bfd_put_8 (output_bfd, 0x41,
3826 contents + roff - 3);
3827 }
3828 bfd_put_8 (output_bfd, 0xc7,
3829 contents + roff - 2);
3830 bfd_put_8 (output_bfd, 0xc0 | reg,
3831 contents + roff - 1);
3832 }
3833 else if (reg == 4)
3834 {
3835 /* addq/addl -> addq/addl - addressing with %rsp/%r12
3836 is special */
3837 if (val == 0x4c)
3838 {
3839 if (roff < 3)
3840 goto corrupt_input;
3841 bfd_put_8 (output_bfd, 0x49,
3842 contents + roff - 3);
3843 }
3844 else if (!ABI_64_P (output_bfd) && val == 0x44)
3845 {
3846 if (roff < 3)
3847 goto corrupt_input;
3848 bfd_put_8 (output_bfd, 0x41,
3849 contents + roff - 3);
3850 }
3851 bfd_put_8 (output_bfd, 0x81,
3852 contents + roff - 2);
3853 bfd_put_8 (output_bfd, 0xc0 | reg,
3854 contents + roff - 1);
3855 }
3856 else
3857 {
3858 /* addq/addl -> leaq/leal */
3859 if (val == 0x4c)
3860 {
3861 if (roff < 3)
3862 goto corrupt_input;
3863 bfd_put_8 (output_bfd, 0x4d,
3864 contents + roff - 3);
3865 }
3866 else if (!ABI_64_P (output_bfd) && val == 0x44)
3867 {
3868 if (roff < 3)
3869 goto corrupt_input;
3870 bfd_put_8 (output_bfd, 0x45,
3871 contents + roff - 3);
3872 }
3873 bfd_put_8 (output_bfd, 0x8d,
3874 contents + roff - 2);
3875 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3),
3876 contents + roff - 1);
3877 }
3878 bfd_put_32 (output_bfd,
3879 elf_x86_64_tpoff (info, relocation),
3880 contents + roff);
3881 continue;
3882 }
3883 else if (r_type == R_X86_64_CODE_4_GOTTPOFF)
3884 {
3885 /* IE->LE transition:
3886 Originally it can be one of:
3887 mov foo@gottpoff(%rip), %reg
3888 add foo@gottpoff(%rip), %reg
3889 We change it into:
3890 mov $foo@tpoff, %reg
3891 add $foo@tpoff, %reg
3892 where reg is one of r16 to r31. */
3893
3894 unsigned int rex2, type, reg;
3895 unsigned int rex2_mask = REX_R | REX_R << 4;
3896
3897 if (roff < 4)
3898 goto corrupt_input;
3899
3900 rex2 = bfd_get_8 (input_bfd, contents + roff - 3);
3901 type = bfd_get_8 (input_bfd, contents + roff - 2);
3902 reg = bfd_get_8 (input_bfd, contents + roff - 1);
3903 reg >>= 3;
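/* Convert the opcode: the memory-form mov (0x8b) becomes mov $imm32
(0xc7 /0); anything else (add) becomes add $imm32 (0x81 /0).  */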
3904 if (type == 0x8b)
3905 type = 0xc7;
3906 else
3907 type = 0x81;
3908 /* Move the R bits to the B bits in the REX2 payload
3909 byte.  */
3910 bfd_put_8 (output_bfd,
3911 ((rex2 & ~rex2_mask)
3912 | (rex2 & rex2_mask) >> 2),
3913 contents + roff - 3);
3914 bfd_put_8 (output_bfd, type,
3915 contents + roff - 2);
3916 bfd_put_8 (output_bfd, 0xc0 | reg,
3917 contents + roff - 1);
3918 bfd_put_32 (output_bfd,
3919 elf_x86_64_tpoff (info, relocation),
3920 contents + roff);
3921 continue;
3922 }
3923 else
3924 BFD_ASSERT (false);
3925 }
3926
3927 if (htab->elf.sgot == NULL)
3928 abort ();
3929
3930 if (h != NULL)
3931 {
3932 off = h->got.offset;
3933 offplt = elf_x86_hash_entry (h)->tlsdesc_got;
3934 }
3935 else
3936 {
3937 if (local_got_offsets == NULL)
3938 abort ();
3939
3940 off = local_got_offsets[r_symndx];
3941 offplt = local_tlsdesc_gotents[r_symndx];
3942 }
3943
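/* Bit 0 of the saved GOT offset flags that the GOT entry has already
been initialized; clear it to recover the real offset, otherwise
create the entry and its dynamic relocations below.  */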
3944 if ((off & 1) != 0)
3945 off &= ~1;
3946 else
3947 {
3948 Elf_Internal_Rela outrel;
3949 int dr_type, indx;
3950 asection *sreloc;
3951
3952 if (htab->elf.srelgot == NULL)
3953 abort ();
3954
3955 indx = h && h->dynindx != -1 ? h->dynindx : 0;
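/* INDX is the dynamic symbol index, or 0 when the symbol resolves
locally; in the latter case the TLS value is folded into the
relocation addend below.  */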
3956
3957 if (GOT_TLS_GDESC_P (tls_type))
3958 {
3959 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC);
3960 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt
3961 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size);
3962 outrel.r_offset = (htab->elf.sgotplt->output_section->vma
3963 + htab->elf.sgotplt->output_offset
3964 + offplt
3965 + htab->sgotplt_jump_table_size);
3966 sreloc = htab->elf.srelplt;
3967 if (indx == 0)
3968 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3969 else
3970 outrel.r_addend = 0;
3971 elf_append_rela (output_bfd, sreloc, &outrel);
3972 }
3973
3974 sreloc = htab->elf.srelgot;
3975
3976 outrel.r_offset = (htab->elf.sgot->output_section->vma
3977 + htab->elf.sgot->output_offset + off);
3978
3979 if (GOT_TLS_GD_P (tls_type))
3980 dr_type = R_X86_64_DTPMOD64;
3981 else if (GOT_TLS_GDESC_P (tls_type))
3982 goto dr_done;
3983 else
3984 dr_type = R_X86_64_TPOFF64;
3985
3986 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off);
3987 outrel.r_addend = 0;
3988 if ((dr_type == R_X86_64_TPOFF64
3989 || dr_type == R_X86_64_TLSDESC) && indx == 0)
3990 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info);
3991 outrel.r_info = htab->r_info (indx, dr_type);
3992
3993 elf_append_rela (output_bfd, sreloc, &outrel);
3994
3995 if (GOT_TLS_GD_P (tls_type))
3996 {
3997 if (indx == 0)
3998 {
3999 BFD_ASSERT (! unresolved_reloc);
4000 bfd_put_64 (output_bfd,
4001 relocation - _bfd_x86_elf_dtpoff_base (info),
4002 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4003 }
4004 else
4005 {
4006 bfd_put_64 (output_bfd, 0,
4007 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4008 outrel.r_info = htab->r_info (indx,
4009 R_X86_64_DTPOFF64);
4010 outrel.r_offset += GOT_ENTRY_SIZE;
4011 elf_append_rela (output_bfd, sreloc,
4012 &outrel);
4013 }
4014 }
4015
4016 dr_done:
4017 if (h != NULL)
4018 h->got.offset |= 1;
4019 else
4020 local_got_offsets[r_symndx] |= 1;
4021 }
4022
4023 if (off >= (bfd_vma) -2
4024 && ! GOT_TLS_GDESC_P (tls_type))
4025 abort ();
4026 if (r_type_tls == r_type)
4027 {
4028 if (r_type == R_X86_64_GOTPC32_TLSDESC
4029 || r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC
4030 || r_type == R_X86_64_TLSDESC_CALL)
4031 relocation = htab->elf.sgotplt->output_section->vma
4032 + htab->elf.sgotplt->output_offset
4033 + offplt + htab->sgotplt_jump_table_size;
4034 else
4035 relocation = htab->elf.sgot->output_section->vma
4036 + htab->elf.sgot->output_offset + off;
4037 unresolved_reloc = false;
4038 }
4039 else
4040 {
4041 bfd_vma roff = rel->r_offset;
4042
4043 if (r_type == R_X86_64_TLSGD)
4044 {
4045 /* GD->IE transition. For 64bit, change
4046 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4047 .word 0x6666; rex64; call __tls_get_addr@PLT
4048 or
4049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi
4050 .byte 0x66; rex64
4051 call *__tls_get_addr@GOTPCREL(%rip)
4052 which may be converted to
4053 addr32 call __tls_get_addr
4054 into:
4055 movq %fs:0, %rax
4056 addq foo@gottpoff(%rip), %rax
4057 For 32bit, change
4058 leaq foo@tlsgd(%rip), %rdi
4059 .word 0x6666; rex64; call __tls_get_addr@PLT
4060 or
4061 leaq foo@tlsgd(%rip), %rdi
4062 .byte 0x66; rex64;
4063 call *__tls_get_addr@GOTPCREL(%rip)
4064 which may be converted to
4065 addr32 call __tls_get_addr
4066 into:
4067 movl %fs:0, %eax
4068 addq foo@gottpoff(%rip), %rax
4069 For largepic, change:
4070 leaq foo@tlsgd(%rip), %rdi
4071 movabsq $__tls_get_addr@pltoff, %rax
4072 addq %r15, %rax
4073 call *%rax
4074 into:
4075 movq %fs:0, %rax
4076 addq foo@gottpoff(%rip), %rax
4077 nopw 0x0(%rax,%rax,1) */
4078 int largepic = 0;
4079 if (ABI_64_P (output_bfd))
4080 {
4081 if (contents[roff + 5] == 0xb8)
4082 {
4083 if (roff < 3
4084 || (roff - 3 + 22) > input_section->size)
4085 goto corrupt_input;
4086 memcpy (contents + roff - 3,
4087 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05"
4088 "\0\0\0\0\x66\x0f\x1f\x44\0", 22);
4089 largepic = 1;
4090 }
4091 else
4092 {
4093 if (roff < 4
4094 || (roff - 4 + 16) > input_section->size)
4095 goto corrupt_input;
4096 memcpy (contents + roff - 4,
4097 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4098 16);
4099 }
4100 }
4101 else
4102 {
4103 if (roff < 3
4104 || (roff - 3 + 15) > input_section->size)
4105 goto corrupt_input;
4106 memcpy (contents + roff - 3,
4107 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0",
4108 15);
4109 }
4110
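/* The 4-byte displacement is stored at ROFF + 8 + LARGEPIC and is
relative to the end of that field, which accounts for the extra
12 + LARGEPIC subtracted here.  */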
4111 relocation = (htab->elf.sgot->output_section->vma
4112 + htab->elf.sgot->output_offset + off
4113 - roff
4114 - largepic
4115 - input_section->output_section->vma
4116 - input_section->output_offset
4117 - 12);
4118 bfd_put_32 (output_bfd, relocation,
4119 contents + roff + 8 + largepic);
4120 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */
4121 rel++;
4122 wrel++;
4123 continue;
4124 }
4125 else if (r_type == R_X86_64_GOTPC32_TLSDESC
4126 || r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC)
4127 {
4128 /* GDesc -> IE transition.
4129 It's originally something like:
4130 leaq x@tlsdesc(%rip), %rax <--- LP64 mode.
4131 rex leal x@tlsdesc(%rip), %eax <--- X32 mode.
4132
4133 Change it to:
4134 # before xchg %ax,%ax in LP64 mode.
4135 movq x@gottpoff(%rip), %rax
4136 # before nopl (%rax) in X32 mode.
4137 rex movl x@gottpoff(%rip), %eax
4138 */
4139
4140 /* Now modify the instruction as appropriate. To
4141 turn a lea into a mov in the form we use it, it
4142 suffices to change the second byte from 0x8d to
4143 0x8b. */
4144 if (roff < 2)
4145 goto corrupt_input;
4146 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2);
4147
4148 bfd_put_32 (output_bfd,
4149 htab->elf.sgot->output_section->vma
4150 + htab->elf.sgot->output_offset + off
4151 - rel->r_offset
4152 - input_section->output_section->vma
4153 - input_section->output_offset
4154 - 4,
4155 contents + roff);
4156 continue;
4157 }
4158 else if (r_type == R_X86_64_TLSDESC_CALL)
4159 {
4160 /* GDesc -> IE transition.
4161 It's originally:
4162 call *(%rax) <--- LP64 mode.
4163 call *(%eax) <--- X32 mode.
4164
4165 Change it to:
4166 xchg %ax, %ax <-- LP64 mode.
4167 nopl (%rax) <-- X32 mode.
4168 */
4169
4170 unsigned int prefix = 0;
4171 if (!ABI_64_P (input_bfd))
4172 {
4173 /* Check for call *x@tlsdesc(%eax). */
4174 if (contents[roff] == 0x67)
4175 prefix = 1;
4176 }
4177 if (prefix)
4178 {
4179 bfd_put_8 (output_bfd, 0x0f, contents + roff);
4180 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1);
4181 bfd_put_8 (output_bfd, 0x00, contents + roff + 2);
4182 }
4183 else
4184 {
4185 bfd_put_8 (output_bfd, 0x66, contents + roff);
4186 bfd_put_8 (output_bfd, 0x90, contents + roff + 1);
4187 }
4188 continue;
4189 }
4190 else
4191 BFD_ASSERT (false);
4192 }
4193 break;
4194
4195 case R_X86_64_TLSLD:
4196 if (! elf_x86_64_tls_transition (info, input_bfd,
4197 input_section, contents,
4198 symtab_hdr, sym_hashes,
4199 &r_type, GOT_UNKNOWN, rel,
4200 relend, h, r_symndx, true))
4201 return false;
4202
4203 if (r_type != R_X86_64_TLSLD)
4204 {
4205 /* LD->LE transition:
4206 leaq foo@tlsld(%rip), %rdi
4207 call __tls_get_addr@PLT
4208 For 64bit, we change it into:
4209 .word 0x6666; .byte 0x66; movq %fs:0, %rax
4210 For 32bit, we change it into:
4211 nopl 0x0(%rax); movl %fs:0, %eax
4212 Or
4213 leaq foo@tlsld(%rip), %rdi;
4214 call *__tls_get_addr@GOTPCREL(%rip)
4215 which may be converted to
4216 addr32 call __tls_get_addr
4217 For 64bit, we change it into:
4218 .word 0x6666; .word 0x6666; movq %fs:0, %rax
4219 For 32bit, we change it into:
4220 nopw 0x0(%rax); movl %fs:0, %eax
4221 For largepic, change:
4222 leaq foo@tlsld(%rip), %rdi
4223 movabsq $__tls_get_addr@pltoff, %rax
4224 addq %rbx, %rax
4225 call *%rax
4226 into
4227 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1)
4228 movq %fs:0, %rax */
4229
4230 BFD_ASSERT (r_type == R_X86_64_TPOFF32);
4231 if (ABI_64_P (output_bfd))
4232 {
4233 if ((rel->r_offset + 5) >= input_section->size)
4234 goto corrupt_input;
4235 if (contents[rel->r_offset + 5] == 0xb8)
4236 {
4237 if (rel->r_offset < 3
4238 || (rel->r_offset - 3 + 22) > input_section->size)
4239 goto corrupt_input;
4240 memcpy (contents + rel->r_offset - 3,
4241 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0"
4242 "\x64\x48\x8b\x04\x25\0\0\0", 22);
4243 }
4244 else if (contents[rel->r_offset + 4] == 0xff
4245 || contents[rel->r_offset + 4] == 0x67)
4246 {
4247 if (rel->r_offset < 3
4248 || (rel->r_offset - 3 + 13) > input_section->size)
4249 goto corrupt_input;
4250 memcpy (contents + rel->r_offset - 3,
4251 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0",
4252 13);
4253
4254 }
4255 else
4256 {
4257 if (rel->r_offset < 3
4258 || (rel->r_offset - 3 + 12) > input_section->size)
4259 goto corrupt_input;
4260 memcpy (contents + rel->r_offset - 3,
4261 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12);
4262 }
4263 }
4264 else
4265 {
4266 if ((rel->r_offset + 4) >= input_section->size)
4267 goto corrupt_input;
4268 if (contents[rel->r_offset + 4] == 0xff)
4269 {
4270 if (rel->r_offset < 3
4271 || (rel->r_offset - 3 + 13) > input_section->size)
4272 goto corrupt_input;
4273 memcpy (contents + rel->r_offset - 3,
4274 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0",
4275 13);
4276 }
4277 else
4278 {
4279 if (rel->r_offset < 3
4280 || (rel->r_offset - 3 + 12) > input_section->size)
4281 goto corrupt_input;
4282 memcpy (contents + rel->r_offset - 3,
4283 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12);
4284 }
4285 }
4286 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX
4287 and R_X86_64_PLTOFF64. */
4288 rel++;
4289 wrel++;
4290 continue;
4291 }
4292
4293 if (htab->elf.sgot == NULL)
4294 abort ();
4295
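/* All local-dynamic accesses share one GOT entry pair: the first word
gets the module ID via R_X86_64_DTPMOD64 and the second stays zero.
Bit 0 of the cached offset marks the entry as initialized.  */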
4296 off = htab->tls_ld_or_ldm_got.offset;
4297 if (off & 1)
4298 off &= ~1;
4299 else
4300 {
4301 Elf_Internal_Rela outrel;
4302
4303 if (htab->elf.srelgot == NULL)
4304 abort ();
4305
4306 outrel.r_offset = (htab->elf.sgot->output_section->vma
4307 + htab->elf.sgot->output_offset + off);
4308
4309 bfd_put_64 (output_bfd, 0,
4310 htab->elf.sgot->contents + off);
4311 bfd_put_64 (output_bfd, 0,
4312 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE);
4313 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64);
4314 outrel.r_addend = 0;
4315 elf_append_rela (output_bfd, htab->elf.srelgot,
4316 &outrel);
4317 htab->tls_ld_or_ldm_got.offset |= 1;
4318 }
4319 relocation = htab->elf.sgot->output_section->vma
4320 + htab->elf.sgot->output_offset + off;
4321 unresolved_reloc = false;
4322 break;
4323
4324 case R_X86_64_DTPOFF32:
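/* In an executable, a DTPOFF32 in a code section pairs with an LD
access that has been relaxed to LE, so resolve it to a TP offset;
in other cases (e.g. debug info) resolve it against the DTP base.  */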
4325 if (!bfd_link_executable (info)
4326 || (input_section->flags & SEC_CODE) == 0)
4327 relocation -= _bfd_x86_elf_dtpoff_base (info);
4328 else
4329 relocation = elf_x86_64_tpoff (info, relocation);
4330 break;
4331
4332 case R_X86_64_TPOFF32:
4333 case R_X86_64_TPOFF64:
4334 BFD_ASSERT (bfd_link_executable (info));
4335 relocation = elf_x86_64_tpoff (info, relocation);
4336 break;
4337
4338 case R_X86_64_DTPOFF64:
4339 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0);
4340 relocation -= _bfd_x86_elf_dtpoff_base (info);
4341 break;
4342
4343 default:
4344 break;
4345 }
4346
4347 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections
4348 because such sections are not SEC_ALLOC and thus ld.so will
4349 not process them. */
4350 if (unresolved_reloc
4351 && !((input_section->flags & SEC_DEBUGGING) != 0
4352 && h->def_dynamic)
4353 && _bfd_elf_section_offset (output_bfd, info, input_section,
4354 rel->r_offset) != (bfd_vma) -1)
4355 {
4356 switch (r_type)
4357 {
4358 case R_X86_64_32S:
4359 sec = h->root.u.def.section;
4360 if ((info->nocopyreloc || eh->def_protected)
4361 && !(h->root.u.def.section->flags & SEC_CODE))
4362 return elf_x86_64_need_pic (info, input_bfd, input_section,
4363 h, NULL, NULL, howto);
4364 /* Fall through. */
4365
4366 default:
4367 _bfd_error_handler
4368 /* xgettext:c-format */
4369 (_("%pB(%pA+%#" PRIx64 "): "
4370 "unresolvable %s relocation against symbol `%s'"),
4371 input_bfd,
4372 input_section,
4373 (uint64_t) rel->r_offset,
4374 howto->name,
4375 h->root.root.string);
4376 return false;
4377 }
4378 }
4379
4380 do_relocation:
4381 r = _bfd_final_link_relocate (howto, input_bfd, input_section,
4382 contents, rel->r_offset,
4383 relocation, rel->r_addend);
4384
4385 check_relocation_error:
4386 if (r != bfd_reloc_ok)
4387 {
4388 const char *name;
4389
4390 if (h != NULL)
4391 name = h->root.root.string;
4392 else
4393 {
4394 name = bfd_elf_string_from_elf_section (input_bfd,
4395 symtab_hdr->sh_link,
4396 sym->st_name);
4397 if (name == NULL)
4398 return false;
4399 if (*name == '\0')
4400 name = bfd_section_name (sec);
4401 }
4402
4403 if (r == bfd_reloc_overflow)
4404 {
4405 if (converted_reloc)
4406 {
4407 info->callbacks->einfo
4408 ("%X%H:", input_bfd, input_section, rel->r_offset);
4409 info->callbacks->einfo
4410 (_(" failed to convert GOTPCREL relocation against "
4411 "'%s'; relink with --no-relax\n"),
4412 name);
4413 status = false;
4414 continue;
4415 }
4416 (*info->callbacks->reloc_overflow)
4417 (info, (h ? &h->root : NULL), name, howto->name,
4418 (bfd_vma) 0, input_bfd, input_section, rel->r_offset);
4419 }
4420 else
4421 {
4422 _bfd_error_handler
4423 /* xgettext:c-format */
4424 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"),
4425 input_bfd, input_section,
4426 (uint64_t) rel->r_offset, name, (int) r);
4427 return false;
4428 }
4429 }
4430
4431 if (wrel != rel)
4432 *wrel = *rel;
4433 }
4434
4435 if (wrel != rel)
4436 {
4437 Elf_Internal_Shdr *rel_hdr;
4438 size_t deleted = rel - wrel;
4439
4440 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section);
4441 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4442 if (rel_hdr->sh_size == 0)
4443 {
4444 /* It is too late to remove an empty reloc section. Leave
4445 one NONE reloc.
4446 ??? What is wrong with an empty section??? */
4447 rel_hdr->sh_size = rel_hdr->sh_entsize;
4448 deleted -= 1;
4449 }
4450 rel_hdr = _bfd_elf_single_rel_hdr (input_section);
4451 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted;
4452 input_section->reloc_count -= deleted;
4453 }
4454
4455 return status;
4456 }
4457
4458 /* Finish up dynamic symbol handling. We set the contents of various
4459 dynamic sections here. */
4460
4461 static bool
4462 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd,
4463 struct bfd_link_info *info,
4464 struct elf_link_hash_entry *h,
4465 Elf_Internal_Sym *sym)
4466 {
4467 struct elf_x86_link_hash_table *htab;
4468 bool use_plt_second;
4469 struct elf_x86_link_hash_entry *eh;
4470 bool local_undefweak;
4471
4472 htab = elf_x86_hash_table (info, X86_64_ELF_DATA);
4473 if (htab == NULL)
4474 return false;
4475
4476 /* Use the second PLT section only if there is a .plt section.  */
4477 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL;
4478
4479 eh = (struct elf_x86_link_hash_entry *) h;
4480 if (eh->no_finish_dynamic_symbol)
4481 abort ();
4482
4483 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for
4484 resolved undefined weak symbols in executables so that their
4485 references have value 0 at run-time. */
4486 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh);
4487
4488 if (h->plt.offset != (bfd_vma) -1)
4489 {
4490 bfd_vma plt_index;
4491 bfd_vma got_offset, plt_offset;
4492 Elf_Internal_Rela rela;
4493 bfd_byte *loc;
4494 asection *plt, *gotplt, *relplt, *resolved_plt;
4495 const struct elf_backend_data *bed;
4496 bfd_vma plt_got_pcrel_offset;
4497
4498 /* When building a static executable, use .iplt, .igot.plt and
4499 .rela.iplt sections for STT_GNU_IFUNC symbols. */
4500 if (htab->elf.splt != NULL)
4501 {
4502 plt = htab->elf.splt;
4503 gotplt = htab->elf.sgotplt;
4504 relplt = htab->elf.srelplt;
4505 }
4506 else
4507 {
4508 plt = htab->elf.iplt;
4509 gotplt = htab->elf.igotplt;
4510 relplt = htab->elf.irelplt;
4511 }
4512
4513 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak)
4514
4515 /* Get the index in the procedure linkage table which
4516 corresponds to this symbol. This is the index of this symbol
4517 in all the symbols for which we are making plt entries. The
4518 first entry in the procedure linkage table is reserved.
4519
4520 Get the offset into the .got table of the entry that
4521 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE
4522 bytes. The first three are reserved for the dynamic linker.
4523
4524 For static executables, we don't reserve anything. */
4525
4526 if (plt == htab->elf.splt)
4527 {
4528 got_offset = (h->plt.offset / htab->plt.plt_entry_size
4529 - htab->plt.has_plt0);
4530 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE;
4531 }
4532 else
4533 {
4534 got_offset = h->plt.offset / htab->plt.plt_entry_size;
4535 got_offset = got_offset * GOT_ENTRY_SIZE;
4536 }
4537
4538 /* Fill in the entry in the procedure linkage table. */
4539 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry,
4540 htab->plt.plt_entry_size);
4541 if (use_plt_second)
4542 {
4543 memcpy (htab->plt_second->contents + eh->plt_second.offset,
4544 htab->non_lazy_plt->plt_entry,
4545 htab->non_lazy_plt->plt_entry_size);
4546
4547 resolved_plt = htab->plt_second;
4548 plt_offset = eh->plt_second.offset;
4549 }
4550 else
4551 {
4552 resolved_plt = plt;
4553 plt_offset = h->plt.offset;
4554 }
4555
4556 /* Now fill in the PC-relative parts of the PLT entry.  */
4557 
4558 /* Put in the offset of the GOT entry relative to the end of the
4559 PC-relative instruction that refers to it.  */
4560 plt_got_pcrel_offset = (gotplt->output_section->vma
4561 + gotplt->output_offset
4562 + got_offset
4563 - resolved_plt->output_section->vma
4564 - resolved_plt->output_offset
4565 - plt_offset
4566 - htab->plt.plt_got_insn_size);
4567
4568 /* Check PC-relative offset overflow in PLT entry. */
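/* Adding 0x80000000 maps the representable signed 32-bit range onto
[0, 0xffffffff], so anything above that does not fit the 4-byte
displacement field.  */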
4569 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff)
4570 /* xgettext:c-format */
4571 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"),
4572 output_bfd, h->root.root.string);
4573
4574 bfd_put_32 (output_bfd, plt_got_pcrel_offset,
4575 (resolved_plt->contents + plt_offset
4576 + htab->plt.plt_got_offset));
4577
4578 /* Fill in the entry in the global offset table, initially this
4579 points to the second part of the PLT entry. Leave the entry
4580 as zero for undefined weak symbol in PIE. No PLT relocation
4581 against undefined weak symbol in PIE. */
4582 if (!local_undefweak)
4583 {
4584 if (htab->plt.has_plt0)
4585 bfd_put_64 (output_bfd, (plt->output_section->vma
4586 + plt->output_offset
4587 + h->plt.offset
4588 + htab->lazy_plt->plt_lazy_offset),
4589 gotplt->contents + got_offset);
4590
4591 /* Fill in the entry in the .rela.plt section. */
4592 rela.r_offset = (gotplt->output_section->vma
4593 + gotplt->output_offset
4594 + got_offset);
4595 if (PLT_LOCAL_IFUNC_P (info, h))
4596 {
4597 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4598 h->root.root.string,
4599 h->root.u.def.section->owner);
4600
4601 /* If an STT_GNU_IFUNC symbol is locally defined, generate
4602 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */
4603 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE);
4604 rela.r_addend = (h->root.u.def.value
4605 + h->root.u.def.section->output_section->vma
4606 + h->root.u.def.section->output_offset);
4607
4608 if (htab->params->report_relative_reloc)
4609 _bfd_x86_elf_link_report_relative_reloc
4610 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela);
4611
4612 /* R_X86_64_IRELATIVE comes last. */
4613 plt_index = htab->next_irelative_index--;
4614 }
4615 else
4616 {
4617 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT);
4618 if (htab->params->mark_plt)
4619 rela.r_addend = (resolved_plt->output_section->vma
4620 + plt_offset
4621 + htab->plt.plt_indirect_branch_offset);
4622 else
4623 rela.r_addend = 0;
4624 plt_index = htab->next_jump_slot_index++;
4625 }
4626
4627 /* Don't fill in the second and third slots of the PLT entry for
4628 static executables or when there is no PLT0.  */
4629 if (plt == htab->elf.splt && htab->plt.has_plt0)
4630 {
4631 bfd_vma plt0_offset
4632 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end;
4633
4634 /* Put relocation index. */
4635 bfd_put_32 (output_bfd, plt_index,
4636 (plt->contents + h->plt.offset
4637 + htab->lazy_plt->plt_reloc_offset));
4638
4639 /* Put offset for jmp .PLT0 and check for overflow. We don't
4640 check relocation index for overflow since branch displacement
4641 will overflow first. */
4642 if (plt0_offset > 0x80000000)
4643 /* xgettext:c-format */
4644 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"),
4645 output_bfd, h->root.root.string);
4646 bfd_put_32 (output_bfd, - plt0_offset,
4647 (plt->contents + h->plt.offset
4648 + htab->lazy_plt->plt_plt_offset));
4649 }
4650
4651 bed = get_elf_backend_data (output_bfd);
4652 loc = relplt->contents + plt_index * bed->s->sizeof_rela;
4653 bed->s->swap_reloca_out (output_bfd, &rela, loc);
4654 }
4655 }
4656 else if (eh->plt_got.offset != (bfd_vma) -1)
4657 {
4658 bfd_vma got_offset, plt_offset;
4659 asection *plt, *got;
4660 bool got_after_plt;
4661 int32_t got_pcrel_offset;
4662
4663 /* Set the entry in the GOT procedure linkage table. */
4664 plt = htab->plt_got;
4665 got = htab->elf.sgot;
4666 got_offset = h->got.offset;
4667
4668 if (got_offset == (bfd_vma) -1
4669 || (h->type == STT_GNU_IFUNC && h->def_regular)
4670 || plt == NULL
4671 || got == NULL)
4672 abort ();
4673
4674 /* Use the non-lazy PLT entry template for the GOT PLT since they
4675 are identical.  */
4676 /* Fill in the entry in the GOT procedure linkage table. */
4677 plt_offset = eh->plt_got.offset;
4678 memcpy (plt->contents + plt_offset,
4679 htab->non_lazy_plt->plt_entry,
4680 htab->non_lazy_plt->plt_entry_size);
4681
4682 /* Put in the offset of the GOT entry relative to the end of the
4683 PC-relative instruction that refers to it.  */
4684 got_pcrel_offset = (got->output_section->vma
4685 + got->output_offset
4686 + got_offset
4687 - plt->output_section->vma
4688 - plt->output_offset
4689 - plt_offset
4690 - htab->non_lazy_plt->plt_got_insn_size);
4691
4692 /* Check PC-relative offset overflow in GOT PLT entry. */
4693 got_after_plt = got->output_section->vma > plt->output_section->vma;
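/* GOT_PCREL_OFFSET is truncated to 32 bits; if its sign disagrees with
the relative placement of the two output sections, the real offset
did not fit.  */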
4694 if ((got_after_plt && got_pcrel_offset < 0)
4695 || (!got_after_plt && got_pcrel_offset > 0))
4696 /* xgettext:c-format */
4697 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"),
4698 output_bfd, h->root.root.string);
4699
4700 bfd_put_32 (output_bfd, got_pcrel_offset,
4701 (plt->contents + plt_offset
4702 + htab->non_lazy_plt->plt_got_offset));
4703 }
4704
4705 if (!local_undefweak
4706 && !h->def_regular
4707 && (h->plt.offset != (bfd_vma) -1
4708 || eh->plt_got.offset != (bfd_vma) -1))
4709 {
4710 /* Mark the symbol as undefined, rather than as defined in
4711 the .plt section. Leave the value if there were any
4712 relocations where pointer equality matters (this is a clue
4713 for the dynamic linker, to make function pointer
4714 comparisons work between an application and shared
4715 library), otherwise set it to zero. If a function is only
4716 called from a binary, there is no need to slow down
4717 shared libraries because of that. */
4718 sym->st_shndx = SHN_UNDEF;
4719 if (!h->pointer_equality_needed)
4720 sym->st_value = 0;
4721 }
4722
4723 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym);
4724
4725 /* Don't generate dynamic GOT relocation against undefined weak
4726 symbol in executable. */
4727 if (h->got.offset != (bfd_vma) -1
4728 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type)
4729 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE
4730 && !local_undefweak)
4731 {
4732 Elf_Internal_Rela rela;
4733 asection *relgot = htab->elf.srelgot;
4734 const char *relative_reloc_name = NULL;
4735 bool generate_dynamic_reloc = true;
4736
4737 /* This symbol has an entry in the global offset table. Set it
4738 up. */
4739 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL)
4740 abort ();
4741
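/* Bit 0 of h->got.offset marks a GOT entry that has already been
initialized; mask it off to get the real offset.  */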
4742 rela.r_offset = (htab->elf.sgot->output_section->vma
4743 + htab->elf.sgot->output_offset
4744 + (h->got.offset &~ (bfd_vma) 1));
4745
4746 /* If this is a static link, or it is a -Bsymbolic link and the
4747 symbol is defined locally or was forced to be local because
4748 of a version file, we just want to emit a RELATIVE reloc.
4749 The entry in the global offset table will already have been
4750 initialized in the relocate_section function. */
4751 if (h->def_regular
4752 && h->type == STT_GNU_IFUNC)
4753 {
4754 if (h->plt.offset == (bfd_vma) -1)
4755 {
4756 /* STT_GNU_IFUNC is referenced without PLT. */
4757 if (htab->elf.splt == NULL)
4758 {
4759 /* Use the .rel[a].iplt section to store .got relocations
4760 in a static executable.  */
4761 relgot = htab->elf.irelplt;
4762 }
4763 if (SYMBOL_REFERENCES_LOCAL_P (info, h))
4764 {
4765 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"),
4766 h->root.root.string,
4767 h->root.u.def.section->owner);
4768
4769 rela.r_info = htab->r_info (0,
4770 R_X86_64_IRELATIVE);
4771 rela.r_addend = (h->root.u.def.value
4772 + h->root.u.def.section->output_section->vma
4773 + h->root.u.def.section->output_offset);
4774 relative_reloc_name = "R_X86_64_IRELATIVE";
4775 }
4776 else
4777 goto do_glob_dat;
4778 }
4779 else if (bfd_link_pic (info))
4780 {
4781 /* Generate R_X86_64_GLOB_DAT. */
4782 goto do_glob_dat;
4783 }
4784 else
4785 {
4786 asection *plt;
4787 bfd_vma plt_offset;
4788
4789 if (!h->pointer_equality_needed)
4790 abort ();
4791
4792 /* For non-shared object, we can't use .got.plt, which
4793 contains the real function address if we need pointer
4794 equality. We load the GOT entry with the PLT entry. */
4795 if (htab->plt_second != NULL)
4796 {
4797 plt = htab->plt_second;
4798 plt_offset = eh->plt_second.offset;
4799 }
4800 else
4801 {
4802 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt;
4803 plt_offset = h->plt.offset;
4804 }
4805 bfd_put_64 (output_bfd, (plt->output_section->vma
4806 + plt->output_offset
4807 + plt_offset),
4808 htab->elf.sgot->contents + h->got.offset);
4809 return true;
4810 }
4811 }
4812 else if (bfd_link_pic (info)
4813 && SYMBOL_REFERENCES_LOCAL_P (info, h))
4814 {
4815 if (!SYMBOL_DEFINED_NON_SHARED_P (h))
4816 return false;
4817 BFD_ASSERT ((h->got.offset & 1) != 0);
4818 if (info->enable_dt_relr)
4819 generate_dynamic_reloc = false;
4820 else
4821 {
4822 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE);
4823 rela.r_addend = (h->root.u.def.value
4824 + h->root.u.def.section->output_section->vma
4825 + h->root.u.def.section->output_offset);
4826 relative_reloc_name = "R_X86_64_RELATIVE";
4827 }
4828 }
4829 else
4830 {
4831 BFD_ASSERT ((h->got.offset & 1) == 0);
4832 do_glob_dat:
4833 bfd_put_64 (output_bfd, (bfd_vma) 0,
4834 htab->elf.sgot->contents + h->got.offset);
4835 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT);
4836 rela.r_addend = 0;
4837 }
4838
4839 if (generate_dynamic_reloc)
4840 {
4841 if (relative_reloc_name != NULL
4842 && htab->params->report_relative_reloc)
4843 _bfd_x86_elf_link_report_relative_reloc
4844 (info, relgot, h, sym, relative_reloc_name, &rela);
4845
4846 elf_append_rela (output_bfd, relgot, &rela);
4847 }
4848 }
4849
4850 if (h->needs_copy)
4851 {
4852 Elf_Internal_Rela rela;
4853 asection *s;
4854
4855 /* This symbol needs a copy reloc. Set it up. */
4856 VERIFY_COPY_RELOC (h, htab)
4857
4858 rela.r_offset = (h->root.u.def.value
4859 + h->root.u.def.section->output_section->vma
4860 + h->root.u.def.section->output_offset);
4861 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY);
4862 rela.r_addend = 0;
4863 if (h->root.u.def.section == htab->elf.sdynrelro)
4864 s = htab->elf.sreldynrelro;
4865 else
4866 s = htab->elf.srelbss;
4867 elf_append_rela (output_bfd, s, &rela);
4868 }
4869
4870 return true;
4871 }
4872
4873 /* Finish up local dynamic symbol handling. We set the contents of
4874 various dynamic sections here. */
4875
4876 static int
4877 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf)
4878 {
4879 struct elf_link_hash_entry *h
4880 = (struct elf_link_hash_entry *) *slot;
4881 struct bfd_link_info *info
4882 = (struct bfd_link_info *) inf;
4883
4884 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4885 info, h, NULL);
4886 }
4887
4888 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry
4889 here since an undefined weak symbol may not be dynamic, in which case
4890 elf_x86_64_finish_dynamic_symbol is not called for it.  */
4891
4892 static bool
4893 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh,
4894 void *inf)
4895 {
4896 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh;
4897 struct bfd_link_info *info = (struct bfd_link_info *) inf;
4898
4899 if (h->root.type != bfd_link_hash_undefweak
4900 || h->dynindx != -1)
4901 return true;
4902
4903 return elf_x86_64_finish_dynamic_symbol (info->output_bfd,
4904 info, h, NULL);
4905 }
4906
4907 /* Used to decide how to sort relocs in an optimal manner for the
4908 dynamic linker, before writing them out. */
4909
4910 static enum elf_reloc_type_class
4911 elf_x86_64_reloc_type_class (const struct bfd_link_info *info,
4912 const asection *rel_sec ATTRIBUTE_UNUSED,
4913 const Elf_Internal_Rela *rela)
4914 {
4915 bfd *abfd = info->output_bfd;
4916 const struct elf_backend_data *bed = get_elf_backend_data (abfd);
4917 struct elf_x86_link_hash_table *htab
4918 = elf_x86_hash_table (info, X86_64_ELF_DATA);
4919
4920 if (htab->elf.dynsym != NULL
4921 && htab->elf.dynsym->contents != NULL)
4922 {
4923 /* Check relocation against STT_GNU_IFUNC symbol if there are
4924 dynamic symbols. */
4925 unsigned long r_symndx = htab->r_sym (rela->r_info);
4926 if (r_symndx != STN_UNDEF)
4927 {
4928 Elf_Internal_Sym sym;
4929 if (!bed->s->swap_symbol_in (abfd,
4930 (htab->elf.dynsym->contents
4931 + r_symndx * bed->s->sizeof_sym),
4932 0, &sym))
4933 abort ();
4934
4935 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC)
4936 return reloc_class_ifunc;
4937 }
4938 }
4939
4940 switch ((int) ELF32_R_TYPE (rela->r_info))
4941 {
4942 case R_X86_64_IRELATIVE:
4943 return reloc_class_ifunc;
4944 case R_X86_64_RELATIVE:
4945 case R_X86_64_RELATIVE64:
4946 return reloc_class_relative;
4947 case R_X86_64_JUMP_SLOT:
4948 return reloc_class_plt;
4949 case R_X86_64_COPY:
4950 return reloc_class_copy;
4951 default:
4952 return reloc_class_normal;
4953 }
4954 }
4955
4956 /* Finish up the dynamic sections. */
4957
4958 static bool
4959 elf_x86_64_finish_dynamic_sections (bfd *output_bfd,
4960 struct bfd_link_info *info)
4961 {
4962 struct elf_x86_link_hash_table *htab;
4963
4964 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info);
4965 if (htab == NULL)
4966 return false;
4967
4968 if (! htab->elf.dynamic_sections_created)
4969 return true;
4970
4971 if (htab->elf.splt && htab->elf.splt->size > 0)
4972 {
4973 if (bfd_is_abs_section (htab->elf.splt->output_section))
4974 {
4975 info->callbacks->einfo
4976 (_("%F%P: discarded output section: `%pA'\n"),
4977 htab->elf.splt);
4978 return false;
4979 }
4980
4981 elf_section_data (htab->elf.splt->output_section)
4982 ->this_hdr.sh_entsize = htab->plt.plt_entry_size;
4983
4984 if (htab->plt.has_plt0)
4985 {
4986 /* Fill in the special first entry in the procedure linkage
4987 table. */
4988 memcpy (htab->elf.splt->contents,
4989 htab->lazy_plt->plt0_entry,
4990 htab->lazy_plt->plt0_entry_size);
4991 /* Add the offset for pushq GOT+8(%rip); since the instruction
4992 uses 6 bytes, subtract this value.  */
4993 bfd_put_32 (output_bfd,
4994 (htab->elf.sgotplt->output_section->vma
4995 + htab->elf.sgotplt->output_offset
4996 + 8
4997 - htab->elf.splt->output_section->vma
4998 - htab->elf.splt->output_offset
4999 - 6),
5000 (htab->elf.splt->contents
5001 + htab->lazy_plt->plt0_got1_offset));
5002 /* Add offset for the PC-relative instruction accessing
5003 GOT+16, subtracting the offset to the end of that
5004 instruction. */
5005 bfd_put_32 (output_bfd,
5006 (htab->elf.sgotplt->output_section->vma
5007 + htab->elf.sgotplt->output_offset
5008 + 16
5009 - htab->elf.splt->output_section->vma
5010 - htab->elf.splt->output_offset
5011 - htab->lazy_plt->plt0_got2_insn_end),
5012 (htab->elf.splt->contents
5013 + htab->lazy_plt->plt0_got2_offset));
5014 }
5015
5016 if (htab->elf.tlsdesc_plt)
5017 {
5018 bfd_put_64 (output_bfd, (bfd_vma) 0,
5019 htab->elf.sgot->contents + htab->elf.tlsdesc_got);
5020
5021 memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt,
5022 htab->lazy_plt->plt_tlsdesc_entry,
5023 htab->lazy_plt->plt_tlsdesc_entry_size);
5024
5025 /* Add the offset for pushq GOT+8(%rip); since ENDBR64 uses 4
5026 bytes and the instruction uses 6 bytes, subtract these
5027 values.  */
5028 bfd_put_32 (output_bfd,
5029 (htab->elf.sgotplt->output_section->vma
5030 + htab->elf.sgotplt->output_offset
5031 + 8
5032 - htab->elf.splt->output_section->vma
5033 - htab->elf.splt->output_offset
5034 - htab->elf.tlsdesc_plt
5035 - htab->lazy_plt->plt_tlsdesc_got1_insn_end),
5036 (htab->elf.splt->contents
5037 + htab->elf.tlsdesc_plt
5038 + htab->lazy_plt->plt_tlsdesc_got1_offset));
5039 /* Add offset for indirect branch via GOT+TDG, where TDG
5040 stands for htab->tlsdesc_got, subtracting the offset
5041 to the end of that instruction. */
5042 bfd_put_32 (output_bfd,
5043 (htab->elf.sgot->output_section->vma
5044 + htab->elf.sgot->output_offset
5045 + htab->elf.tlsdesc_got
5046 - htab->elf.splt->output_section->vma
5047 - htab->elf.splt->output_offset
5048 - htab->elf.tlsdesc_plt
5049 - htab->lazy_plt->plt_tlsdesc_got2_insn_end),
5050 (htab->elf.splt->contents
5051 + htab->elf.tlsdesc_plt
5052 + htab->lazy_plt->plt_tlsdesc_got2_offset));
5053 }
5054 }
5055
5056 /* Fill PLT entries for undefined weak symbols in PIE. */
5057 if (bfd_link_pie (info))
5058 bfd_hash_traverse (&info->hash->table,
5059 elf_x86_64_pie_finish_undefweak_symbol,
5060 info);
5061
5062 return true;
5063 }
5064
5065 /* Fill PLT/GOT entries and allocate dynamic relocations for local
5066 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table.
5067 It has to be done before elf_link_sort_relocs is called so that
5068 dynamic relocations are properly sorted. */
5069
5070 static bool
5071 elf_x86_64_output_arch_local_syms
5072 (bfd *output_bfd ATTRIBUTE_UNUSED,
5073 struct bfd_link_info *info,
5074 void *flaginfo ATTRIBUTE_UNUSED,
5075 int (*func) (void *, const char *,
5076 Elf_Internal_Sym *,
5077 asection *,
5078 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED)
5079 {
5080 struct elf_x86_link_hash_table *htab
5081 = elf_x86_hash_table (info, X86_64_ELF_DATA);
5082 if (htab == NULL)
5083 return false;
5084
5085 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */
5086 htab_traverse (htab->loc_hash_table,
5087 elf_x86_64_finish_local_dynamic_symbol,
5088 info);
5089
5090 return true;
5091 }
5092
5093 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all
5094 dynamic relocations. */
5095
5096 static long
5097 elf_x86_64_get_synthetic_symtab (bfd *abfd,
5098 long symcount ATTRIBUTE_UNUSED,
5099 asymbol **syms ATTRIBUTE_UNUSED,
5100 long dynsymcount,
5101 asymbol **dynsyms,
5102 asymbol **ret)
5103 {
5104 long count, i, n;
5105 int j;
5106 bfd_byte *plt_contents;
5107 long relsize;
5108 const struct elf_x86_lazy_plt_layout *lazy_plt;
5109 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt;
5110 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt;
5111 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt;
5112 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt;
5113 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt;
5114 const struct elf_x86_lazy_plt_layout *x32_lazy_ibt_plt;
5115 const struct elf_x86_non_lazy_plt_layout *x32_non_lazy_ibt_plt;
5116 asection *plt;
5117 enum elf_x86_plt_type plt_type;
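/* Candidate PLT sections to examine; TYPE starts as a guess and is
refined below by matching the section contents against the known
PLT entry templates.  */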
5118 struct elf_x86_plt plts[] =
5119 {
5120 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 },
5121 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 },
5122 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 },
5123 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 },
5124 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }
5125 };
5126
5127 *ret = NULL;
5128
5129 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0)
5130 return 0;
5131
5132 if (dynsymcount <= 0)
5133 return 0;
5134
5135 relsize = bfd_get_dynamic_reloc_upper_bound (abfd);
5136 if (relsize <= 0)
5137 return -1;
5138
5139 lazy_plt = &elf_x86_64_lazy_plt;
5140 non_lazy_plt = &elf_x86_64_non_lazy_plt;
5141 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt;
5142 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt;
5143 if (ABI_64_P (abfd))
5144 {
5145 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt;
5146 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt;
5147 x32_lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5148 x32_non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5149 }
5150 else
5151 {
5152 lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5153 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5154 x32_lazy_ibt_plt = NULL;
5155 x32_non_lazy_ibt_plt = NULL;
5156 }
5157
5158 count = 0;
5159 for (j = 0; plts[j].name != NULL; j++)
5160 {
5161 plt = bfd_get_section_by_name (abfd, plts[j].name);
5162 if (plt == NULL
5163 || plt->size == 0
5164 || (plt->flags & SEC_HAS_CONTENTS) == 0)
5165 continue;
5166
5167 /* Get the PLT section contents. */
5168 if (!bfd_malloc_and_get_section (abfd, plt, &plt_contents))
5169 break;
5170
5171 /* Check what kind of PLT it is. */
5172 plt_type = plt_unknown;
5173 if (plts[j].type == plt_unknown
5174 && (plt->size >= (lazy_plt->plt_entry_size
5175 + lazy_plt->plt_entry_size)))
5176 {
5177 /* Match lazy PLT first. Need to check the first two
5178 instructions. */
5179 if ((memcmp (plt_contents, lazy_plt->plt0_entry,
5180 lazy_plt->plt0_got1_offset) == 0)
5181 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6,
5182 2) == 0))
5183 {
5184 if (x32_lazy_ibt_plt != NULL
5185 && (memcmp (plt_contents
5186 + x32_lazy_ibt_plt->plt_entry_size,
5187 x32_lazy_ibt_plt->plt_entry,
5188 x32_lazy_ibt_plt->plt_got_offset) == 0))
5189 {
5190 /* The first entry in the x32 lazy IBT PLT is the same
5191 as the lazy PLT. */
5192 plt_type = plt_lazy | plt_second;
5193 lazy_plt = x32_lazy_ibt_plt;
5194 }
5195 else
5196 plt_type = plt_lazy;
5197 }
5198 else if (lazy_bnd_plt != NULL
5199 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry,
5200 lazy_bnd_plt->plt0_got1_offset) == 0)
5201 && (memcmp (plt_contents + 6,
5202 lazy_bnd_plt->plt0_entry + 6, 3) == 0))
5203 {
5204 plt_type = plt_lazy | plt_second;
5205 /* The first entry in the lazy IBT PLT is the same as the
5206 lazy BND PLT. */
5207 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size,
5208 lazy_ibt_plt->plt_entry,
5209 lazy_ibt_plt->plt_got_offset) == 0))
5210 lazy_plt = lazy_ibt_plt;
5211 else
5212 lazy_plt = lazy_bnd_plt;
5213 }
5214 }
5215
5216 if (non_lazy_plt != NULL
5217 && (plt_type == plt_unknown || plt_type == plt_non_lazy)
5218 && plt->size >= non_lazy_plt->plt_entry_size)
5219 {
5220 /* Match non-lazy PLT. */
5221 if (memcmp (plt_contents, non_lazy_plt->plt_entry,
5222 non_lazy_plt->plt_got_offset) == 0)
5223 plt_type = plt_non_lazy;
5224 }
5225
5226 if (plt_type == plt_unknown || plt_type == plt_second)
5227 {
5228 if (non_lazy_bnd_plt != NULL
5229 && plt->size >= non_lazy_bnd_plt->plt_entry_size
5230 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry,
5231 non_lazy_bnd_plt->plt_got_offset) == 0))
5232 {
5233 /* Match BND PLT. */
5234 plt_type = plt_second;
5235 non_lazy_plt = non_lazy_bnd_plt;
5236 }
5237 else if (non_lazy_ibt_plt != NULL
5238 && plt->size >= non_lazy_ibt_plt->plt_entry_size
5239 && (memcmp (plt_contents,
5240 non_lazy_ibt_plt->plt_entry,
5241 non_lazy_ibt_plt->plt_got_offset) == 0))
5242 {
5243 /* Match IBT PLT. */
5244 plt_type = plt_second;
5245 non_lazy_plt = non_lazy_ibt_plt;
5246 }
5247 else if (x32_non_lazy_ibt_plt != NULL
5248 && plt->size >= x32_non_lazy_ibt_plt->plt_entry_size
5249 && (memcmp (plt_contents,
5250 x32_non_lazy_ibt_plt->plt_entry,
5251 x32_non_lazy_ibt_plt->plt_got_offset) == 0))
5252 {
5253 /* Match x32 IBT PLT. */
5254 plt_type = plt_second;
5255 non_lazy_plt = x32_non_lazy_ibt_plt;
5256 }
5257 }
5258
5259 if (plt_type == plt_unknown)
5260 {
5261 free (plt_contents);
5262 continue;
5263 }
5264
5265 plts[j].sec = plt;
5266 plts[j].type = plt_type;
5267
5268 if ((plt_type & plt_lazy))
5269 {
5270 plts[j].plt_got_offset = lazy_plt->plt_got_offset;
5271 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size;
5272 plts[j].plt_entry_size = lazy_plt->plt_entry_size;
5273 /* Skip PLT0 in lazy PLT. */
5274 i = 1;
5275 }
5276 else
5277 {
5278 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset;
5279 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size;
5280 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size;
5281 i = 0;
5282 }
5283
5284 /* Skip lazy PLT when the second PLT is used. */
5285 if (plt_type == (plt_lazy | plt_second))
5286 plts[j].count = 0;
5287 else
5288 {
5289 n = plt->size / plts[j].plt_entry_size;
5290 plts[j].count = n;
5291 count += n - i;
5292 }
5293
5294 plts[j].contents = plt_contents;
5295 }
5296
5297 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize,
5298 (bfd_vma) 0, plts, dynsyms,
5299 ret);
5300 }
5301
5302 /* Handle an x86-64 specific section when reading an object file. This
5303 is called when elfcode.h finds a section with an unknown type. */
5304
5305 static bool
5306 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr,
5307 const char *name, int shindex)
5308 {
5309 if (hdr->sh_type != SHT_X86_64_UNWIND)
5310 return false;
5311
5312 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex))
5313 return false;
5314
5315 return true;
5316 }
5317
5318 /* Hook called by the linker routine which adds symbols from an object
5319 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead
5320 of .bss. */
5321
5322 static bool
5323 elf_x86_64_add_symbol_hook (bfd *abfd,
5324 struct bfd_link_info *info ATTRIBUTE_UNUSED,
5325 Elf_Internal_Sym *sym,
5326 const char **namep ATTRIBUTE_UNUSED,
5327 flagword *flagsp ATTRIBUTE_UNUSED,
5328 asection **secp,
5329 bfd_vma *valp)
5330 {
5331 asection *lcomm;
5332
5333 switch (sym->st_shndx)
5334 {
5335 case SHN_X86_64_LCOMMON:
5336 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON");
5337 if (lcomm == NULL)
5338 {
5339 lcomm = bfd_make_section_with_flags (abfd,
5340 "LARGE_COMMON",
5341 (SEC_ALLOC
5342 | SEC_IS_COMMON
5343 | SEC_LINKER_CREATED));
5344 if (lcomm == NULL)
5345 return false;
5346 elf_section_flags (lcomm) |= SHF_X86_64_LARGE;
5347 }
5348 *secp = lcomm;
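/* For common symbols BFD stores the size in the value field.  */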
5349 *valp = sym->st_size;
5350 return true;
5351 }
5352
5353 return true;
5354 }
5355
5356
5357 /* Given a BFD section, try to locate the corresponding ELF section
5358 index. */
5359
5360 static bool
5361 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED,
5362 asection *sec, int *index_return)
5363 {
5364 if (sec == &_bfd_elf_large_com_section)
5365 {
5366 *index_return = SHN_X86_64_LCOMMON;
5367 return true;
5368 }
5369 return false;
5370 }
5371
5372 /* Process a symbol. */
5373
5374 static void
5375 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED,
5376 asymbol *asym)
5377 {
5378 elf_symbol_type *elfsym = (elf_symbol_type *) asym;
5379
5380 switch (elfsym->internal_elf_sym.st_shndx)
5381 {
5382 case SHN_X86_64_LCOMMON:
5383 asym->section = &_bfd_elf_large_com_section;
5384 asym->value = elfsym->internal_elf_sym.st_size;
5385 /* Common symbol doesn't set BSF_GLOBAL. */
5386 asym->flags &= ~BSF_GLOBAL;
5387 break;
5388 }
5389 }
5390
5391 static bool
5392 elf_x86_64_common_definition (Elf_Internal_Sym *sym)
5393 {
5394 return (sym->st_shndx == SHN_COMMON
5395 || sym->st_shndx == SHN_X86_64_LCOMMON);
5396 }
5397
5398 static unsigned int
5399 elf_x86_64_common_section_index (asection *sec)
5400 {
5401 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5402 return SHN_COMMON;
5403 else
5404 return SHN_X86_64_LCOMMON;
5405 }
5406
5407 static asection *
5408 elf_x86_64_common_section (asection *sec)
5409 {
5410 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0)
5411 return bfd_com_section_ptr;
5412 else
5413 return &_bfd_elf_large_com_section;
5414 }
5415
5416 static bool
5417 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h,
5418 const Elf_Internal_Sym *sym,
5419 asection **psec,
5420 bool newdef,
5421 bool olddef,
5422 bfd *oldbfd,
5423 const asection *oldsec)
5424 {
5425 /* A normal common symbol and a large common symbol result in a
5426 normal common symbol. We turn the large common symbol into a
5427 normal one. */
5428 if (!olddef
5429 && h->root.type == bfd_link_hash_common
5430 && !newdef
5431 && bfd_is_com_section (*psec)
5432 && oldsec != *psec)
5433 {
5434 if (sym->st_shndx == SHN_COMMON
5435 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0)
5436 {
5437 h->root.u.c.p->section
5438 = bfd_make_section_old_way (oldbfd, "COMMON");
5439 h->root.u.c.p->section->flags = SEC_ALLOC;
5440 }
5441 else if (sym->st_shndx == SHN_X86_64_LCOMMON
5442 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0)
5443 *psec = bfd_com_section_ptr;
5444 }
5445
5446 return true;
5447 }
5448
5449 static bool
5450 elf_x86_64_section_flags (const Elf_Internal_Shdr *hdr)
5451 {
5452 if ((hdr->sh_flags & SHF_X86_64_LARGE) != 0)
5453 hdr->bfd_section->flags |= SEC_ELF_LARGE;
5454
5455 return true;
5456 }
5457
5458 static bool
5459 elf_x86_64_fake_sections (bfd *abfd ATTRIBUTE_UNUSED,
5460 Elf_Internal_Shdr *hdr, asection *sec)
5461 {
5462 if (sec->flags & SEC_ELF_LARGE)
5463 hdr->sh_flags |= SHF_X86_64_LARGE;
5464
5465 return true;
5466 }
5467
5468 static bool
5469 elf_x86_64_copy_private_section_data (bfd *ibfd, asection *isec,
5470 bfd *obfd, asection *osec)
5471 {
5472 if (!_bfd_elf_copy_private_section_data (ibfd, isec, obfd, osec))
5473 return false;
5474
5475 /* objcopy --set-section-flags without "large" drops SHF_X86_64_LARGE. */
5476 if (ibfd != obfd)
5477 elf_section_flags (osec) &= ~SHF_X86_64_LARGE;
5478
5479 return true;
5480 }
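/* Roughly, the objcopy case referred to above:

     objcopy --set-section-flags .ldata=alloc,load,data,large in.o out.o

   should keep .ldata large, while the same command without "large" must
   drop SHF_X86_64_LARGE.  Clearing the raw ELF flag whenever the copy
   crosses BFDs lets the SEC_ELF_LARGE BFD flag (translated back in
   elf_x86_64_fake_sections above) decide what finally gets written.  */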
5481
5482 static int
5483 elf_x86_64_additional_program_headers (bfd *abfd,
5484 struct bfd_link_info *info ATTRIBUTE_UNUSED)
5485 {
5486 asection *s;
5487 int count = 0;
5488
5489 /* Check to see if we need a large readonly segment. */
5490 s = bfd_get_section_by_name (abfd, ".lrodata");
5491 if (s && (s->flags & SEC_LOAD))
5492 count++;
5493
5494 /* Check to see if we need a large data segment. Since the .lbss
5495 section is placed right after the .bss section, there should be no
5496 need for a large data segment just because of .lbss. */
5497 s = bfd_get_section_by_name (abfd, ".ldata");
5498 if (s && (s->flags & SEC_LOAD))
5499 count++;
5500
5501 return count;
5502 }
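/* In effect, a link whose output contains a loadable .lrodata and/or
   .ldata (e.g. from objects compiled with -mcmodel=medium) reserves one
   extra program header for each, so that the large sections can be
   placed in their own PT_LOAD segments away from the ordinary text and
   data segments.  */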
5503
5504 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */
5505
5506 static bool
5507 elf_x86_64_relocs_compatible (const bfd_target *input,
5508 const bfd_target *output)
5509 {
5510 return ((xvec_get_elf_backend_data (input)->s->elfclass
5511 == xvec_get_elf_backend_data (output)->s->elfclass)
5512 && _bfd_elf_relocs_compatible (input, output));
5513 }
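/* Both the 64-bit and the x32 target vectors use EM_X86_64 relocations,
   so the generic test alone would treat them as compatible; the extra
   elfclass check above keeps ELFCLASS32 (x32) and ELFCLASS64 inputs from
   being mixed, while still allowing, e.g., the FreeBSD and Solaris
   64-bit vectors to interoperate.  */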
5514
5515 /* Set up x86-64 GNU properties. Return the first relocatable ELF input
5516 with GNU properties if found. Otherwise, return NULL. */
5517
5518 static bfd *
5519 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info)
5520 {
5521 struct elf_x86_init_table init_table;
5522 const struct elf_backend_data *bed;
5523 struct elf_x86_link_hash_table *htab;
5524
5525 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit
5526 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit
5527 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit)
5528 != (int) R_X86_64_GNU_VTINHERIT)
5529 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit)
5530 != (int) R_X86_64_GNU_VTENTRY))
5531 abort ();
5532
5533 /* The PLT0 pad byte is unused for x86-64. */
5534 init_table.plt0_pad_byte = 0x90;
5535
5536 bed = get_elf_backend_data (info->output_bfd);
5537 htab = elf_x86_hash_table (info, bed->target_id);
5538 if (!htab)
5539 abort ();
5540
5541 init_table.lazy_plt = &elf_x86_64_lazy_plt;
5542 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt;
5543
5544 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt;
5545 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt;
5546
5547 if (ABI_64_P (info->output_bfd))
5548 {
5549 init_table.sframe_lazy_plt = &elf_x86_64_sframe_plt;
5550 init_table.sframe_non_lazy_plt = &elf_x86_64_sframe_non_lazy_plt;
5551 init_table.sframe_lazy_ibt_plt = &elf_x86_64_sframe_plt;
5552 init_table.sframe_non_lazy_ibt_plt = &elf_x86_64_sframe_non_lazy_plt;
5553 }
5554 else
5555 {
5556 /* SFrame is not supported for non-AMD64 (x32) output. */
5557 init_table.sframe_lazy_plt = NULL;
5558 init_table.sframe_non_lazy_plt = NULL;
5559 }
5560
5561 if (ABI_64_P (info->output_bfd))
5562 {
5563 init_table.r_info = elf64_r_info;
5564 init_table.r_sym = elf64_r_sym;
5565 }
5566 else
5567 {
5568 init_table.r_info = elf32_r_info;
5569 init_table.r_sym = elf32_r_sym;
5570 }
5571
5572 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table);
5573 }
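/* The r_info/r_sym hooks only differ in how a relocation's symbol index
   and type are packed: the ELF64 layout is roughly

       r_info = ((bfd_vma) sym << 32) + type

   while the ELF32 (x32) layout is

       r_info = (sym << 8) + type

   so the shared x86 code in elfxx-x86.c can stay word-size agnostic.  */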
5574
5575 static void
5576 elf_x86_64_add_glibc_version_dependency
5577 (struct elf_find_verdep_info *rinfo)
5578 {
5579 unsigned int i = 0;
5580 const char *version[3] = { NULL, NULL, NULL };
5581 struct elf_x86_link_hash_table *htab;
5582
5583 if (rinfo->info->enable_dt_relr)
5584 {
5585 version[i] = "GLIBC_ABI_DT_RELR";
5586 i++;
5587 }
5588
5589 htab = elf_x86_hash_table (rinfo->info, X86_64_ELF_DATA);
5590 if (htab != NULL && htab->params->mark_plt)
5591 {
5592 version[i] = "GLIBC_2.36";
5593 i++;
5594 }
5595
5596 if (i != 0)
5597 _bfd_elf_link_add_glibc_version_dependency (rinfo, version);
5598 }
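/* For example, linking with -z pack-relative-relocs (which sets
   enable_dt_relr) records a GLIBC_ABI_DT_RELR version dependency so that
   a glibc lacking DT_RELR support refuses to load the output rather than
   misbehaving, and -z mark-plt pulls in GLIBC_2.36, the first release
   that understands the DT_X86_64_PLT* dynamic tags.  */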
5599
5600 static const struct bfd_elf_special_section
5601 elf_x86_64_special_sections[] =
5602 {
5603 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5604 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5605 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE},
5606 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5607 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE},
5608 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE},
5609 { NULL, 0, 0, 0, 0 }
5610 };
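/* Example: assembly code can put data straight into one of these large
   sections; with a reasonably recent gas the "l" section flag requests
   SHF_X86_64_LARGE, e.g.

       .section .ldata,"awl",@progbits
       .quad   0

   which then matches the .ldata entry in the table above.  */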
5611
5612 #define TARGET_LITTLE_SYM x86_64_elf64_vec
5613 #define TARGET_LITTLE_NAME "elf64-x86-64"
5614 #define ELF_ARCH bfd_arch_i386
5615 #define ELF_TARGET_ID X86_64_ELF_DATA
5616 #define ELF_MACHINE_CODE EM_X86_64
5617 #define ELF_MAXPAGESIZE 0x1000
5618 #define ELF_COMMONPAGESIZE 0x1000
5619
5620 #define elf_backend_can_gc_sections 1
5621 #define elf_backend_can_refcount 1
5622 #define elf_backend_want_got_plt 1
5623 #define elf_backend_plt_readonly 1
5624 #define elf_backend_want_plt_sym 0
5625 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3)
5626 #define elf_backend_rela_normal 1
5627 #define elf_backend_plt_alignment 4
5628 #define elf_backend_caches_rawsize 1
5629 #define elf_backend_dtrel_excludes_plt 1
5630 #define elf_backend_want_dynrelro 1
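/* Note on elf_backend_got_header_size above: three GOT entries are
   reserved in the usual ELF fashion, with GOT[0] holding the link-time
   address of _DYNAMIC, while GOT[1] and GOT[2] are filled in by the
   dynamic linker for the lazy PLT resolver.  */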
5631
5632 #define elf_info_to_howto elf_x86_64_info_to_howto
5633
5634 #define bfd_elf64_bfd_copy_private_section_data \
5635 elf_x86_64_copy_private_section_data
5636 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup
5637 #define bfd_elf64_bfd_reloc_name_lookup \
5638 elf_x86_64_reloc_name_lookup
5639
5640 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible
5641 #define elf_backend_always_size_sections elf_x86_64_always_size_sections
5642 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
5643 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
5644 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol
5645 #define elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms
5646 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus
5647 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo
5648 #ifdef CORE_HEADER
5649 #define elf_backend_write_core_note elf_x86_64_write_core_note
5650 #endif
5651 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class
5652 #define elf_backend_relocate_section elf_x86_64_relocate_section
5653 #define elf_backend_init_index_section _bfd_elf_init_1_index_section
5654 #define elf_backend_object_p elf64_x86_64_elf_object_p
5655 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab
5656
5657 #define elf_backend_section_from_shdr \
5658 elf_x86_64_section_from_shdr
5659
5660 #define elf_backend_section_from_bfd_section \
5661 elf_x86_64_elf_section_from_bfd_section
5662 #define elf_backend_add_symbol_hook \
5663 elf_x86_64_add_symbol_hook
5664 #define elf_backend_symbol_processing \
5665 elf_x86_64_symbol_processing
5666 #define elf_backend_common_section_index \
5667 elf_x86_64_common_section_index
5668 #define elf_backend_common_section \
5669 elf_x86_64_common_section
5670 #define elf_backend_common_definition \
5671 elf_x86_64_common_definition
5672 #define elf_backend_merge_symbol \
5673 elf_x86_64_merge_symbol
5674 #define elf_backend_special_sections \
5675 elf_x86_64_special_sections
5676 #define elf_backend_section_flags elf_x86_64_section_flags
5677 #define elf_backend_fake_sections elf_x86_64_fake_sections
5678 #define elf_backend_additional_program_headers \
5679 elf_x86_64_additional_program_headers
5680 #define elf_backend_setup_gnu_properties \
5681 elf_x86_64_link_setup_gnu_properties
5682 #define elf_backend_hide_symbol \
5683 _bfd_x86_elf_hide_symbol
5684 #define elf_backend_add_glibc_version_dependency \
5685 elf_x86_64_add_glibc_version_dependency
5686
5687 #undef elf64_bed
5688 #define elf64_bed elf64_x86_64_bed
5689
5690 #include "elf64-target.h"
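/* Each inclusion of elf64-target.h (or elf32-target.h at the end of this
   file) expands the macros defined so far into a complete bfd_target
   vector.  The blocks below re-#define a handful of macros (target name,
   OSABI, backend data) and include the template again to produce the
   OS-specific variants.  */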
5691
5692 /* CloudABI support. */
5693
5694 #undef TARGET_LITTLE_SYM
5695 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec
5696 #undef TARGET_LITTLE_NAME
5697 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi"
5698
5699 #undef ELF_OSABI
5700 #define ELF_OSABI ELFOSABI_CLOUDABI
5701
5702 #undef elf64_bed
5703 #define elf64_bed elf64_x86_64_cloudabi_bed
5704
5705 #include "elf64-target.h"
5706
5707 /* FreeBSD support. */
5708
5709 #undef TARGET_LITTLE_SYM
5710 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec
5711 #undef TARGET_LITTLE_NAME
5712 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd"
5713
5714 #undef ELF_OSABI
5715 #define ELF_OSABI ELFOSABI_FREEBSD
5716
5717 #undef elf64_bed
5718 #define elf64_bed elf64_x86_64_fbsd_bed
5719
5720 #include "elf64-target.h"
5721
5722 /* Solaris 2 support. */
5723
5724 #undef TARGET_LITTLE_SYM
5725 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec
5726 #undef TARGET_LITTLE_NAME
5727 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2"
5728
5729 #undef ELF_TARGET_OS
5730 #define ELF_TARGET_OS is_solaris
5731
5732 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
5733 objects won't be recognized. */
5734 #undef ELF_OSABI
5735
5736 #undef elf64_bed
5737 #define elf64_bed elf64_x86_64_sol2_bed
5738
5739 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte
5740 boundary. */
5741 #undef elf_backend_static_tls_alignment
5742 #define elf_backend_static_tls_alignment 16
5743
5744 /* The Solaris 2 ABI requires a plt symbol on all platforms.
5745
5746 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
5747 File, p.63. */
5748 #undef elf_backend_want_plt_sym
5749 #define elf_backend_want_plt_sym 1
5750
5751 #undef elf_backend_strtab_flags
5752 #define elf_backend_strtab_flags SHF_STRINGS
5753
5754 static bool
5755 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
5756 bfd *obfd ATTRIBUTE_UNUSED,
5757 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
5758 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
5759 {
5760 /* PR 19938: FIXME: Need to add code for setting the sh_info
5761 and sh_link fields of Solaris specific section types. */
5762 return false;
5763 }
5764
5765 #undef elf_backend_copy_special_section_fields
5766 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields
5767
5768 #include "elf64-target.h"
5769
5770 /* Restore defaults. */
5771 #undef ELF_OSABI
5772 #undef elf_backend_static_tls_alignment
5773 #undef elf_backend_want_plt_sym
5774 #define elf_backend_want_plt_sym 0
5775 #undef elf_backend_strtab_flags
5776 #undef elf_backend_copy_special_section_fields
5777
5778 /* 32-bit x86-64 (x32) support. */
5779
5780 #undef TARGET_LITTLE_SYM
5781 #define TARGET_LITTLE_SYM x86_64_elf32_vec
5782 #undef TARGET_LITTLE_NAME
5783 #define TARGET_LITTLE_NAME "elf32-x86-64"
5784 #undef elf32_bed
5785 #define elf32_bed elf32_x86_64_bed
5786
5787 #undef ELF_ARCH
5788 #define ELF_ARCH bfd_arch_i386
5789
5790 #undef ELF_MACHINE_CODE
5791 #define ELF_MACHINE_CODE EM_X86_64
5792
5793 #undef ELF_TARGET_OS
5794 #undef ELF_OSABI
5795
5796 #define bfd_elf32_bfd_copy_private_section_data \
5797 elf_x86_64_copy_private_section_data
5798 #define bfd_elf32_bfd_reloc_type_lookup \
5799 elf_x86_64_reloc_type_lookup
5800 #define bfd_elf32_bfd_reloc_name_lookup \
5801 elf_x86_64_reloc_name_lookup
5802 #define bfd_elf32_get_synthetic_symtab \
5803 elf_x86_64_get_synthetic_symtab
5804
5805 #undef elf_backend_object_p
5806 #define elf_backend_object_p \
5807 elf32_x86_64_elf_object_p
5808
5809 #undef elf_backend_bfd_from_remote_memory
5810 #define elf_backend_bfd_from_remote_memory \
5811 _bfd_elf32_bfd_from_remote_memory
5812
5813 #undef elf_backend_size_info
5814 #define elf_backend_size_info \
5815 _bfd_elf32_size_info
5816
5817 #include "elf32-target.h"
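/* This last vector is the x32 target: ELFCLASS32 objects that still use
   EM_X86_64 and the R_X86_64_* relocations, as produced e.g. by
   "gcc -mx32".  It reuses the 64-bit backend routines together with the
   ELF32 size_info and the ELF32 r_info/r_sym packing selected in
   elf_x86_64_link_setup_gnu_properties.  */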
5818