1 /* X86-64 specific support for ELF 2 Copyright (C) 2000-2015 Free Software Foundation, Inc. 3 Contributed by Jan Hubicka <jh@suse.cz>. 4 5 This file is part of BFD, the Binary File Descriptor library. 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License as published by 9 the Free Software Foundation; either version 3 of the License, or 10 (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, 13 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 GNU General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program; if not, write to the Free Software 19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 20 MA 02110-1301, USA. */ 21 22 #include "sysdep.h" 23 #include "bfd.h" 24 #include "bfdlink.h" 25 #include "libbfd.h" 26 #include "elf-bfd.h" 27 #include "elf-nacl.h" 28 #include "bfd_stdint.h" 29 #include "objalloc.h" 30 #include "hashtab.h" 31 #include "dwarf2.h" 32 #include "libiberty.h" 33 34 #include "elf/x86-64.h" 35 36 #ifdef CORE_HEADER 37 #include <stdarg.h> 38 #include CORE_HEADER 39 #endif 40 41 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */ 42 #define MINUS_ONE (~ (bfd_vma) 0) 43 44 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the 45 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get 46 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE 47 since they are the same. */ 48 49 #define ABI_64_P(abfd) \ 50 (get_elf_backend_data (abfd)->s->elfclass == ELFCLASS64) 51 52 /* The relocation "howto" table. Order of fields: 53 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow, 54 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. 
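*/

/* Aside (illustration only, hence the "#if 0"; not part of BFD): a
   standalone sketch of why ELF32_R_TYPE is enough for both ABIs, as noted
   above -- every R_X86_64_* number fits in the low 8 bits of r_info under
   both the ELF32 and the ELF64 packing macros from the system <elf.h>.  */
#if 0
#include <elf.h>
#include <stdio.h>
#include <assert.h>

int
main (void)
{
  /* Pack a relocation against symbol index 7 with type R_X86_64_PC32
     using both on-disk encodings.  */
  Elf64_Xword info64 = ELF64_R_INFO (7, R_X86_64_PC32);
  Elf32_Word info32 = ELF32_R_INFO (7, R_X86_64_PC32);

  /* ELF32_R_TYPE keeps only the low 8 bits, so it recovers the type from
     either encoding; the symbol index still needs the class-specific
     ELF32_R_SYM or ELF64_R_SYM.  */
  assert (ELF32_R_TYPE (info64) == R_X86_64_PC32);
  assert (ELF32_R_TYPE (info32) == R_X86_64_PC32);
  printf ("r_type = %lu\n", (unsigned long) ELF32_R_TYPE (info64));
  return 0;
}
#endif

/* The relocation "howto" table follows, with fields in the order listed
   above.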
*/ 55 static reloc_howto_type x86_64_elf_howto_table[] = 56 { 57 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont, 58 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000, 59 FALSE), 60 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 61 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE, 62 FALSE), 63 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed, 64 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff, 65 TRUE), 66 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed, 67 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff, 68 FALSE), 69 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed, 70 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff, 71 TRUE), 72 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield, 73 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff, 74 FALSE), 75 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 76 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE, 77 MINUS_ONE, FALSE), 78 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 79 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE, 80 MINUS_ONE, FALSE), 81 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 82 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE, 83 MINUS_ONE, FALSE), 84 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed, 85 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff, 86 0xffffffff, TRUE), 87 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned, 88 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff, 89 FALSE), 90 HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed, 91 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff, 92 FALSE), 93 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield, 94 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE), 95 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield, 96 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE), 97 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield, 98 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE), 99 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed, 100 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE), 101 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 102 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE, 103 MINUS_ONE, FALSE), 104 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 105 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE, 106 MINUS_ONE, FALSE), 107 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 108 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE, 109 MINUS_ONE, FALSE), 110 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed, 111 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff, 112 0xffffffff, TRUE), 113 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed, 114 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff, 115 0xffffffff, TRUE), 116 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed, 117 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff, 118 0xffffffff, FALSE), 119 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed, 
120 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff, 121 0xffffffff, TRUE), 122 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed, 123 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff, 124 0xffffffff, FALSE), 125 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield, 126 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE, 127 TRUE), 128 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 129 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", 130 FALSE, MINUS_ONE, MINUS_ONE, FALSE), 131 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed, 132 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", 133 FALSE, 0xffffffff, 0xffffffff, TRUE), 134 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed, 135 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE, 136 FALSE), 137 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed, 138 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE, 139 MINUS_ONE, TRUE), 140 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed, 141 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", 142 FALSE, MINUS_ONE, MINUS_ONE, TRUE), 143 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed, 144 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE, 145 MINUS_ONE, FALSE), 146 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed, 147 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE, 148 MINUS_ONE, FALSE), 149 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned, 150 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff, 151 FALSE), 152 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned, 153 bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE, 154 FALSE), 155 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0, 156 complain_overflow_bitfield, bfd_elf_generic_reloc, 157 "R_X86_64_GOTPC32_TLSDESC", 158 FALSE, 0xffffffff, 0xffffffff, TRUE), 159 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0, 160 complain_overflow_dont, bfd_elf_generic_reloc, 161 "R_X86_64_TLSDESC_CALL", 162 FALSE, 0, 0, FALSE), 163 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0, 164 complain_overflow_bitfield, bfd_elf_generic_reloc, 165 "R_X86_64_TLSDESC", 166 FALSE, MINUS_ONE, MINUS_ONE, FALSE), 167 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 168 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE, 169 MINUS_ONE, FALSE), 170 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 171 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE, 172 MINUS_ONE, FALSE), 173 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed, 174 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff, 175 TRUE), 176 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed, 177 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff, 178 TRUE), 179 180 /* We have a gap in the reloc numbers here. 181 R_X86_64_standard counts the number up to this point, and 182 R_X86_64_vt_offset is the value to subtract from a reloc type of 183 R_X86_64_GNU_VT* to form an index into this table. */ 184 #define R_X86_64_standard (R_X86_64_PLT32_BND + 1) 185 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard) 186 187 /* GNU extension to record C++ vtable hierarchy. 
*/ 188 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont, 189 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE), 190 191 /* GNU extension to record C++ vtable member usage. */ 192 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont, 193 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0, 194 FALSE), 195 196 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */ 197 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield, 198 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff, 199 FALSE) 200 }; 201 202 #define IS_X86_64_PCREL_TYPE(TYPE) \ 203 ( ((TYPE) == R_X86_64_PC8) \ 204 || ((TYPE) == R_X86_64_PC16) \ 205 || ((TYPE) == R_X86_64_PC32) \ 206 || ((TYPE) == R_X86_64_PC32_BND) \ 207 || ((TYPE) == R_X86_64_PC64)) 208 209 /* Map BFD relocs to the x86_64 elf relocs. */ 210 struct elf_reloc_map 211 { 212 bfd_reloc_code_real_type bfd_reloc_val; 213 unsigned char elf_reloc_val; 214 }; 215 216 static const struct elf_reloc_map x86_64_reloc_map[] = 217 { 218 { BFD_RELOC_NONE, R_X86_64_NONE, }, 219 { BFD_RELOC_64, R_X86_64_64, }, 220 { BFD_RELOC_32_PCREL, R_X86_64_PC32, }, 221 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,}, 222 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,}, 223 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, }, 224 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, }, 225 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, }, 226 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, }, 227 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, }, 228 { BFD_RELOC_32, R_X86_64_32, }, 229 { BFD_RELOC_X86_64_32S, R_X86_64_32S, }, 230 { BFD_RELOC_16, R_X86_64_16, }, 231 { BFD_RELOC_16_PCREL, R_X86_64_PC16, }, 232 { BFD_RELOC_8, R_X86_64_8, }, 233 { BFD_RELOC_8_PCREL, R_X86_64_PC8, }, 234 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, }, 235 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, }, 236 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, }, 237 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, }, 238 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, }, 239 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, }, 240 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, }, 241 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, }, 242 { BFD_RELOC_64_PCREL, R_X86_64_PC64, }, 243 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, }, 244 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, }, 245 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, }, 246 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, }, 247 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, }, 248 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, }, 249 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, }, 250 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, }, 251 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, }, 252 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, }, 253 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, }, 254 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, }, 255 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, }, 256 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND,}, 257 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND,}, 258 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, }, 259 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, }, 260 }; 261 262 static reloc_howto_type * 263 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type) 264 { 265 unsigned i; 266 267 if (r_type == (unsigned int) R_X86_64_32) 268 { 269 if (ABI_64_P (abfd)) 270 i = r_type; 271 else 272 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1; 273 } 274 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT 275 || r_type >= (unsigned int) 
R_X86_64_max) 276 { 277 if (r_type >= (unsigned int) R_X86_64_standard) 278 { 279 (*_bfd_error_handler) (_("%B: invalid relocation type %d"), 280 abfd, (int) r_type); 281 r_type = R_X86_64_NONE; 282 } 283 i = r_type; 284 } 285 else 286 i = r_type - (unsigned int) R_X86_64_vt_offset; 287 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type); 288 return &x86_64_elf_howto_table[i]; 289 } 290 291 /* Given a BFD reloc type, return a HOWTO structure. */ 292 static reloc_howto_type * 293 elf_x86_64_reloc_type_lookup (bfd *abfd, 294 bfd_reloc_code_real_type code) 295 { 296 unsigned int i; 297 298 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map); 299 i++) 300 { 301 if (x86_64_reloc_map[i].bfd_reloc_val == code) 302 return elf_x86_64_rtype_to_howto (abfd, 303 x86_64_reloc_map[i].elf_reloc_val); 304 } 305 return NULL; 306 } 307 308 static reloc_howto_type * 309 elf_x86_64_reloc_name_lookup (bfd *abfd, 310 const char *r_name) 311 { 312 unsigned int i; 313 314 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0) 315 { 316 /* Get x32 R_X86_64_32. */ 317 reloc_howto_type *reloc 318 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1]; 319 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32); 320 return reloc; 321 } 322 323 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++) 324 if (x86_64_elf_howto_table[i].name != NULL 325 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0) 326 return &x86_64_elf_howto_table[i]; 327 328 return NULL; 329 } 330 331 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */ 332 333 static void 334 elf_x86_64_info_to_howto (bfd *abfd ATTRIBUTE_UNUSED, arelent *cache_ptr, 335 Elf_Internal_Rela *dst) 336 { 337 unsigned r_type; 338 339 r_type = ELF32_R_TYPE (dst->r_info); 340 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type); 341 BFD_ASSERT (r_type == cache_ptr->howto->type); 342 } 343 344 /* Support for core dump NOTE sections. */ 345 static bfd_boolean 346 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) 347 { 348 int offset; 349 size_t size; 350 351 switch (note->descsz) 352 { 353 default: 354 return FALSE; 355 356 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */ 357 /* pr_cursig */ 358 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12); 359 360 /* pr_pid */ 361 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24); 362 363 /* pr_reg */ 364 offset = 72; 365 size = 216; 366 367 break; 368 369 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */ 370 /* pr_cursig */ 371 elf_tdata (abfd)->core->signal 372 = bfd_get_16 (abfd, note->descdata + 12); 373 374 /* pr_pid */ 375 elf_tdata (abfd)->core->lwpid 376 = bfd_get_32 (abfd, note->descdata + 32); 377 378 /* pr_reg */ 379 offset = 112; 380 size = 216; 381 382 break; 383 } 384 385 /* Make a ".reg/999" section.
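   (The 216-byte register area in both layouts corresponds to the 27
   eight-byte general-purpose register slots of the x86-64 elf_gregset_t;
   the x32 note keeps the full 64-bit slots, hence the same size.)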
*/ 386 return _bfd_elfcore_make_pseudosection (abfd, ".reg", 387 size, note->descpos + offset); 388 } 389 390 static bfd_boolean 391 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) 392 { 393 switch (note->descsz) 394 { 395 default: 396 return FALSE; 397 398 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */ 399 elf_tdata (abfd)->core->pid 400 = bfd_get_32 (abfd, note->descdata + 12); 401 elf_tdata (abfd)->core->program 402 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16); 403 elf_tdata (abfd)->core->command 404 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80); 405 break; 406 407 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */ 408 elf_tdata (abfd)->core->pid 409 = bfd_get_32 (abfd, note->descdata + 24); 410 elf_tdata (abfd)->core->program 411 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16); 412 elf_tdata (abfd)->core->command 413 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80); 414 } 415 416 /* Note that for some reason, a spurious space is tacked 417 onto the end of the args in some (at least one anyway) 418 implementations, so strip it off if it exists. */ 419 420 { 421 char *command = elf_tdata (abfd)->core->command; 422 int n = strlen (command); 423 424 if (0 < n && command[n - 1] == ' ') 425 command[n - 1] = '\0'; 426 } 427 428 return TRUE; 429 } 430 431 #ifdef CORE_HEADER 432 static char * 433 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz, 434 int note_type, ...) 435 { 436 const struct elf_backend_data *bed = get_elf_backend_data (abfd); 437 va_list ap; 438 const char *fname, *psargs; 439 long pid; 440 int cursig; 441 const void *gregs; 442 443 switch (note_type) 444 { 445 default: 446 return NULL; 447 448 case NT_PRPSINFO: 449 va_start (ap, note_type); 450 fname = va_arg (ap, const char *); 451 psargs = va_arg (ap, const char *); 452 va_end (ap); 453 454 if (bed->s->elfclass == ELFCLASS32) 455 { 456 prpsinfo32_t data; 457 memset (&data, 0, sizeof (data)); 458 strncpy (data.pr_fname, fname, sizeof (data.pr_fname)); 459 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs)); 460 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 461 &data, sizeof (data)); 462 } 463 else 464 { 465 prpsinfo64_t data; 466 memset (&data, 0, sizeof (data)); 467 strncpy (data.pr_fname, fname, sizeof (data.pr_fname)); 468 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs)); 469 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 470 &data, sizeof (data)); 471 } 472 /* NOTREACHED */ 473 474 case NT_PRSTATUS: 475 va_start (ap, note_type); 476 pid = va_arg (ap, long); 477 cursig = va_arg (ap, int); 478 gregs = va_arg (ap, const void *); 479 va_end (ap); 480 481 if (bed->s->elfclass == ELFCLASS32) 482 { 483 if (bed->elf_machine_code == EM_X86_64) 484 { 485 prstatusx32_t prstat; 486 memset (&prstat, 0, sizeof (prstat)); 487 prstat.pr_pid = pid; 488 prstat.pr_cursig = cursig; 489 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); 490 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 491 &prstat, sizeof (prstat)); 492 } 493 else 494 { 495 prstatus32_t prstat; 496 memset (&prstat, 0, sizeof (prstat)); 497 prstat.pr_pid = pid; 498 prstat.pr_cursig = cursig; 499 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); 500 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 501 &prstat, sizeof (prstat)); 502 } 503 } 504 else 505 { 506 prstatus64_t prstat; 507 memset (&prstat, 0, sizeof (prstat)); 508 prstat.pr_pid = pid; 509 prstat.pr_cursig = cursig; 510 memcpy (&prstat.pr_reg, 
gregs, sizeof (prstat.pr_reg)); 511 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 512 &prstat, sizeof (prstat)); 513 } 514 } 515 /* NOTREACHED */ 516 } 517 #endif 518 519 /* Functions for the x86-64 ELF linker. */ 520 521 /* The name of the dynamic interpreter. This is put in the .interp 522 section. */ 523 524 #define ELF64_DYNAMIC_INTERPRETER "/lib/ld64.so.1" 525 #define ELF32_DYNAMIC_INTERPRETER "/lib/ldx32.so.1" 526 527 /* If ELIMINATE_COPY_RELOCS is non-zero, the linker will try to avoid 528 copying dynamic variables from a shared lib into an app's dynbss 529 section, and instead use a dynamic relocation to point into the 530 shared lib. */ 531 #define ELIMINATE_COPY_RELOCS 1 532 533 /* The size in bytes of an entry in the global offset table. */ 534 535 #define GOT_ENTRY_SIZE 8 536 537 /* The size in bytes of an entry in the procedure linkage table. */ 538 539 #define PLT_ENTRY_SIZE 16 540 541 /* The first entry in a procedure linkage table looks like this. See the 542 SVR4 ABI i386 supplement and the x86-64 ABI to see how this works. */ 543 544 static const bfd_byte elf_x86_64_plt0_entry[PLT_ENTRY_SIZE] = 545 { 546 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 547 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */ 548 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */ 549 }; 550 551 /* Subsequent entries in a procedure linkage table look like this. */ 552 553 static const bfd_byte elf_x86_64_plt_entry[PLT_ENTRY_SIZE] = 554 { 555 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ 556 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 557 0x68, /* pushq immediate */ 558 0, 0, 0, 0, /* replaced with index into relocation table. */ 559 0xe9, /* jmp relative */ 560 0, 0, 0, 0 /* replaced with offset to start of .plt0. */ 561 }; 562 563 /* The first entry in a procedure linkage table with BND relocations 564 looks like this. */ 565 566 static const bfd_byte elf_x86_64_bnd_plt0_entry[PLT_ENTRY_SIZE] = 567 { 568 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 569 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */ 570 0x0f, 0x1f, 0 /* nopl (%rax) */ 571 }; 572 573 /* Subsequent entries for legacy branches in a procedure linkage table 574 with BND relocations look like this. */ 575 576 static const bfd_byte elf_x86_64_legacy_plt_entry[PLT_ENTRY_SIZE] = 577 { 578 0x68, 0, 0, 0, 0, /* pushq immediate */ 579 0xe9, 0, 0, 0, 0, /* jmpq relative */ 580 0x66, 0x0f, 0x1f, 0x44, 0, 0 /* nopw (%rax,%rax,1) */ 581 }; 582 583 /* Subsequent entries for branches with BND prefix in a procedure linkage 584 table with BND relocations look like this. */ 585 586 static const bfd_byte elf_x86_64_bnd_plt_entry[PLT_ENTRY_SIZE] = 587 { 588 0x68, 0, 0, 0, 0, /* pushq immediate */ 589 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ 590 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */ 591 }; 592 593 /* Entries for legacy branches in the second procedure linkage table 594 look like this. */ 595 596 static const bfd_byte elf_x86_64_legacy_plt2_entry[8] = 597 { 598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ 599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 600 0x66, 0x90 /* xchg %ax,%ax */ 601 }; 602 603 /* Entries for branches with BND prefix in the second procedure linkage 604 table look like this. */ 605 606 static const bfd_byte elf_x86_64_bnd_plt2_entry[8] = 607 { 608 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ 609 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 610 0x90 /* nop */ 611 }; 612 613 /* .eh_frame covering the .plt section.
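   (An illustrative aside on the PLT templates above precedes it.)  */

/* Aside (illustration only, hence the "#if 0"): a sketch of how a lazy PLT
   entry is patched from the elf_x86_64_plt_entry template, using the
   plt_got_offset / plt_reloc_offset / plt_plt_offset slots described by the
   backend data further down.  The put_le32 helper and the displacement
   values are made up for the example.  */
#if 0
#include <stdint.h>
#include <stdio.h>

/* Store a 32-bit value little-endian, as bfd_put_32 does for x86-64.  */
static void
put_le32 (uint8_t *p, uint32_t v)
{
  p[0] = v & 0xff;
  p[1] = (v >> 8) & 0xff;
  p[2] = (v >> 16) & 0xff;
  p[3] = (v >> 24) & 0xff;
}

int
main (void)
{
  /* Same bytes as elf_x86_64_plt_entry: jmpq *disp32(%rip); pushq imm32;
     jmp rel32.  */
  uint8_t plt[16] = { 0xff, 0x25, 0, 0, 0, 0,
		      0x68, 0, 0, 0, 0,
		      0xe9, 0, 0, 0, 0 };
  int i;

  /* Offset 2: displacement from the end of the jmpq (entry + 6) to this
     symbol's .got.plt slot.  Offset 7: index of the JUMP_SLOT reloc in
     .rela.plt.  Offset 12: displacement from the end of the entry back to
     PLT0.  All three numbers below are arbitrary placeholders.  */
  put_le32 (plt + 2, 0x2fca);
  put_le32 (plt + 7, 3);
  put_le32 (plt + 12, (uint32_t) -(4 * 16));

  for (i = 0; i < 16; i++)
    printf ("%02x ", plt[i]);
  printf ("\n");
  return 0;
}
#endif

/* .eh_frame covering the .plt section.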
*/ 614 615 static const bfd_byte elf_x86_64_eh_frame_plt[] = 616 { 617 #define PLT_CIE_LENGTH 20 618 #define PLT_FDE_LENGTH 36 619 #define PLT_FDE_START_OFFSET 4 + PLT_CIE_LENGTH + 8 620 #define PLT_FDE_LEN_OFFSET 4 + PLT_CIE_LENGTH + 12 621 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 622 0, 0, 0, 0, /* CIE ID */ 623 1, /* CIE version */ 624 'z', 'R', 0, /* Augmentation string */ 625 1, /* Code alignment factor */ 626 0x78, /* Data alignment factor */ 627 16, /* Return address column */ 628 1, /* Augmentation size */ 629 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 630 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 631 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 632 DW_CFA_nop, DW_CFA_nop, 633 634 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 635 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 636 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 637 0, 0, 0, 0, /* .plt size goes here */ 638 0, /* Augmentation size */ 639 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 640 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 641 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 642 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 643 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 644 11, /* Block length */ 645 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 646 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 647 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge, 648 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 649 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 650 }; 651 652 /* Architecture-specific backend data for x86-64. */ 653 654 struct elf_x86_64_backend_data 655 { 656 /* Templates for the initial PLT entry and for subsequent entries. */ 657 const bfd_byte *plt0_entry; 658 const bfd_byte *plt_entry; 659 unsigned int plt_entry_size; /* Size of each PLT entry. */ 660 661 /* Offsets into plt0_entry that are to be replaced with GOT[1] and GOT[2]. */ 662 unsigned int plt0_got1_offset; 663 unsigned int plt0_got2_offset; 664 665 /* Offset of the end of the PC-relative instruction containing 666 plt0_got2_offset. */ 667 unsigned int plt0_got2_insn_end; 668 669 /* Offsets into plt_entry that are to be replaced with... */ 670 unsigned int plt_got_offset; /* ... address of this symbol in .got. */ 671 unsigned int plt_reloc_offset; /* ... offset into relocation table. */ 672 unsigned int plt_plt_offset; /* ... offset to start of .plt. */ 673 674 /* Length of the PC-relative instruction containing plt_got_offset. */ 675 unsigned int plt_got_insn_size; 676 677 /* Offset of the end of the PC-relative jump to plt0_entry. */ 678 unsigned int plt_plt_insn_end; 679 680 /* Offset into plt_entry where the initial value of the GOT entry points. */ 681 unsigned int plt_lazy_offset; 682 683 /* .eh_frame covering the .plt section. */ 684 const bfd_byte *eh_frame_plt; 685 unsigned int eh_frame_plt_size; 686 }; 687 688 #define get_elf_x86_64_arch_data(bed) \ 689 ((const struct elf_x86_64_backend_data *) (bed)->arch_data) 690 691 #define get_elf_x86_64_backend_data(abfd) \ 692 get_elf_x86_64_arch_data (get_elf_backend_data (abfd)) 693 694 #define GET_PLT_ENTRY_SIZE(abfd) \ 695 get_elf_x86_64_backend_data (abfd)->plt_entry_size 696 697 /* These are the standard parameters. 
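   For the plain (non-BND) templates above: plt0_got1_offset 2 and
   plt0_got2_offset 8 index the disp32 fields of the "ff 35" pushq and the
   "ff 25" jmpq in plt0_entry; plt_got_offset 2 and plt_plt_offset 12 index
   the disp32 fields in plt_entry, and plt_reloc_offset 7 is the imm32 of
   its "68" pushq.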
*/ 698 static const struct elf_x86_64_backend_data elf_x86_64_arch_bed = 699 { 700 elf_x86_64_plt0_entry, /* plt0_entry */ 701 elf_x86_64_plt_entry, /* plt_entry */ 702 sizeof (elf_x86_64_plt_entry), /* plt_entry_size */ 703 2, /* plt0_got1_offset */ 704 8, /* plt0_got2_offset */ 705 12, /* plt0_got2_insn_end */ 706 2, /* plt_got_offset */ 707 7, /* plt_reloc_offset */ 708 12, /* plt_plt_offset */ 709 6, /* plt_got_insn_size */ 710 PLT_ENTRY_SIZE, /* plt_plt_insn_end */ 711 6, /* plt_lazy_offset */ 712 elf_x86_64_eh_frame_plt, /* eh_frame_plt */ 713 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ 714 }; 715 716 static const struct elf_x86_64_backend_data elf_x86_64_bnd_arch_bed = 717 { 718 elf_x86_64_bnd_plt0_entry, /* plt0_entry */ 719 elf_x86_64_bnd_plt_entry, /* plt_entry */ 720 sizeof (elf_x86_64_bnd_plt_entry), /* plt_entry_size */ 721 2, /* plt0_got1_offset */ 722 1+8, /* plt0_got2_offset */ 723 1+12, /* plt0_got2_insn_end */ 724 1+2, /* plt_got_offset */ 725 1, /* plt_reloc_offset */ 726 7, /* plt_plt_offset */ 727 1+6, /* plt_got_insn_size */ 728 11, /* plt_plt_insn_end */ 729 0, /* plt_lazy_offset */ 730 elf_x86_64_eh_frame_plt, /* eh_frame_plt */ 731 sizeof (elf_x86_64_eh_frame_plt), /* eh_frame_plt_size */ 732 }; 733 734 #define elf_backend_arch_data &elf_x86_64_arch_bed 735 736 /* x86-64 ELF linker hash entry. */ 737 738 struct elf_x86_64_link_hash_entry 739 { 740 struct elf_link_hash_entry elf; 741 742 /* Track dynamic relocs copied for this symbol. */ 743 struct elf_dyn_relocs *dyn_relocs; 744 745 #define GOT_UNKNOWN 0 746 #define GOT_NORMAL 1 747 #define GOT_TLS_GD 2 748 #define GOT_TLS_IE 3 749 #define GOT_TLS_GDESC 4 750 #define GOT_TLS_GD_BOTH_P(type) \ 751 ((type) == (GOT_TLS_GD | GOT_TLS_GDESC)) 752 #define GOT_TLS_GD_P(type) \ 753 ((type) == GOT_TLS_GD || GOT_TLS_GD_BOTH_P (type)) 754 #define GOT_TLS_GDESC_P(type) \ 755 ((type) == GOT_TLS_GDESC || GOT_TLS_GD_BOTH_P (type)) 756 #define GOT_TLS_GD_ANY_P(type) \ 757 (GOT_TLS_GD_P (type) || GOT_TLS_GDESC_P (type)) 758 unsigned char tls_type; 759 760 /* TRUE if a weak symbol with a real definition needs a copy reloc. 761 When there is a weak symbol with a real definition, the processor 762 independent code will have arranged for us to see the real 763 definition first. We need to copy the needs_copy bit from the 764 real definition and check it when allowing copy reloc in PIE. */ 765 unsigned int needs_copy : 1; 766 767 /* TRUE if symbol has at least one BND relocation. */ 768 unsigned int has_bnd_reloc : 1; 769 770 /* Information about the GOT PLT entry. Filled when there are both 771 GOT and PLT relocations against the same function. */ 772 union gotplt_union plt_got; 773 774 /* Information about the second PLT entry. Filled when has_bnd_reloc is 775 set. */ 776 union gotplt_union plt_bnd; 777 778 /* Offset of the GOTPLT entry reserved for the TLS descriptor, 779 starting at the end of the jump table. */ 780 bfd_vma tlsdesc_got; 781 }; 782 783 #define elf_x86_64_hash_entry(ent) \ 784 ((struct elf_x86_64_link_hash_entry *)(ent)) 785 786 struct elf_x86_64_obj_tdata 787 { 788 struct elf_obj_tdata root; 789 790 /* tls_type for each local got entry. */ 791 char *local_got_tls_type; 792 793 /* GOTPLT entries for TLS descriptors. 
*/ 794 bfd_vma *local_tlsdesc_gotent; 795 }; 796 797 #define elf_x86_64_tdata(abfd) \ 798 ((struct elf_x86_64_obj_tdata *) (abfd)->tdata.any) 799 800 #define elf_x86_64_local_got_tls_type(abfd) \ 801 (elf_x86_64_tdata (abfd)->local_got_tls_type) 802 803 #define elf_x86_64_local_tlsdesc_gotent(abfd) \ 804 (elf_x86_64_tdata (abfd)->local_tlsdesc_gotent) 805 806 #define is_x86_64_elf(bfd) \ 807 (bfd_get_flavour (bfd) == bfd_target_elf_flavour \ 808 && elf_tdata (bfd) != NULL \ 809 && elf_object_id (bfd) == X86_64_ELF_DATA) 810 811 static bfd_boolean 812 elf_x86_64_mkobject (bfd *abfd) 813 { 814 return bfd_elf_allocate_object (abfd, sizeof (struct elf_x86_64_obj_tdata), 815 X86_64_ELF_DATA); 816 } 817 818 /* x86-64 ELF linker hash table. */ 819 820 struct elf_x86_64_link_hash_table 821 { 822 struct elf_link_hash_table elf; 823 824 /* Short-cuts to get to dynamic linker sections. */ 825 asection *sdynbss; 826 asection *srelbss; 827 asection *plt_eh_frame; 828 asection *plt_bnd; 829 asection *plt_got; 830 831 union 832 { 833 bfd_signed_vma refcount; 834 bfd_vma offset; 835 } tls_ld_got; 836 837 /* The amount of space used by the jump slots in the GOT. */ 838 bfd_vma sgotplt_jump_table_size; 839 840 /* Small local sym cache. */ 841 struct sym_cache sym_cache; 842 843 bfd_vma (*r_info) (bfd_vma, bfd_vma); 844 bfd_vma (*r_sym) (bfd_vma); 845 unsigned int pointer_r_type; 846 const char *dynamic_interpreter; 847 int dynamic_interpreter_size; 848 849 /* _TLS_MODULE_BASE_ symbol. */ 850 struct bfd_link_hash_entry *tls_module_base; 851 852 /* Used by local STT_GNU_IFUNC symbols. */ 853 htab_t loc_hash_table; 854 void * loc_hash_memory; 855 856 /* The offset into splt of the PLT entry for the TLS descriptor 857 resolver. Special values are 0, if not necessary (or not found 858 to be necessary yet), and -1 if needed but not determined 859 yet. */ 860 bfd_vma tlsdesc_plt; 861 /* The offset into sgot of the GOT entry used by the PLT entry 862 above. */ 863 bfd_vma tlsdesc_got; 864 865 /* The index of the next R_X86_64_JUMP_SLOT entry in .rela.plt. */ 866 bfd_vma next_jump_slot_index; 867 /* The index of the next R_X86_64_IRELATIVE entry in .rela.plt. */ 868 bfd_vma next_irelative_index; 869 }; 870 871 /* Get the x86-64 ELF linker hash table from a link_info structure. */ 872 873 #define elf_x86_64_hash_table(p) \ 874 (elf_hash_table_id ((struct elf_link_hash_table *) ((p)->hash)) \ 875 == X86_64_ELF_DATA ? ((struct elf_x86_64_link_hash_table *) ((p)->hash)) : NULL) 876 877 #define elf_x86_64_compute_jump_table_size(htab) \ 878 ((htab)->elf.srelplt->reloc_count * GOT_ENTRY_SIZE) 879 880 /* Create an entry in an x86-64 ELF linker hash table. */ 881 882 static struct bfd_hash_entry * 883 elf_x86_64_link_hash_newfunc (struct bfd_hash_entry *entry, 884 struct bfd_hash_table *table, 885 const char *string) 886 { 887 /* Allocate the structure if it has not already been allocated by a 888 subclass. */ 889 if (entry == NULL) 890 { 891 entry = (struct bfd_hash_entry *) 892 bfd_hash_allocate (table, 893 sizeof (struct elf_x86_64_link_hash_entry)); 894 if (entry == NULL) 895 return entry; 896 } 897 898 /* Call the allocation method of the superclass. 
*/ 899 entry = _bfd_elf_link_hash_newfunc (entry, table, string); 900 if (entry != NULL) 901 { 902 struct elf_x86_64_link_hash_entry *eh; 903 904 eh = (struct elf_x86_64_link_hash_entry *) entry; 905 eh->dyn_relocs = NULL; 906 eh->tls_type = GOT_UNKNOWN; 907 eh->needs_copy = 0; 908 eh->has_bnd_reloc = 0; 909 eh->plt_bnd.offset = (bfd_vma) -1; 910 eh->plt_got.offset = (bfd_vma) -1; 911 eh->tlsdesc_got = (bfd_vma) -1; 912 } 913 914 return entry; 915 } 916 917 /* Compute a hash of a local hash entry. We use elf_link_hash_entry 918 for local symbol so that we can handle local STT_GNU_IFUNC symbols 919 as global symbol. We reuse indx and dynstr_index for local symbol 920 hash since they aren't used by global symbols in this backend. */ 921 922 static hashval_t 923 elf_x86_64_local_htab_hash (const void *ptr) 924 { 925 struct elf_link_hash_entry *h 926 = (struct elf_link_hash_entry *) ptr; 927 return ELF_LOCAL_SYMBOL_HASH (h->indx, h->dynstr_index); 928 } 929 930 /* Compare local hash entries. */ 931 932 static int 933 elf_x86_64_local_htab_eq (const void *ptr1, const void *ptr2) 934 { 935 struct elf_link_hash_entry *h1 936 = (struct elf_link_hash_entry *) ptr1; 937 struct elf_link_hash_entry *h2 938 = (struct elf_link_hash_entry *) ptr2; 939 940 return h1->indx == h2->indx && h1->dynstr_index == h2->dynstr_index; 941 } 942 943 /* Find and/or create a hash entry for local symbol. */ 944 945 static struct elf_link_hash_entry * 946 elf_x86_64_get_local_sym_hash (struct elf_x86_64_link_hash_table *htab, 947 bfd *abfd, const Elf_Internal_Rela *rel, 948 bfd_boolean create) 949 { 950 struct elf_x86_64_link_hash_entry e, *ret; 951 asection *sec = abfd->sections; 952 hashval_t h = ELF_LOCAL_SYMBOL_HASH (sec->id, 953 htab->r_sym (rel->r_info)); 954 void **slot; 955 956 e.elf.indx = sec->id; 957 e.elf.dynstr_index = htab->r_sym (rel->r_info); 958 slot = htab_find_slot_with_hash (htab->loc_hash_table, &e, h, 959 create ? INSERT : NO_INSERT); 960 961 if (!slot) 962 return NULL; 963 964 if (*slot) 965 { 966 ret = (struct elf_x86_64_link_hash_entry *) *slot; 967 return &ret->elf; 968 } 969 970 ret = (struct elf_x86_64_link_hash_entry *) 971 objalloc_alloc ((struct objalloc *) htab->loc_hash_memory, 972 sizeof (struct elf_x86_64_link_hash_entry)); 973 if (ret) 974 { 975 memset (ret, 0, sizeof (*ret)); 976 ret->elf.indx = sec->id; 977 ret->elf.dynstr_index = htab->r_sym (rel->r_info); 978 ret->elf.dynindx = -1; 979 ret->plt_got.offset = (bfd_vma) -1; 980 *slot = ret; 981 } 982 return &ret->elf; 983 } 984 985 /* Destroy an X86-64 ELF linker hash table. */ 986 987 static void 988 elf_x86_64_link_hash_table_free (bfd *obfd) 989 { 990 struct elf_x86_64_link_hash_table *htab 991 = (struct elf_x86_64_link_hash_table *) obfd->link.hash; 992 993 if (htab->loc_hash_table) 994 htab_delete (htab->loc_hash_table); 995 if (htab->loc_hash_memory) 996 objalloc_free ((struct objalloc *) htab->loc_hash_memory); 997 _bfd_elf_link_hash_table_free (obfd); 998 } 999 1000 /* Create an X86-64 ELF linker hash table. 
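   (An illustrative aside on the local-symbol hash table precedes it.)  */

/* Aside (illustration only, hence the "#if 0"): the loc_hash_table created
   below uses libiberty's hashtab with the hash/eq callbacks defined above.
   A minimal standalone sketch of that pattern, keyed like
   elf_x86_64_get_local_sym_hash but with a made-up entry type, looks like
   this.  */
#if 0
#include "hashtab.h"
#include <stdio.h>
#include <stdlib.h>

struct loc_entry { unsigned int sec_id; unsigned int sym_idx; };

static hashval_t
loc_hash (const void *p)
{
  const struct loc_entry *e = (const struct loc_entry *) p;
  return e->sec_id ^ (e->sym_idx << 6);	/* Any mixing of the two keys.  */
}

static int
loc_eq (const void *p1, const void *p2)
{
  const struct loc_entry *a = (const struct loc_entry *) p1;
  const struct loc_entry *b = (const struct loc_entry *) p2;
  return a->sec_id == b->sec_id && a->sym_idx == b->sym_idx;
}

int
main (void)
{
  htab_t tab = htab_try_create (1024, loc_hash, loc_eq, free);
  struct loc_entry key = { 5, 42 };
  void **slot = htab_find_slot_with_hash (tab, &key, loc_hash (&key), INSERT);

  if (*slot == NULL)
    {
      /* First time this (section, symbol) pair is seen: allocate an entry
	 and record it, as elf_x86_64_get_local_sym_hash does with objalloc.  */
      struct loc_entry *e = (struct loc_entry *) malloc (sizeof *e);
      *e = key;
      *slot = e;
    }
  printf ("%u entries\n", (unsigned int) htab_elements (tab));
  htab_delete (tab);
  return 0;
}
#endif

/* Create an X86-64 ELF linker hash table.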
*/ 1001 1002 static struct bfd_link_hash_table * 1003 elf_x86_64_link_hash_table_create (bfd *abfd) 1004 { 1005 struct elf_x86_64_link_hash_table *ret; 1006 bfd_size_type amt = sizeof (struct elf_x86_64_link_hash_table); 1007 1008 ret = (struct elf_x86_64_link_hash_table *) bfd_zmalloc (amt); 1009 if (ret == NULL) 1010 return NULL; 1011 1012 if (!_bfd_elf_link_hash_table_init (&ret->elf, abfd, 1013 elf_x86_64_link_hash_newfunc, 1014 sizeof (struct elf_x86_64_link_hash_entry), 1015 X86_64_ELF_DATA)) 1016 { 1017 free (ret); 1018 return NULL; 1019 } 1020 1021 if (ABI_64_P (abfd)) 1022 { 1023 ret->r_info = elf64_r_info; 1024 ret->r_sym = elf64_r_sym; 1025 ret->pointer_r_type = R_X86_64_64; 1026 ret->dynamic_interpreter = ELF64_DYNAMIC_INTERPRETER; 1027 ret->dynamic_interpreter_size = sizeof ELF64_DYNAMIC_INTERPRETER; 1028 } 1029 else 1030 { 1031 ret->r_info = elf32_r_info; 1032 ret->r_sym = elf32_r_sym; 1033 ret->pointer_r_type = R_X86_64_32; 1034 ret->dynamic_interpreter = ELF32_DYNAMIC_INTERPRETER; 1035 ret->dynamic_interpreter_size = sizeof ELF32_DYNAMIC_INTERPRETER; 1036 } 1037 1038 ret->loc_hash_table = htab_try_create (1024, 1039 elf_x86_64_local_htab_hash, 1040 elf_x86_64_local_htab_eq, 1041 NULL); 1042 ret->loc_hash_memory = objalloc_create (); 1043 if (!ret->loc_hash_table || !ret->loc_hash_memory) 1044 { 1045 elf_x86_64_link_hash_table_free (abfd); 1046 return NULL; 1047 } 1048 ret->elf.root.hash_table_free = elf_x86_64_link_hash_table_free; 1049 1050 return &ret->elf.root; 1051 } 1052 1053 /* Create .plt, .rela.plt, .got, .got.plt, .rela.got, .dynbss, and 1054 .rela.bss sections in DYNOBJ, and set up shortcuts to them in our 1055 hash table. */ 1056 1057 static bfd_boolean 1058 elf_x86_64_create_dynamic_sections (bfd *dynobj, 1059 struct bfd_link_info *info) 1060 { 1061 struct elf_x86_64_link_hash_table *htab; 1062 1063 if (!_bfd_elf_create_dynamic_sections (dynobj, info)) 1064 return FALSE; 1065 1066 htab = elf_x86_64_hash_table (info); 1067 if (htab == NULL) 1068 return FALSE; 1069 1070 htab->sdynbss = bfd_get_linker_section (dynobj, ".dynbss"); 1071 if (!htab->sdynbss) 1072 abort (); 1073 1074 if (info->executable) 1075 { 1076 /* Always allow copy relocs for building executables. */ 1077 asection *s = bfd_get_linker_section (dynobj, ".rela.bss"); 1078 if (s == NULL) 1079 { 1080 const struct elf_backend_data *bed = get_elf_backend_data (dynobj); 1081 s = bfd_make_section_anyway_with_flags (dynobj, 1082 ".rela.bss", 1083 (bed->dynamic_sec_flags 1084 | SEC_READONLY)); 1085 if (s == NULL 1086 || ! bfd_set_section_alignment (dynobj, s, 1087 bed->s->log_file_align)) 1088 return FALSE; 1089 } 1090 htab->srelbss = s; 1091 } 1092 1093 if (!info->no_ld_generated_unwind_info 1094 && htab->plt_eh_frame == NULL 1095 && htab->elf.splt != NULL) 1096 { 1097 flagword flags = (SEC_ALLOC | SEC_LOAD | SEC_READONLY 1098 | SEC_HAS_CONTENTS | SEC_IN_MEMORY 1099 | SEC_LINKER_CREATED); 1100 htab->plt_eh_frame 1101 = bfd_make_section_anyway_with_flags (dynobj, ".eh_frame", flags); 1102 if (htab->plt_eh_frame == NULL 1103 || !bfd_set_section_alignment (dynobj, htab->plt_eh_frame, 3)) 1104 return FALSE; 1105 } 1106 return TRUE; 1107 } 1108 1109 /* Copy the extra info we tack onto an elf_link_hash_entry. 
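   (An illustrative aside on the dyn_relocs merge precedes it.)  */

/* Aside (illustration only, hence the "#if 0"): elf_x86_64_copy_indirect_symbol
   below folds the indirect symbol's dyn_relocs list into the direct symbol's,
   summing counts for entries naming the same section.  The standalone sketch
   below performs the same pointer-to-pointer merge on a simplified struct in
   which the section is reduced to a plain id.  */
#if 0
#include <stdio.h>

struct dynrel { struct dynrel *next; int sec; int count; };

static struct dynrel *
merge (struct dynrel *dir, struct dynrel *ind)
{
  struct dynrel **pp, *p, *q;

  /* Walk the indirect list: entries matching a direct entry are folded into
     it and unlinked; the rest stay on the indirect list.  */
  for (pp = &ind; (p = *pp) != NULL; )
    {
      for (q = dir; q != NULL; q = q->next)
	if (q->sec == p->sec)
	  {
	    q->count += p->count;
	    *pp = p->next;
	    break;
	  }
      if (q == NULL)
	pp = &p->next;
    }

  /* Chain what is left of the indirect list in front of the direct list,
     just as the real code prepends eind->dyn_relocs to edir->dyn_relocs.  */
  *pp = dir;
  return ind;
}

int
main (void)
{
  struct dynrel d2 = { NULL, 2, 1 }, d1 = { &d2, 1, 4 };
  struct dynrel i3 = { NULL, 3, 2 }, i1 = { &i3, 1, 5 };
  struct dynrel *l;

  for (l = merge (&d1, &i1); l != NULL; l = l->next)
    printf ("sec %d: %d relocs\n", l->sec, l->count);
  return 0;
}
#endif

/* Copy the extra info we tack onto an elf_link_hash_entry.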
*/ 1110 1111 static void 1112 elf_x86_64_copy_indirect_symbol (struct bfd_link_info *info, 1113 struct elf_link_hash_entry *dir, 1114 struct elf_link_hash_entry *ind) 1115 { 1116 struct elf_x86_64_link_hash_entry *edir, *eind; 1117 1118 edir = (struct elf_x86_64_link_hash_entry *) dir; 1119 eind = (struct elf_x86_64_link_hash_entry *) ind; 1120 1121 if (!edir->has_bnd_reloc) 1122 edir->has_bnd_reloc = eind->has_bnd_reloc; 1123 1124 if (eind->dyn_relocs != NULL) 1125 { 1126 if (edir->dyn_relocs != NULL) 1127 { 1128 struct elf_dyn_relocs **pp; 1129 struct elf_dyn_relocs *p; 1130 1131 /* Add reloc counts against the indirect sym to the direct sym 1132 list. Merge any entries against the same section. */ 1133 for (pp = &eind->dyn_relocs; (p = *pp) != NULL; ) 1134 { 1135 struct elf_dyn_relocs *q; 1136 1137 for (q = edir->dyn_relocs; q != NULL; q = q->next) 1138 if (q->sec == p->sec) 1139 { 1140 q->pc_count += p->pc_count; 1141 q->count += p->count; 1142 *pp = p->next; 1143 break; 1144 } 1145 if (q == NULL) 1146 pp = &p->next; 1147 } 1148 *pp = edir->dyn_relocs; 1149 } 1150 1151 edir->dyn_relocs = eind->dyn_relocs; 1152 eind->dyn_relocs = NULL; 1153 } 1154 1155 if (ind->root.type == bfd_link_hash_indirect 1156 && dir->got.refcount <= 0) 1157 { 1158 edir->tls_type = eind->tls_type; 1159 eind->tls_type = GOT_UNKNOWN; 1160 } 1161 1162 if (ELIMINATE_COPY_RELOCS 1163 && ind->root.type != bfd_link_hash_indirect 1164 && dir->dynamic_adjusted) 1165 { 1166 /* If called to transfer flags for a weakdef during processing 1167 of elf_adjust_dynamic_symbol, don't copy non_got_ref. 1168 We clear it ourselves for ELIMINATE_COPY_RELOCS. */ 1169 dir->ref_dynamic |= ind->ref_dynamic; 1170 dir->ref_regular |= ind->ref_regular; 1171 dir->ref_regular_nonweak |= ind->ref_regular_nonweak; 1172 dir->needs_plt |= ind->needs_plt; 1173 dir->pointer_equality_needed |= ind->pointer_equality_needed; 1174 } 1175 else 1176 _bfd_elf_link_hash_copy_indirect (info, dir, ind); 1177 } 1178 1179 static bfd_boolean 1180 elf64_x86_64_elf_object_p (bfd *abfd) 1181 { 1182 /* Set the right machine number for an x86-64 elf64 file. */ 1183 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64); 1184 return TRUE; 1185 } 1186 1187 static bfd_boolean 1188 elf32_x86_64_elf_object_p (bfd *abfd) 1189 { 1190 /* Set the right machine number for an x86-64 elf32 file. */ 1191 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32); 1192 return TRUE; 1193 } 1194 1195 /* Return TRUE if the TLS access code sequence support transition 1196 from R_TYPE. */ 1197 1198 static bfd_boolean 1199 elf_x86_64_check_tls_transition (bfd *abfd, 1200 struct bfd_link_info *info, 1201 asection *sec, 1202 bfd_byte *contents, 1203 Elf_Internal_Shdr *symtab_hdr, 1204 struct elf_link_hash_entry **sym_hashes, 1205 unsigned int r_type, 1206 const Elf_Internal_Rela *rel, 1207 const Elf_Internal_Rela *relend) 1208 { 1209 unsigned int val; 1210 unsigned long r_symndx; 1211 bfd_boolean largepic = FALSE; 1212 struct elf_link_hash_entry *h; 1213 bfd_vma offset; 1214 struct elf_x86_64_link_hash_table *htab; 1215 1216 /* Get the section contents. */ 1217 if (contents == NULL) 1218 { 1219 if (elf_section_data (sec)->this_hdr.contents != NULL) 1220 contents = elf_section_data (sec)->this_hdr.contents; 1221 else 1222 { 1223 /* FIXME: How to better handle error condition? */ 1224 if (!bfd_malloc_and_get_section (abfd, sec, &contents)) 1225 return FALSE; 1226 1227 /* Cache the section contents for elf_link_input_bfd. 
*/ 1228 elf_section_data (sec)->this_hdr.contents = contents; 1229 } 1230 } 1231 1232 htab = elf_x86_64_hash_table (info); 1233 offset = rel->r_offset; 1234 switch (r_type) 1235 { 1236 case R_X86_64_TLSGD: 1237 case R_X86_64_TLSLD: 1238 if ((rel + 1) >= relend) 1239 return FALSE; 1240 1241 if (r_type == R_X86_64_TLSGD) 1242 { 1243 /* Check transition from GD access model. For 64bit, only 1244 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 1245 .word 0x6666; rex64; call __tls_get_addr 1246 can transit to different access model. For 32bit, only 1247 leaq foo@tlsgd(%rip), %rdi 1248 .word 0x6666; rex64; call __tls_get_addr 1249 can transit to different access model. For largepic 1250 we also support: 1251 leaq foo@tlsgd(%rip), %rdi 1252 movabsq $__tls_get_addr@pltoff, %rax 1253 addq $rbx, %rax 1254 call *%rax. */ 1255 1256 static const unsigned char call[] = { 0x66, 0x66, 0x48, 0xe8 }; 1257 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d }; 1258 1259 if ((offset + 12) > sec->size) 1260 return FALSE; 1261 1262 if (memcmp (contents + offset + 4, call, 4) != 0) 1263 { 1264 if (!ABI_64_P (abfd) 1265 || (offset + 19) > sec->size 1266 || offset < 3 1267 || memcmp (contents + offset - 3, leaq + 1, 3) != 0 1268 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0 1269 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5) 1270 != 0) 1271 return FALSE; 1272 largepic = TRUE; 1273 } 1274 else if (ABI_64_P (abfd)) 1275 { 1276 if (offset < 4 1277 || memcmp (contents + offset - 4, leaq, 4) != 0) 1278 return FALSE; 1279 } 1280 else 1281 { 1282 if (offset < 3 1283 || memcmp (contents + offset - 3, leaq + 1, 3) != 0) 1284 return FALSE; 1285 } 1286 } 1287 else 1288 { 1289 /* Check transition from LD access model. Only 1290 leaq foo@tlsld(%rip), %rdi; 1291 call __tls_get_addr 1292 can transit to different access model. For largepic 1293 we also support: 1294 leaq foo@tlsld(%rip), %rdi 1295 movabsq $__tls_get_addr@pltoff, %rax 1296 addq $rbx, %rax 1297 call *%rax. */ 1298 1299 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d }; 1300 1301 if (offset < 3 || (offset + 9) > sec->size) 1302 return FALSE; 1303 1304 if (memcmp (contents + offset - 3, lea, 3) != 0) 1305 return FALSE; 1306 1307 if (0xe8 != *(contents + offset + 4)) 1308 { 1309 if (!ABI_64_P (abfd) 1310 || (offset + 19) > sec->size 1311 || memcmp (contents + offset + 4, "\x48\xb8", 2) != 0 1312 || memcmp (contents + offset + 14, "\x48\x01\xd8\xff\xd0", 5) 1313 != 0) 1314 return FALSE; 1315 largepic = TRUE; 1316 } 1317 } 1318 1319 r_symndx = htab->r_sym (rel[1].r_info); 1320 if (r_symndx < symtab_hdr->sh_info) 1321 return FALSE; 1322 1323 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 1324 /* Use strncmp to check __tls_get_addr since __tls_get_addr 1325 may be versioned. */ 1326 return (h != NULL 1327 && h->root.root.string != NULL 1328 && (largepic 1329 ? ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLTOFF64 1330 : (ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PC32 1331 || ELF32_R_TYPE (rel[1].r_info) == R_X86_64_PLT32)) 1332 && (strncmp (h->root.root.string, 1333 "__tls_get_addr", 14) == 0)); 1334 1335 case R_X86_64_GOTTPOFF: 1336 /* Check transition from IE access model: 1337 mov foo@gottpoff(%rip), %reg 1338 add foo@gottpoff(%rip), %reg 1339 */ 1340 1341 /* Check REX prefix first. */ 1342 if (offset >= 3 && (offset + 4) <= sec->size) 1343 { 1344 val = bfd_get_8 (abfd, contents + offset - 3); 1345 if (val != 0x48 && val != 0x4c) 1346 { 1347 /* X32 may have 0x44 REX prefix or no REX prefix. 
*/ 1348 if (ABI_64_P (abfd)) 1349 return FALSE; 1350 } 1351 } 1352 else 1353 { 1354 /* X32 may not have any REX prefix. */ 1355 if (ABI_64_P (abfd)) 1356 return FALSE; 1357 if (offset < 2 || (offset + 3) > sec->size) 1358 return FALSE; 1359 } 1360 1361 val = bfd_get_8 (abfd, contents + offset - 2); 1362 if (val != 0x8b && val != 0x03) 1363 return FALSE; 1364 1365 val = bfd_get_8 (abfd, contents + offset - 1); 1366 return (val & 0xc7) == 5; 1367 1368 case R_X86_64_GOTPC32_TLSDESC: 1369 /* Check transition from GDesc access model: 1370 leaq x@tlsdesc(%rip), %rax 1371 1372 Make sure it's a leaq adding rip to a 32-bit offset 1373 into any register, although it's probably almost always 1374 going to be rax. */ 1375 1376 if (offset < 3 || (offset + 4) > sec->size) 1377 return FALSE; 1378 1379 val = bfd_get_8 (abfd, contents + offset - 3); 1380 if ((val & 0xfb) != 0x48) 1381 return FALSE; 1382 1383 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d) 1384 return FALSE; 1385 1386 val = bfd_get_8 (abfd, contents + offset - 1); 1387 return (val & 0xc7) == 0x05; 1388 1389 case R_X86_64_TLSDESC_CALL: 1390 /* Check transition from GDesc access model: 1391 call *x@tlsdesc(%rax) 1392 */ 1393 if (offset + 2 <= sec->size) 1394 { 1395 /* Make sure that it's a call *x@tlsdesc(%rax). */ 1396 static const unsigned char call[] = { 0xff, 0x10 }; 1397 return memcmp (contents + offset, call, 2) == 0; 1398 } 1399 1400 return FALSE; 1401 1402 default: 1403 abort (); 1404 } 1405 } 1406 1407 /* Return TRUE if the TLS access transition is OK or no transition 1408 will be performed. Update R_TYPE if there is a transition. */ 1409 1410 static bfd_boolean 1411 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, 1412 asection *sec, bfd_byte *contents, 1413 Elf_Internal_Shdr *symtab_hdr, 1414 struct elf_link_hash_entry **sym_hashes, 1415 unsigned int *r_type, int tls_type, 1416 const Elf_Internal_Rela *rel, 1417 const Elf_Internal_Rela *relend, 1418 struct elf_link_hash_entry *h, 1419 unsigned long r_symndx) 1420 { 1421 unsigned int from_type = *r_type; 1422 unsigned int to_type = from_type; 1423 bfd_boolean check = TRUE; 1424 1425 /* Skip TLS transition for functions. */ 1426 if (h != NULL 1427 && (h->type == STT_FUNC 1428 || h->type == STT_GNU_IFUNC)) 1429 return TRUE; 1430 1431 switch (from_type) 1432 { 1433 case R_X86_64_TLSGD: 1434 case R_X86_64_GOTPC32_TLSDESC: 1435 case R_X86_64_TLSDESC_CALL: 1436 case R_X86_64_GOTTPOFF: 1437 if (info->executable) 1438 { 1439 if (h == NULL) 1440 to_type = R_X86_64_TPOFF32; 1441 else 1442 to_type = R_X86_64_GOTTPOFF; 1443 } 1444 1445 /* When we are called from elf_x86_64_relocate_section, 1446 CONTENTS isn't NULL and there may be additional transitions 1447 based on TLS_TYPE. */ 1448 if (contents != NULL) 1449 { 1450 unsigned int new_to_type = to_type; 1451 1452 if (info->executable 1453 && h != NULL 1454 && h->dynindx == -1 1455 && tls_type == GOT_TLS_IE) 1456 new_to_type = R_X86_64_TPOFF32; 1457 1458 if (to_type == R_X86_64_TLSGD 1459 || to_type == R_X86_64_GOTPC32_TLSDESC 1460 || to_type == R_X86_64_TLSDESC_CALL) 1461 { 1462 if (tls_type == GOT_TLS_IE) 1463 new_to_type = R_X86_64_GOTTPOFF; 1464 } 1465 1466 /* We checked the transition before when we were called from 1467 elf_x86_64_check_relocs. We only want to check the new 1468 transition which hasn't been checked before. 
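   That is, re-run the opcode-sequence check only when this pass picks a
   different target type (new_to_type != to_type) while the first pass, made
   from elf_x86_64_check_relocs, had left the reloc type unchanged
   (from_type == to_type).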
*/ 1469 check = new_to_type != to_type && from_type == to_type; 1470 to_type = new_to_type; 1471 } 1472 1473 break; 1474 1475 case R_X86_64_TLSLD: 1476 if (info->executable) 1477 to_type = R_X86_64_TPOFF32; 1478 break; 1479 1480 default: 1481 return TRUE; 1482 } 1483 1484 /* Return TRUE if there is no transition. */ 1485 if (from_type == to_type) 1486 return TRUE; 1487 1488 /* Check if the transition can be performed. */ 1489 if (check 1490 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents, 1491 symtab_hdr, sym_hashes, 1492 from_type, rel, relend)) 1493 { 1494 reloc_howto_type *from, *to; 1495 const char *name; 1496 1497 from = elf_x86_64_rtype_to_howto (abfd, from_type); 1498 to = elf_x86_64_rtype_to_howto (abfd, to_type); 1499 1500 if (h) 1501 name = h->root.root.string; 1502 else 1503 { 1504 struct elf_x86_64_link_hash_table *htab; 1505 1506 htab = elf_x86_64_hash_table (info); 1507 if (htab == NULL) 1508 name = "*unknown*"; 1509 else 1510 { 1511 Elf_Internal_Sym *isym; 1512 1513 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 1514 abfd, r_symndx); 1515 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); 1516 } 1517 } 1518 1519 (*_bfd_error_handler) 1520 (_("%B: TLS transition from %s to %s against `%s' at 0x%lx " 1521 "in section `%A' failed"), 1522 abfd, sec, from->name, to->name, name, 1523 (unsigned long) rel->r_offset); 1524 bfd_set_error (bfd_error_bad_value); 1525 return FALSE; 1526 } 1527 1528 *r_type = to_type; 1529 return TRUE; 1530 } 1531 1532 /* Rename some of the generic section flags to better document how they 1533 are used here. */ 1534 #define need_convert_mov_to_lea sec_flg0 1535 1536 /* Look through the relocs for a section during the first phase, and 1537 calculate needed space in the global offset table, procedure 1538 linkage table, and dynamic reloc sections. */ 1539 1540 static bfd_boolean 1541 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, 1542 asection *sec, 1543 const Elf_Internal_Rela *relocs) 1544 { 1545 struct elf_x86_64_link_hash_table *htab; 1546 Elf_Internal_Shdr *symtab_hdr; 1547 struct elf_link_hash_entry **sym_hashes; 1548 const Elf_Internal_Rela *rel; 1549 const Elf_Internal_Rela *rel_end; 1550 asection *sreloc; 1551 bfd_boolean use_plt_got; 1552 1553 if (info->relocatable) 1554 return TRUE; 1555 1556 BFD_ASSERT (is_x86_64_elf (abfd)); 1557 1558 htab = elf_x86_64_hash_table (info); 1559 if (htab == NULL) 1560 return FALSE; 1561 1562 use_plt_got = get_elf_x86_64_backend_data (abfd) == &elf_x86_64_arch_bed; 1563 1564 symtab_hdr = &elf_symtab_hdr (abfd); 1565 sym_hashes = elf_sym_hashes (abfd); 1566 1567 sreloc = NULL; 1568 1569 rel_end = relocs + sec->reloc_count; 1570 for (rel = relocs; rel < rel_end; rel++) 1571 { 1572 unsigned int r_type; 1573 unsigned long r_symndx; 1574 struct elf_link_hash_entry *h; 1575 Elf_Internal_Sym *isym; 1576 const char *name; 1577 bfd_boolean size_reloc; 1578 1579 r_symndx = htab->r_sym (rel->r_info); 1580 r_type = ELF32_R_TYPE (rel->r_info); 1581 1582 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) 1583 { 1584 (*_bfd_error_handler) (_("%B: bad symbol index: %d"), 1585 abfd, r_symndx); 1586 return FALSE; 1587 } 1588 1589 if (r_symndx < symtab_hdr->sh_info) 1590 { 1591 /* A local symbol. */ 1592 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 1593 abfd, r_symndx); 1594 if (isym == NULL) 1595 return FALSE; 1596 1597 /* Check relocation against local STT_GNU_IFUNC symbol. 
*/ 1598 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) 1599 { 1600 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, 1601 TRUE); 1602 if (h == NULL) 1603 return FALSE; 1604 1605 /* Fake a STT_GNU_IFUNC symbol. */ 1606 h->type = STT_GNU_IFUNC; 1607 h->def_regular = 1; 1608 h->ref_regular = 1; 1609 h->forced_local = 1; 1610 h->root.type = bfd_link_hash_defined; 1611 } 1612 else 1613 h = NULL; 1614 } 1615 else 1616 { 1617 isym = NULL; 1618 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 1619 while (h->root.type == bfd_link_hash_indirect 1620 || h->root.type == bfd_link_hash_warning) 1621 h = (struct elf_link_hash_entry *) h->root.u.i.link; 1622 } 1623 1624 /* Check invalid x32 relocations. */ 1625 if (!ABI_64_P (abfd)) 1626 switch (r_type) 1627 { 1628 default: 1629 break; 1630 1631 case R_X86_64_DTPOFF64: 1632 case R_X86_64_TPOFF64: 1633 case R_X86_64_PC64: 1634 case R_X86_64_GOTOFF64: 1635 case R_X86_64_GOT64: 1636 case R_X86_64_GOTPCREL64: 1637 case R_X86_64_GOTPC64: 1638 case R_X86_64_GOTPLT64: 1639 case R_X86_64_PLTOFF64: 1640 { 1641 if (h) 1642 name = h->root.root.string; 1643 else 1644 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, 1645 NULL); 1646 (*_bfd_error_handler) 1647 (_("%B: relocation %s against symbol `%s' isn't " 1648 "supported in x32 mode"), abfd, 1649 x86_64_elf_howto_table[r_type].name, name); 1650 bfd_set_error (bfd_error_bad_value); 1651 return FALSE; 1652 } 1653 break; 1654 } 1655 1656 if (h != NULL) 1657 { 1658 /* Create the ifunc sections for static executables. If we 1659 never see an indirect function symbol nor we are building 1660 a static executable, those sections will be empty and 1661 won't appear in output. */ 1662 switch (r_type) 1663 { 1664 default: 1665 break; 1666 1667 case R_X86_64_PC32_BND: 1668 case R_X86_64_PLT32_BND: 1669 case R_X86_64_PC32: 1670 case R_X86_64_PLT32: 1671 case R_X86_64_32: 1672 case R_X86_64_64: 1673 /* MPX PLT is supported only if elf_x86_64_arch_bed 1674 is used in 64-bit mode. */ 1675 if (ABI_64_P (abfd) 1676 && info->bndplt 1677 && (get_elf_x86_64_backend_data (abfd) 1678 == &elf_x86_64_arch_bed)) 1679 { 1680 elf_x86_64_hash_entry (h)->has_bnd_reloc = 1; 1681 1682 /* Create the second PLT for Intel MPX support. */ 1683 if (htab->plt_bnd == NULL) 1684 { 1685 unsigned int plt_bnd_align; 1686 const struct elf_backend_data *bed; 1687 1688 bed = get_elf_backend_data (info->output_bfd); 1689 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt2_entry) == 8 1690 && (sizeof (elf_x86_64_bnd_plt2_entry) 1691 == sizeof (elf_x86_64_legacy_plt2_entry))); 1692 plt_bnd_align = 3; 1693 1694 if (htab->elf.dynobj == NULL) 1695 htab->elf.dynobj = abfd; 1696 htab->plt_bnd 1697 = bfd_make_section_anyway_with_flags (htab->elf.dynobj, 1698 ".plt.bnd", 1699 (bed->dynamic_sec_flags 1700 | SEC_ALLOC 1701 | SEC_CODE 1702 | SEC_LOAD 1703 | SEC_READONLY)); 1704 if (htab->plt_bnd == NULL 1705 || !bfd_set_section_alignment (htab->elf.dynobj, 1706 htab->plt_bnd, 1707 plt_bnd_align)) 1708 return FALSE; 1709 } 1710 } 1711 1712 case R_X86_64_32S: 1713 case R_X86_64_PC64: 1714 case R_X86_64_GOTPCREL: 1715 case R_X86_64_GOTPCREL64: 1716 if (htab->elf.dynobj == NULL) 1717 htab->elf.dynobj = abfd; 1718 if (!_bfd_elf_create_ifunc_sections (htab->elf.dynobj, info)) 1719 return FALSE; 1720 break; 1721 } 1722 1723 /* It is referenced by a non-shared object. */ 1724 h->ref_regular = 1; 1725 h->root.non_ir_ref = 1; 1726 } 1727 1728 if (! 
elf_x86_64_tls_transition (info, abfd, sec, NULL, 1729 symtab_hdr, sym_hashes, 1730 &r_type, GOT_UNKNOWN, 1731 rel, rel_end, h, r_symndx)) 1732 return FALSE; 1733 1734 switch (r_type) 1735 { 1736 case R_X86_64_TLSLD: 1737 htab->tls_ld_got.refcount += 1; 1738 goto create_got; 1739 1740 case R_X86_64_TPOFF32: 1741 if (!info->executable && ABI_64_P (abfd)) 1742 { 1743 if (h) 1744 name = h->root.root.string; 1745 else 1746 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, 1747 NULL); 1748 (*_bfd_error_handler) 1749 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"), 1750 abfd, 1751 x86_64_elf_howto_table[r_type].name, name); 1752 bfd_set_error (bfd_error_bad_value); 1753 return FALSE; 1754 } 1755 break; 1756 1757 case R_X86_64_GOTTPOFF: 1758 if (!info->executable) 1759 info->flags |= DF_STATIC_TLS; 1760 /* Fall through */ 1761 1762 case R_X86_64_GOT32: 1763 case R_X86_64_GOTPCREL: 1764 case R_X86_64_TLSGD: 1765 case R_X86_64_GOT64: 1766 case R_X86_64_GOTPCREL64: 1767 case R_X86_64_GOTPLT64: 1768 case R_X86_64_GOTPC32_TLSDESC: 1769 case R_X86_64_TLSDESC_CALL: 1770 /* This symbol requires a global offset table entry. */ 1771 { 1772 int tls_type, old_tls_type; 1773 1774 switch (r_type) 1775 { 1776 default: tls_type = GOT_NORMAL; break; 1777 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break; 1778 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break; 1779 case R_X86_64_GOTPC32_TLSDESC: 1780 case R_X86_64_TLSDESC_CALL: 1781 tls_type = GOT_TLS_GDESC; break; 1782 } 1783 1784 if (h != NULL) 1785 { 1786 h->got.refcount += 1; 1787 old_tls_type = elf_x86_64_hash_entry (h)->tls_type; 1788 } 1789 else 1790 { 1791 bfd_signed_vma *local_got_refcounts; 1792 1793 /* This is a global offset table entry for a local symbol. */ 1794 local_got_refcounts = elf_local_got_refcounts (abfd); 1795 if (local_got_refcounts == NULL) 1796 { 1797 bfd_size_type size; 1798 1799 size = symtab_hdr->sh_info; 1800 size *= sizeof (bfd_signed_vma) 1801 + sizeof (bfd_vma) + sizeof (char); 1802 local_got_refcounts = ((bfd_signed_vma *) 1803 bfd_zalloc (abfd, size)); 1804 if (local_got_refcounts == NULL) 1805 return FALSE; 1806 elf_local_got_refcounts (abfd) = local_got_refcounts; 1807 elf_x86_64_local_tlsdesc_gotent (abfd) 1808 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info); 1809 elf_x86_64_local_got_tls_type (abfd) 1810 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info); 1811 } 1812 local_got_refcounts[r_symndx] += 1; 1813 old_tls_type 1814 = elf_x86_64_local_got_tls_type (abfd) [r_symndx]; 1815 } 1816 1817 /* If a TLS symbol is accessed using IE at least once, 1818 there is no point to use dynamic model for it. */ 1819 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN 1820 && (! 
GOT_TLS_GD_ANY_P (old_tls_type) 1821 || tls_type != GOT_TLS_IE)) 1822 { 1823 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type)) 1824 tls_type = old_tls_type; 1825 else if (GOT_TLS_GD_ANY_P (old_tls_type) 1826 && GOT_TLS_GD_ANY_P (tls_type)) 1827 tls_type |= old_tls_type; 1828 else 1829 { 1830 if (h) 1831 name = h->root.root.string; 1832 else 1833 name = bfd_elf_sym_name (abfd, symtab_hdr, 1834 isym, NULL); 1835 (*_bfd_error_handler) 1836 (_("%B: '%s' accessed both as normal and thread local symbol"), 1837 abfd, name); 1838 bfd_set_error (bfd_error_bad_value); 1839 return FALSE; 1840 } 1841 } 1842 1843 if (old_tls_type != tls_type) 1844 { 1845 if (h != NULL) 1846 elf_x86_64_hash_entry (h)->tls_type = tls_type; 1847 else 1848 elf_x86_64_local_got_tls_type (abfd) [r_symndx] = tls_type; 1849 } 1850 } 1851 /* Fall through */ 1852 1853 case R_X86_64_GOTOFF64: 1854 case R_X86_64_GOTPC32: 1855 case R_X86_64_GOTPC64: 1856 create_got: 1857 if (htab->elf.sgot == NULL) 1858 { 1859 if (htab->elf.dynobj == NULL) 1860 htab->elf.dynobj = abfd; 1861 if (!_bfd_elf_create_got_section (htab->elf.dynobj, 1862 info)) 1863 return FALSE; 1864 } 1865 break; 1866 1867 case R_X86_64_PLT32: 1868 case R_X86_64_PLT32_BND: 1869 /* This symbol requires a procedure linkage table entry. We 1870 actually build the entry in adjust_dynamic_symbol, 1871 because this might be a case of linking PIC code which is 1872 never referenced by a dynamic object, in which case we 1873 don't need to generate a procedure linkage table entry 1874 after all. */ 1875 1876 /* If this is a local symbol, we resolve it directly without 1877 creating a procedure linkage table entry. */ 1878 if (h == NULL) 1879 continue; 1880 1881 h->needs_plt = 1; 1882 h->plt.refcount += 1; 1883 break; 1884 1885 case R_X86_64_PLTOFF64: 1886 /* This tries to form the 'address' of a function relative 1887 to GOT. For global symbols we need a PLT entry. */ 1888 if (h != NULL) 1889 { 1890 h->needs_plt = 1; 1891 h->plt.refcount += 1; 1892 } 1893 goto create_got; 1894 1895 case R_X86_64_SIZE32: 1896 case R_X86_64_SIZE64: 1897 size_reloc = TRUE; 1898 goto do_size; 1899 1900 case R_X86_64_32: 1901 if (!ABI_64_P (abfd)) 1902 goto pointer; 1903 case R_X86_64_8: 1904 case R_X86_64_16: 1905 case R_X86_64_32S: 1906 /* Let's help debug shared library creation. These relocs 1907 cannot be used in shared libs. Don't error out for 1908 sections we don't care about, such as debug sections or 1909 non-constant sections. */ 1910 if (info->shared 1911 && (sec->flags & SEC_ALLOC) != 0 1912 && (sec->flags & SEC_READONLY) != 0) 1913 { 1914 if (h) 1915 name = h->root.root.string; 1916 else 1917 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); 1918 (*_bfd_error_handler) 1919 (_("%B: relocation %s against `%s' can not be used when making a shared object; recompile with -fPIC"), 1920 abfd, x86_64_elf_howto_table[r_type].name, name); 1921 bfd_set_error (bfd_error_bad_value); 1922 return FALSE; 1923 } 1924 /* Fall through. */ 1925 1926 case R_X86_64_PC8: 1927 case R_X86_64_PC16: 1928 case R_X86_64_PC32: 1929 case R_X86_64_PC32_BND: 1930 case R_X86_64_PC64: 1931 case R_X86_64_64: 1932 pointer: 1933 if (h != NULL && info->executable) 1934 { 1935 /* If this reloc is in a read-only section, we might 1936 need a copy reloc. We can't check reliably at this 1937 stage whether the section is read-only, as input 1938 sections have not yet been mapped to output sections. 1939 Tentatively set the flag for now, and correct in 1940 adjust_dynamic_symbol. 
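	     Roughly, a copy reloc lets a non-PIC executable reference a
	     variable that is defined in a shared object: for example a
	     movl foo(%rip), %eax (ending up as R_X86_64_PC32) can be
	     satisfied by reserving room for foo in .dynbss and emitting
	     an R_X86_64_COPY relocation, so the dynamic linker copies the
	     initial value into the executable's image at startup and the
	     reference resolves locally from then on.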
*/ 1941 h->non_got_ref = 1; 1942 1943 /* We may need a .plt entry if the function this reloc 1944 refers to is in a shared lib. */ 1945 h->plt.refcount += 1; 1946 if (r_type != R_X86_64_PC32 1947 && r_type != R_X86_64_PC32_BND 1948 && r_type != R_X86_64_PC64) 1949 h->pointer_equality_needed = 1; 1950 } 1951 1952 size_reloc = FALSE; 1953 do_size: 1954 /* If we are creating a shared library, and this is a reloc 1955 against a global symbol, or a non PC relative reloc 1956 against a local symbol, then we need to copy the reloc 1957 into the shared library. However, if we are linking with 1958 -Bsymbolic, we do not need to copy a reloc against a 1959 global symbol which is defined in an object we are 1960 including in the link (i.e., DEF_REGULAR is set). At 1961 this point we have not seen all the input files, so it is 1962 possible that DEF_REGULAR is not set now but will be set 1963 later (it is never cleared). In case of a weak definition, 1964 DEF_REGULAR may be cleared later by a strong definition in 1965 a shared library. We account for that possibility below by 1966 storing information in the relocs_copied field of the hash 1967 table entry. A similar situation occurs when creating 1968 shared libraries and symbol visibility changes render the 1969 symbol local. 1970 1971 If on the other hand, we are creating an executable, we 1972 may need to keep relocations for symbols satisfied by a 1973 dynamic library if we manage to avoid copy relocs for the 1974 symbol. */ 1975 if ((info->shared 1976 && (sec->flags & SEC_ALLOC) != 0 1977 && (! IS_X86_64_PCREL_TYPE (r_type) 1978 || (h != NULL 1979 && (! SYMBOLIC_BIND (info, h) 1980 || h->root.type == bfd_link_hash_defweak 1981 || !h->def_regular)))) 1982 || (ELIMINATE_COPY_RELOCS 1983 && !info->shared 1984 && (sec->flags & SEC_ALLOC) != 0 1985 && h != NULL 1986 && (h->root.type == bfd_link_hash_defweak 1987 || !h->def_regular))) 1988 { 1989 struct elf_dyn_relocs *p; 1990 struct elf_dyn_relocs **head; 1991 1992 /* We must copy these reloc types into the output file. 1993 Create a reloc section in dynobj and make room for 1994 this reloc. */ 1995 if (sreloc == NULL) 1996 { 1997 if (htab->elf.dynobj == NULL) 1998 htab->elf.dynobj = abfd; 1999 2000 sreloc = _bfd_elf_make_dynamic_reloc_section 2001 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2, 2002 abfd, /*rela?*/ TRUE); 2003 2004 if (sreloc == NULL) 2005 return FALSE; 2006 } 2007 2008 /* If this is a global symbol, we count the number of 2009 relocations we need for this symbol. */ 2010 if (h != NULL) 2011 { 2012 head = &((struct elf_x86_64_link_hash_entry *) h)->dyn_relocs; 2013 } 2014 else 2015 { 2016 /* Track dynamic relocs needed for local syms too. 2017 We really need local syms available to do this 2018 easily. Oh well. */ 2019 asection *s; 2020 void **vpp; 2021 2022 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 2023 abfd, r_symndx); 2024 if (isym == NULL) 2025 return FALSE; 2026 2027 s = bfd_section_from_elf_index (abfd, isym->st_shndx); 2028 if (s == NULL) 2029 s = sec; 2030 2031 /* Beware of type punned pointers vs strict aliasing 2032 rules. 
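		 Going through a void ** intermediate, instead of casting
		 &elf_section_data (s)->local_dynrel straight to
		 struct elf_dyn_relocs **, keeps the type punning explicit
		 and avoids strict-aliasing warnings from the compiler.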
*/ 2033 vpp = &(elf_section_data (s)->local_dynrel); 2034 head = (struct elf_dyn_relocs **)vpp; 2035 } 2036 2037 p = *head; 2038 if (p == NULL || p->sec != sec) 2039 { 2040 bfd_size_type amt = sizeof *p; 2041 2042 p = ((struct elf_dyn_relocs *) 2043 bfd_alloc (htab->elf.dynobj, amt)); 2044 if (p == NULL) 2045 return FALSE; 2046 p->next = *head; 2047 *head = p; 2048 p->sec = sec; 2049 p->count = 0; 2050 p->pc_count = 0; 2051 } 2052 2053 p->count += 1; 2054 /* Count size relocation as PC-relative relocation. */ 2055 if (IS_X86_64_PCREL_TYPE (r_type) || size_reloc) 2056 p->pc_count += 1; 2057 } 2058 break; 2059 2060 /* This relocation describes the C++ object vtable hierarchy. 2061 Reconstruct it for later use during GC. */ 2062 case R_X86_64_GNU_VTINHERIT: 2063 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) 2064 return FALSE; 2065 break; 2066 2067 /* This relocation describes which C++ vtable entries are actually 2068 used. Record for later use during GC. */ 2069 case R_X86_64_GNU_VTENTRY: 2070 BFD_ASSERT (h != NULL); 2071 if (h != NULL 2072 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) 2073 return FALSE; 2074 break; 2075 2076 default: 2077 break; 2078 } 2079 2080 if (use_plt_got 2081 && h != NULL 2082 && h->plt.refcount > 0 2083 && (((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed) 2084 || h->got.refcount > 0) 2085 && htab->plt_got == NULL) 2086 { 2087 /* Create the GOT procedure linkage table. */ 2088 unsigned int plt_got_align; 2089 const struct elf_backend_data *bed; 2090 2091 bed = get_elf_backend_data (info->output_bfd); 2092 BFD_ASSERT (sizeof (elf_x86_64_legacy_plt2_entry) == 8 2093 && (sizeof (elf_x86_64_bnd_plt2_entry) 2094 == sizeof (elf_x86_64_legacy_plt2_entry))); 2095 plt_got_align = 3; 2096 2097 if (htab->elf.dynobj == NULL) 2098 htab->elf.dynobj = abfd; 2099 htab->plt_got 2100 = bfd_make_section_anyway_with_flags (htab->elf.dynobj, 2101 ".plt.got", 2102 (bed->dynamic_sec_flags 2103 | SEC_ALLOC 2104 | SEC_CODE 2105 | SEC_LOAD 2106 | SEC_READONLY)); 2107 if (htab->plt_got == NULL 2108 || !bfd_set_section_alignment (htab->elf.dynobj, 2109 htab->plt_got, 2110 plt_got_align)) 2111 return FALSE; 2112 } 2113 2114 if (r_type == R_X86_64_GOTPCREL 2115 && (h == NULL || h->type != STT_GNU_IFUNC)) 2116 sec->need_convert_mov_to_lea = 1; 2117 } 2118 2119 return TRUE; 2120 } 2121 2122 /* Return the section that should be marked against GC for a given 2123 relocation. */ 2124 2125 static asection * 2126 elf_x86_64_gc_mark_hook (asection *sec, 2127 struct bfd_link_info *info, 2128 Elf_Internal_Rela *rel, 2129 struct elf_link_hash_entry *h, 2130 Elf_Internal_Sym *sym) 2131 { 2132 if (h != NULL) 2133 switch (ELF32_R_TYPE (rel->r_info)) 2134 { 2135 case R_X86_64_GNU_VTINHERIT: 2136 case R_X86_64_GNU_VTENTRY: 2137 return NULL; 2138 } 2139 2140 return _bfd_elf_gc_mark_hook (sec, info, rel, h, sym); 2141 } 2142 2143 /* Update the got entry reference counts for the section being removed. 
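   When --gc-sections discards SEC, the counts accumulated by
   check_relocs for its relocations (GOT, PLT and TLS entries, plus the
   per-symbol dyn_relocs lists) are wound back here so that
   allocate_dynrelocs does not reserve space for entries that nothing
   references any more.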
*/ 2144 2145 static bfd_boolean 2146 elf_x86_64_gc_sweep_hook (bfd *abfd, struct bfd_link_info *info, 2147 asection *sec, 2148 const Elf_Internal_Rela *relocs) 2149 { 2150 struct elf_x86_64_link_hash_table *htab; 2151 Elf_Internal_Shdr *symtab_hdr; 2152 struct elf_link_hash_entry **sym_hashes; 2153 bfd_signed_vma *local_got_refcounts; 2154 const Elf_Internal_Rela *rel, *relend; 2155 2156 if (info->relocatable) 2157 return TRUE; 2158 2159 htab = elf_x86_64_hash_table (info); 2160 if (htab == NULL) 2161 return FALSE; 2162 2163 elf_section_data (sec)->local_dynrel = NULL; 2164 2165 symtab_hdr = &elf_symtab_hdr (abfd); 2166 sym_hashes = elf_sym_hashes (abfd); 2167 local_got_refcounts = elf_local_got_refcounts (abfd); 2168 2169 htab = elf_x86_64_hash_table (info); 2170 relend = relocs + sec->reloc_count; 2171 for (rel = relocs; rel < relend; rel++) 2172 { 2173 unsigned long r_symndx; 2174 unsigned int r_type; 2175 struct elf_link_hash_entry *h = NULL; 2176 2177 r_symndx = htab->r_sym (rel->r_info); 2178 if (r_symndx >= symtab_hdr->sh_info) 2179 { 2180 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 2181 while (h->root.type == bfd_link_hash_indirect 2182 || h->root.type == bfd_link_hash_warning) 2183 h = (struct elf_link_hash_entry *) h->root.u.i.link; 2184 } 2185 else 2186 { 2187 /* A local symbol. */ 2188 Elf_Internal_Sym *isym; 2189 2190 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 2191 abfd, r_symndx); 2192 2193 /* Check relocation against local STT_GNU_IFUNC symbol. */ 2194 if (isym != NULL 2195 && ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) 2196 { 2197 h = elf_x86_64_get_local_sym_hash (htab, abfd, rel, FALSE); 2198 if (h == NULL) 2199 abort (); 2200 } 2201 } 2202 2203 if (h) 2204 { 2205 struct elf_x86_64_link_hash_entry *eh; 2206 struct elf_dyn_relocs **pp; 2207 struct elf_dyn_relocs *p; 2208 2209 eh = (struct elf_x86_64_link_hash_entry *) h; 2210 2211 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; pp = &p->next) 2212 if (p->sec == sec) 2213 { 2214 /* Everything must go for SEC. */ 2215 *pp = p->next; 2216 break; 2217 } 2218 } 2219 2220 r_type = ELF32_R_TYPE (rel->r_info); 2221 if (! 
elf_x86_64_tls_transition (info, abfd, sec, NULL, 2222 symtab_hdr, sym_hashes, 2223 &r_type, GOT_UNKNOWN, 2224 rel, relend, h, r_symndx)) 2225 return FALSE; 2226 2227 switch (r_type) 2228 { 2229 case R_X86_64_TLSLD: 2230 if (htab->tls_ld_got.refcount > 0) 2231 htab->tls_ld_got.refcount -= 1; 2232 break; 2233 2234 case R_X86_64_TLSGD: 2235 case R_X86_64_GOTPC32_TLSDESC: 2236 case R_X86_64_TLSDESC_CALL: 2237 case R_X86_64_GOTTPOFF: 2238 case R_X86_64_GOT32: 2239 case R_X86_64_GOTPCREL: 2240 case R_X86_64_GOT64: 2241 case R_X86_64_GOTPCREL64: 2242 case R_X86_64_GOTPLT64: 2243 if (h != NULL) 2244 { 2245 if (h->got.refcount > 0) 2246 h->got.refcount -= 1; 2247 if (h->type == STT_GNU_IFUNC) 2248 { 2249 if (h->plt.refcount > 0) 2250 h->plt.refcount -= 1; 2251 } 2252 } 2253 else if (local_got_refcounts != NULL) 2254 { 2255 if (local_got_refcounts[r_symndx] > 0) 2256 local_got_refcounts[r_symndx] -= 1; 2257 } 2258 break; 2259 2260 case R_X86_64_8: 2261 case R_X86_64_16: 2262 case R_X86_64_32: 2263 case R_X86_64_64: 2264 case R_X86_64_32S: 2265 case R_X86_64_PC8: 2266 case R_X86_64_PC16: 2267 case R_X86_64_PC32: 2268 case R_X86_64_PC32_BND: 2269 case R_X86_64_PC64: 2270 case R_X86_64_SIZE32: 2271 case R_X86_64_SIZE64: 2272 if (info->shared 2273 && (h == NULL || h->type != STT_GNU_IFUNC)) 2274 break; 2275 /* Fall thru */ 2276 2277 case R_X86_64_PLT32: 2278 case R_X86_64_PLT32_BND: 2279 case R_X86_64_PLTOFF64: 2280 if (h != NULL) 2281 { 2282 if (h->plt.refcount > 0) 2283 h->plt.refcount -= 1; 2284 } 2285 break; 2286 2287 default: 2288 break; 2289 } 2290 } 2291 2292 return TRUE; 2293 } 2294 2295 /* Adjust a symbol defined by a dynamic object and referenced by a 2296 regular object. The current definition is in some section of the 2297 dynamic object, but we're not including those sections. We have to 2298 change the definition to something the rest of the link can 2299 understand. */ 2300 2301 static bfd_boolean 2302 elf_x86_64_adjust_dynamic_symbol (struct bfd_link_info *info, 2303 struct elf_link_hash_entry *h) 2304 { 2305 struct elf_x86_64_link_hash_table *htab; 2306 asection *s; 2307 struct elf_x86_64_link_hash_entry *eh; 2308 struct elf_dyn_relocs *p; 2309 2310 /* STT_GNU_IFUNC symbol must go through PLT. */ 2311 if (h->type == STT_GNU_IFUNC) 2312 { 2313 /* All local STT_GNU_IFUNC references must be treate as local 2314 calls via local PLT. */ 2315 if (h->ref_regular 2316 && SYMBOL_CALLS_LOCAL (info, h)) 2317 { 2318 bfd_size_type pc_count = 0, count = 0; 2319 struct elf_dyn_relocs **pp; 2320 2321 eh = (struct elf_x86_64_link_hash_entry *) h; 2322 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) 2323 { 2324 pc_count += p->pc_count; 2325 p->count -= p->pc_count; 2326 p->pc_count = 0; 2327 count += p->count; 2328 if (p->count == 0) 2329 *pp = p->next; 2330 else 2331 pp = &p->next; 2332 } 2333 2334 if (pc_count || count) 2335 { 2336 h->needs_plt = 1; 2337 h->non_got_ref = 1; 2338 if (h->plt.refcount <= 0) 2339 h->plt.refcount = 1; 2340 else 2341 h->plt.refcount += 1; 2342 } 2343 } 2344 2345 if (h->plt.refcount <= 0) 2346 { 2347 h->plt.offset = (bfd_vma) -1; 2348 h->needs_plt = 0; 2349 } 2350 return TRUE; 2351 } 2352 2353 /* If this is a function, put it in the procedure linkage table. We 2354 will fill in the contents of the procedure linkage table later, 2355 when we know the address of the .got section. 
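   Roughly, each 16-byte lazy PLT entry built later looks like

	jmpq	*name@GOTPCREL(%rip)	# jump via the .got.plt slot
	pushq	$index			# relocation index in .rela.plt
	jmpq	plt0			# fall into the lazy resolver

   where the symbol's .got.plt slot initially points back at the pushq,
   so the first call reaches the resolver through the special first
   entry and later calls go straight to the resolved address.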
*/ 2356 if (h->type == STT_FUNC 2357 || h->needs_plt) 2358 { 2359 if (h->plt.refcount <= 0 2360 || SYMBOL_CALLS_LOCAL (info, h) 2361 || (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT 2362 && h->root.type == bfd_link_hash_undefweak)) 2363 { 2364 /* This case can occur if we saw a PLT32 reloc in an input 2365 file, but the symbol was never referred to by a dynamic 2366 object, or if all references were garbage collected. In 2367 such a case, we don't actually need to build a procedure 2368 linkage table, and we can just do a PC32 reloc instead. */ 2369 h->plt.offset = (bfd_vma) -1; 2370 h->needs_plt = 0; 2371 } 2372 2373 return TRUE; 2374 } 2375 else 2376 /* It's possible that we incorrectly decided a .plt reloc was 2377 needed for an R_X86_64_PC32 reloc to a non-function sym in 2378 check_relocs. We can't decide accurately between function and 2379 non-function syms in check-relocs; Objects loaded later in 2380 the link may change h->type. So fix it now. */ 2381 h->plt.offset = (bfd_vma) -1; 2382 2383 /* If this is a weak symbol, and there is a real definition, the 2384 processor independent code will have arranged for us to see the 2385 real definition first, and we can just use the same value. */ 2386 if (h->u.weakdef != NULL) 2387 { 2388 BFD_ASSERT (h->u.weakdef->root.type == bfd_link_hash_defined 2389 || h->u.weakdef->root.type == bfd_link_hash_defweak); 2390 h->root.u.def.section = h->u.weakdef->root.u.def.section; 2391 h->root.u.def.value = h->u.weakdef->root.u.def.value; 2392 if (ELIMINATE_COPY_RELOCS || info->nocopyreloc) 2393 { 2394 eh = (struct elf_x86_64_link_hash_entry *) h; 2395 h->non_got_ref = h->u.weakdef->non_got_ref; 2396 eh->needs_copy = h->u.weakdef->needs_copy; 2397 } 2398 return TRUE; 2399 } 2400 2401 /* This is a reference to a symbol defined by a dynamic object which 2402 is not a function. */ 2403 2404 /* If we are creating a shared library, we must presume that the 2405 only references to the symbol are via the global offset table. 2406 For such cases we need not do anything here; the relocations will 2407 be handled correctly by relocate_section. */ 2408 if (!info->executable) 2409 return TRUE; 2410 2411 /* If there are no references to this symbol that do not use the 2412 GOT, we don't need to generate a copy reloc. */ 2413 if (!h->non_got_ref) 2414 return TRUE; 2415 2416 /* If -z nocopyreloc was given, we won't generate them either. */ 2417 if (info->nocopyreloc) 2418 { 2419 h->non_got_ref = 0; 2420 return TRUE; 2421 } 2422 2423 if (ELIMINATE_COPY_RELOCS) 2424 { 2425 eh = (struct elf_x86_64_link_hash_entry *) h; 2426 for (p = eh->dyn_relocs; p != NULL; p = p->next) 2427 { 2428 s = p->sec->output_section; 2429 if (s != NULL && (s->flags & SEC_READONLY) != 0) 2430 break; 2431 } 2432 2433 /* If we didn't find any dynamic relocs in read-only sections, then 2434 we'll be keeping the dynamic relocs and avoiding the copy reloc. */ 2435 if (p == NULL) 2436 { 2437 h->non_got_ref = 0; 2438 return TRUE; 2439 } 2440 } 2441 2442 /* We must allocate the symbol in our .dynbss section, which will 2443 become part of the .bss section of the executable. There will be 2444 an entry for this symbol in the .dynsym section. The dynamic 2445 object will contain position independent code, so all references 2446 from the dynamic object to this symbol will go through the global 2447 offset table. 
The dynamic linker will use the .dynsym entry to 2448 determine the address it must put in the global offset table, so 2449 both the dynamic object and the regular object will refer to the 2450 same memory location for the variable. */ 2451 2452 htab = elf_x86_64_hash_table (info); 2453 if (htab == NULL) 2454 return FALSE; 2455 2456 /* We must generate a R_X86_64_COPY reloc to tell the dynamic linker 2457 to copy the initial value out of the dynamic object and into the 2458 runtime process image. */ 2459 if ((h->root.u.def.section->flags & SEC_ALLOC) != 0 && h->size != 0) 2460 { 2461 const struct elf_backend_data *bed; 2462 bed = get_elf_backend_data (info->output_bfd); 2463 htab->srelbss->size += bed->s->sizeof_rela; 2464 h->needs_copy = 1; 2465 } 2466 2467 s = htab->sdynbss; 2468 2469 return _bfd_elf_adjust_dynamic_copy (info, h, s); 2470 } 2471 2472 /* Allocate space in .plt, .got and associated reloc sections for 2473 dynamic relocs. */ 2474 2475 static bfd_boolean 2476 elf_x86_64_allocate_dynrelocs (struct elf_link_hash_entry *h, void * inf) 2477 { 2478 struct bfd_link_info *info; 2479 struct elf_x86_64_link_hash_table *htab; 2480 struct elf_x86_64_link_hash_entry *eh; 2481 struct elf_dyn_relocs *p; 2482 const struct elf_backend_data *bed; 2483 unsigned int plt_entry_size; 2484 2485 if (h->root.type == bfd_link_hash_indirect) 2486 return TRUE; 2487 2488 eh = (struct elf_x86_64_link_hash_entry *) h; 2489 2490 info = (struct bfd_link_info *) inf; 2491 htab = elf_x86_64_hash_table (info); 2492 if (htab == NULL) 2493 return FALSE; 2494 bed = get_elf_backend_data (info->output_bfd); 2495 plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd); 2496 2497 /* We can't use the GOT PLT if pointer equality is needed since 2498 finish_dynamic_symbol won't clear symbol value and the dynamic 2499 linker won't update the GOT slot. We will get into an infinite 2500 loop at run-time. */ 2501 if (htab->plt_got != NULL 2502 && h->type != STT_GNU_IFUNC 2503 && !h->pointer_equality_needed 2504 && h->plt.refcount > 0 2505 && h->got.refcount > 0) 2506 { 2507 /* Don't use the regular PLT if there are both GOT and GOTPLT 2508 reloctions. */ 2509 h->plt.offset = (bfd_vma) -1; 2510 2511 /* Use the GOT PLT. */ 2512 eh->plt_got.refcount = 1; 2513 } 2514 2515 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle it 2516 here if it is defined and referenced in a non-shared object. */ 2517 if (h->type == STT_GNU_IFUNC 2518 && h->def_regular) 2519 { 2520 if (_bfd_elf_allocate_ifunc_dyn_relocs (info, h, 2521 &eh->dyn_relocs, 2522 plt_entry_size, 2523 plt_entry_size, 2524 GOT_ENTRY_SIZE)) 2525 { 2526 asection *s = htab->plt_bnd; 2527 if (h->plt.offset != (bfd_vma) -1 && s != NULL) 2528 { 2529 /* Use the .plt.bnd section if it is created. */ 2530 eh->plt_bnd.offset = s->size; 2531 2532 /* Make room for this entry in the .plt.bnd section. */ 2533 s->size += sizeof (elf_x86_64_legacy_plt2_entry); 2534 } 2535 2536 return TRUE; 2537 } 2538 else 2539 return FALSE; 2540 } 2541 else if (htab->elf.dynamic_sections_created 2542 && (h->plt.refcount > 0 || eh->plt_got.refcount > 0)) 2543 { 2544 bfd_boolean use_plt_got; 2545 2546 if ((info->flags & DF_BIND_NOW) && !h->pointer_equality_needed) 2547 { 2548 /* Don't use the regular PLT for DF_BIND_NOW. */ 2549 h->plt.offset = (bfd_vma) -1; 2550 2551 /* Use the GOT PLT. */ 2552 h->got.refcount = 1; 2553 eh->plt_got.refcount = 1; 2554 } 2555 2556 use_plt_got = eh->plt_got.refcount > 0; 2557 2558 /* Make sure this symbol is output as a dynamic symbol. 
2559 Undefined weak syms won't yet be marked as dynamic. */ 2560 if (h->dynindx == -1 2561 && !h->forced_local) 2562 { 2563 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 2564 return FALSE; 2565 } 2566 2567 if (info->shared 2568 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (1, 0, h)) 2569 { 2570 asection *s = htab->elf.splt; 2571 asection *bnd_s = htab->plt_bnd; 2572 asection *got_s = htab->plt_got; 2573 2574 /* If this is the first .plt entry, make room for the special 2575 first entry. The .plt section is used by prelink to undo 2576 prelinking for dynamic relocations. */ 2577 if (s->size == 0) 2578 s->size = plt_entry_size; 2579 2580 if (use_plt_got) 2581 eh->plt_got.offset = got_s->size; 2582 else 2583 { 2584 h->plt.offset = s->size; 2585 if (bnd_s) 2586 eh->plt_bnd.offset = bnd_s->size; 2587 } 2588 2589 /* If this symbol is not defined in a regular file, and we are 2590 not generating a shared library, then set the symbol to this 2591 location in the .plt. This is required to make function 2592 pointers compare as equal between the normal executable and 2593 the shared library. */ 2594 if (! info->shared 2595 && !h->def_regular) 2596 { 2597 if (use_plt_got) 2598 { 2599 /* We need to make a call to the entry of the GOT PLT 2600 instead of regular PLT entry. */ 2601 h->root.u.def.section = got_s; 2602 h->root.u.def.value = eh->plt_got.offset; 2603 } 2604 else 2605 { 2606 if (bnd_s) 2607 { 2608 /* We need to make a call to the entry of the second 2609 PLT instead of regular PLT entry. */ 2610 h->root.u.def.section = bnd_s; 2611 h->root.u.def.value = eh->plt_bnd.offset; 2612 } 2613 else 2614 { 2615 h->root.u.def.section = s; 2616 h->root.u.def.value = h->plt.offset; 2617 } 2618 } 2619 } 2620 2621 /* Make room for this entry. */ 2622 if (use_plt_got) 2623 got_s->size += sizeof (elf_x86_64_legacy_plt2_entry); 2624 else 2625 { 2626 s->size += plt_entry_size; 2627 if (bnd_s) 2628 bnd_s->size += sizeof (elf_x86_64_legacy_plt2_entry); 2629 2630 /* We also need to make an entry in the .got.plt section, 2631 which will be placed in the .got section by the linker 2632 script. */ 2633 htab->elf.sgotplt->size += GOT_ENTRY_SIZE; 2634 2635 /* We also need to make an entry in the .rela.plt 2636 section. */ 2637 htab->elf.srelplt->size += bed->s->sizeof_rela; 2638 htab->elf.srelplt->reloc_count++; 2639 } 2640 } 2641 else 2642 { 2643 h->plt.offset = (bfd_vma) -1; 2644 h->needs_plt = 0; 2645 } 2646 } 2647 else 2648 { 2649 h->plt.offset = (bfd_vma) -1; 2650 h->needs_plt = 0; 2651 } 2652 2653 eh->tlsdesc_got = (bfd_vma) -1; 2654 2655 /* If R_X86_64_GOTTPOFF symbol is now local to the binary, 2656 make it a R_X86_64_TPOFF32 requiring no GOT entry. */ 2657 if (h->got.refcount > 0 2658 && info->executable 2659 && h->dynindx == -1 2660 && elf_x86_64_hash_entry (h)->tls_type == GOT_TLS_IE) 2661 { 2662 h->got.offset = (bfd_vma) -1; 2663 } 2664 else if (h->got.refcount > 0) 2665 { 2666 asection *s; 2667 bfd_boolean dyn; 2668 int tls_type = elf_x86_64_hash_entry (h)->tls_type; 2669 2670 /* Make sure this symbol is output as a dynamic symbol. 2671 Undefined weak syms won't yet be marked as dynamic. */ 2672 if (h->dynindx == -1 2673 && !h->forced_local) 2674 { 2675 if (! bfd_elf_link_record_dynamic_symbol (info, h)) 2676 return FALSE; 2677 } 2678 2679 if (GOT_TLS_GDESC_P (tls_type)) 2680 { 2681 eh->tlsdesc_got = htab->elf.sgotplt->size 2682 - elf_x86_64_compute_jump_table_size (htab); 2683 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE; 2684 h->got.offset = (bfd_vma) -2; 2685 } 2686 if (! 
GOT_TLS_GDESC_P (tls_type) 2687 || GOT_TLS_GD_P (tls_type)) 2688 { 2689 s = htab->elf.sgot; 2690 h->got.offset = s->size; 2691 s->size += GOT_ENTRY_SIZE; 2692 if (GOT_TLS_GD_P (tls_type)) 2693 s->size += GOT_ENTRY_SIZE; 2694 } 2695 dyn = htab->elf.dynamic_sections_created; 2696 /* R_X86_64_TLSGD needs one dynamic relocation if local symbol 2697 and two if global. 2698 R_X86_64_GOTTPOFF needs one dynamic relocation. */ 2699 if ((GOT_TLS_GD_P (tls_type) && h->dynindx == -1) 2700 || tls_type == GOT_TLS_IE) 2701 htab->elf.srelgot->size += bed->s->sizeof_rela; 2702 else if (GOT_TLS_GD_P (tls_type)) 2703 htab->elf.srelgot->size += 2 * bed->s->sizeof_rela; 2704 else if (! GOT_TLS_GDESC_P (tls_type) 2705 && (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 2706 || h->root.type != bfd_link_hash_undefweak) 2707 && (info->shared 2708 || WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, 0, h))) 2709 htab->elf.srelgot->size += bed->s->sizeof_rela; 2710 if (GOT_TLS_GDESC_P (tls_type)) 2711 { 2712 htab->elf.srelplt->size += bed->s->sizeof_rela; 2713 htab->tlsdesc_plt = (bfd_vma) -1; 2714 } 2715 } 2716 else 2717 h->got.offset = (bfd_vma) -1; 2718 2719 if (eh->dyn_relocs == NULL) 2720 return TRUE; 2721 2722 /* In the shared -Bsymbolic case, discard space allocated for 2723 dynamic pc-relative relocs against symbols which turn out to be 2724 defined in regular objects. For the normal shared case, discard 2725 space for pc-relative relocs that have become local due to symbol 2726 visibility changes. */ 2727 2728 if (info->shared) 2729 { 2730 /* Relocs that use pc_count are those that appear on a call 2731 insn, or certain REL relocs that can generated via assembly. 2732 We want calls to protected symbols to resolve directly to the 2733 function rather than going via the plt. If people want 2734 function pointer comparisons to work as expected then they 2735 should avoid writing weird assembly. */ 2736 if (SYMBOL_CALLS_LOCAL (info, h)) 2737 { 2738 struct elf_dyn_relocs **pp; 2739 2740 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) 2741 { 2742 p->count -= p->pc_count; 2743 p->pc_count = 0; 2744 if (p->count == 0) 2745 *pp = p->next; 2746 else 2747 pp = &p->next; 2748 } 2749 } 2750 2751 /* Also discard relocs on undefined weak syms with non-default 2752 visibility. */ 2753 if (eh->dyn_relocs != NULL) 2754 { 2755 if (h->root.type == bfd_link_hash_undefweak) 2756 { 2757 if (ELF_ST_VISIBILITY (h->other) != STV_DEFAULT) 2758 eh->dyn_relocs = NULL; 2759 2760 /* Make sure undefined weak symbols are output as a dynamic 2761 symbol in PIEs. */ 2762 else if (h->dynindx == -1 2763 && ! h->forced_local 2764 && ! bfd_elf_link_record_dynamic_symbol (info, h)) 2765 return FALSE; 2766 } 2767 /* For PIE, discard space for pc-relative relocs against 2768 symbols which turn out to need copy relocs. */ 2769 else if (info->executable 2770 && (h->needs_copy || eh->needs_copy) 2771 && h->def_dynamic 2772 && !h->def_regular) 2773 { 2774 struct elf_dyn_relocs **pp; 2775 2776 for (pp = &eh->dyn_relocs; (p = *pp) != NULL; ) 2777 { 2778 if (p->pc_count != 0) 2779 *pp = p->next; 2780 else 2781 pp = &p->next; 2782 } 2783 } 2784 } 2785 } 2786 else if (ELIMINATE_COPY_RELOCS) 2787 { 2788 /* For the non-shared case, discard space for relocs against 2789 symbols which turn out to need copy relocs or are not 2790 dynamic. 
*/ 2791 2792 if (!h->non_got_ref 2793 && ((h->def_dynamic 2794 && !h->def_regular) 2795 || (htab->elf.dynamic_sections_created 2796 && (h->root.type == bfd_link_hash_undefweak 2797 || h->root.type == bfd_link_hash_undefined)))) 2798 { 2799 /* Make sure this symbol is output as a dynamic symbol. 2800 Undefined weak syms won't yet be marked as dynamic. */ 2801 if (h->dynindx == -1 2802 && ! h->forced_local 2803 && ! bfd_elf_link_record_dynamic_symbol (info, h)) 2804 return FALSE; 2805 2806 /* If that succeeded, we know we'll be keeping all the 2807 relocs. */ 2808 if (h->dynindx != -1) 2809 goto keep; 2810 } 2811 2812 eh->dyn_relocs = NULL; 2813 2814 keep: ; 2815 } 2816 2817 /* Finally, allocate space. */ 2818 for (p = eh->dyn_relocs; p != NULL; p = p->next) 2819 { 2820 asection * sreloc; 2821 2822 sreloc = elf_section_data (p->sec)->sreloc; 2823 2824 BFD_ASSERT (sreloc != NULL); 2825 2826 sreloc->size += p->count * bed->s->sizeof_rela; 2827 } 2828 2829 return TRUE; 2830 } 2831 2832 /* Allocate space in .plt, .got and associated reloc sections for 2833 local dynamic relocs. */ 2834 2835 static bfd_boolean 2836 elf_x86_64_allocate_local_dynrelocs (void **slot, void *inf) 2837 { 2838 struct elf_link_hash_entry *h 2839 = (struct elf_link_hash_entry *) *slot; 2840 2841 if (h->type != STT_GNU_IFUNC 2842 || !h->def_regular 2843 || !h->ref_regular 2844 || !h->forced_local 2845 || h->root.type != bfd_link_hash_defined) 2846 abort (); 2847 2848 return elf_x86_64_allocate_dynrelocs (h, inf); 2849 } 2850 2851 /* Find any dynamic relocs that apply to read-only sections. */ 2852 2853 static bfd_boolean 2854 elf_x86_64_readonly_dynrelocs (struct elf_link_hash_entry *h, 2855 void * inf) 2856 { 2857 struct elf_x86_64_link_hash_entry *eh; 2858 struct elf_dyn_relocs *p; 2859 2860 /* Skip local IFUNC symbols. */ 2861 if (h->forced_local && h->type == STT_GNU_IFUNC) 2862 return TRUE; 2863 2864 eh = (struct elf_x86_64_link_hash_entry *) h; 2865 for (p = eh->dyn_relocs; p != NULL; p = p->next) 2866 { 2867 asection *s = p->sec->output_section; 2868 2869 if (s != NULL && (s->flags & SEC_READONLY) != 0) 2870 { 2871 struct bfd_link_info *info = (struct bfd_link_info *) inf; 2872 2873 info->flags |= DF_TEXTREL; 2874 2875 if ((info->warn_shared_textrel && info->shared) 2876 || info->error_textrel) 2877 info->callbacks->einfo (_("%P: %B: warning: relocation against `%s' in readonly section `%A'\n"), 2878 p->sec->owner, h->root.root.string, 2879 p->sec); 2880 2881 /* Not an error, just cut short the traversal. */ 2882 return FALSE; 2883 } 2884 } 2885 return TRUE; 2886 } 2887 2888 /* Convert 2889 mov foo@GOTPCREL(%rip), %reg 2890 to 2891 lea foo(%rip), %reg 2892 with the local symbol, foo. */ 2893 2894 static bfd_boolean 2895 elf_x86_64_convert_mov_to_lea (bfd *abfd, asection *sec, 2896 struct bfd_link_info *link_info) 2897 { 2898 Elf_Internal_Shdr *symtab_hdr; 2899 Elf_Internal_Rela *internal_relocs; 2900 Elf_Internal_Rela *irel, *irelend; 2901 bfd_byte *contents; 2902 struct elf_x86_64_link_hash_table *htab; 2903 bfd_boolean changed_contents; 2904 bfd_boolean changed_relocs; 2905 bfd_signed_vma *local_got_refcounts; 2906 bfd_vma maxpagesize; 2907 2908 /* Don't even try to convert non-ELF outputs. */ 2909 if (!is_elf_hash_table (link_info->hash)) 2910 return FALSE; 2911 2912 /* Nothing to do if there is no need or no output. 
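     That is: skip sections that are not code, carry no relocations,
     were never flagged by check_relocs as containing a convertible
     R_X86_64_GOTPCREL (need_convert_mov_to_lea), or have already been
     discarded from the output.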
*/ 2913 if ((sec->flags & (SEC_CODE | SEC_RELOC)) != (SEC_CODE | SEC_RELOC) 2914 || sec->need_convert_mov_to_lea == 0 2915 || bfd_is_abs_section (sec->output_section)) 2916 return TRUE; 2917 2918 symtab_hdr = &elf_tdata (abfd)->symtab_hdr; 2919 2920 /* Load the relocations for this section. */ 2921 internal_relocs = (_bfd_elf_link_read_relocs 2922 (abfd, sec, NULL, (Elf_Internal_Rela *) NULL, 2923 link_info->keep_memory)); 2924 if (internal_relocs == NULL) 2925 return FALSE; 2926 2927 htab = elf_x86_64_hash_table (link_info); 2928 changed_contents = FALSE; 2929 changed_relocs = FALSE; 2930 local_got_refcounts = elf_local_got_refcounts (abfd); 2931 maxpagesize = get_elf_backend_data (abfd)->maxpagesize; 2932 2933 /* Get the section contents. */ 2934 if (elf_section_data (sec)->this_hdr.contents != NULL) 2935 contents = elf_section_data (sec)->this_hdr.contents; 2936 else 2937 { 2938 if (!bfd_malloc_and_get_section (abfd, sec, &contents)) 2939 goto error_return; 2940 } 2941 2942 irelend = internal_relocs + sec->reloc_count; 2943 for (irel = internal_relocs; irel < irelend; irel++) 2944 { 2945 unsigned int r_type = ELF32_R_TYPE (irel->r_info); 2946 unsigned int r_symndx = htab->r_sym (irel->r_info); 2947 unsigned int indx; 2948 struct elf_link_hash_entry *h; 2949 asection *tsec; 2950 char symtype; 2951 bfd_vma toff, roff; 2952 enum { 2953 none, local, global 2954 } convert_mov_to_lea; 2955 unsigned int opcode; 2956 2957 if (r_type != R_X86_64_GOTPCREL) 2958 continue; 2959 2960 roff = irel->r_offset; 2961 2962 if (roff < 2) 2963 continue; 2964 2965 opcode = bfd_get_8 (abfd, contents + roff - 2); 2966 2967 /* PR ld/18591: Don't convert R_X86_64_GOTPCREL relocation if it 2968 isn't for mov instruction. */ 2969 if (opcode != 0x8b) 2970 continue; 2971 2972 tsec = NULL; 2973 convert_mov_to_lea = none; 2974 2975 /* Get the symbol referred to by the reloc. */ 2976 if (r_symndx < symtab_hdr->sh_info) 2977 { 2978 Elf_Internal_Sym *isym; 2979 2980 /* Silence older GCC warning. */ 2981 h = NULL; 2982 2983 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 2984 abfd, r_symndx); 2985 2986 symtype = ELF_ST_TYPE (isym->st_info); 2987 2988 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation and 2989 skip relocation against undefined symbols. */ 2990 if (symtype != STT_GNU_IFUNC && isym->st_shndx != SHN_UNDEF) 2991 { 2992 if (isym->st_shndx == SHN_ABS) 2993 tsec = bfd_abs_section_ptr; 2994 else if (isym->st_shndx == SHN_COMMON) 2995 tsec = bfd_com_section_ptr; 2996 else if (isym->st_shndx == SHN_X86_64_LCOMMON) 2997 tsec = &_bfd_elf_large_com_section; 2998 else 2999 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx); 3000 3001 toff = isym->st_value; 3002 convert_mov_to_lea = local; 3003 } 3004 } 3005 else 3006 { 3007 indx = r_symndx - symtab_hdr->sh_info; 3008 h = elf_sym_hashes (abfd)[indx]; 3009 BFD_ASSERT (h != NULL); 3010 3011 while (h->root.type == bfd_link_hash_indirect 3012 || h->root.type == bfd_link_hash_warning) 3013 h = (struct elf_link_hash_entry *) h->root.u.i.link; 3014 3015 /* STT_GNU_IFUNC must keep R_X86_64_GOTPCREL relocation. We also 3016 avoid optimizing _DYNAMIC since ld.so may use its link-time 3017 address. 
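	     For STT_GNU_IFUNC the GOT slot is only filled in at startup,
	     when ld.so runs the resolver recorded by an R_X86_64_IRELATIVE
	     relocation, so the load from the GOT cannot be replaced by a
	     link-time constant.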
*/
	  if (h->def_regular
	      && h->type != STT_GNU_IFUNC
	      && h != htab->elf.hdynamic
	      && SYMBOL_REFERENCES_LOCAL (link_info, h))
	    {
	      tsec = h->root.u.def.section;
	      toff = h->root.u.def.value;
	      symtype = h->type;
	      convert_mov_to_lea = global;
	    }
	}

      if (convert_mov_to_lea == none)
	continue;

      if (tsec->sec_info_type == SEC_INFO_TYPE_MERGE)
	{
	  /* At this stage in linking, no SEC_MERGE symbol has been
	     adjusted, so all references to such symbols need to be
	     passed through _bfd_merged_section_offset.  (Later, in
	     relocate_section, all SEC_MERGE symbols *except* for
	     section symbols have been adjusted.)

	     gas may reduce relocations against symbols in SEC_MERGE
	     sections to a relocation against the section symbol when
	     the original addend was zero.  When the reloc is against
	     a section symbol we should include the addend in the
	     offset passed to _bfd_merged_section_offset, since the
	     location of interest is the original symbol.  On the
	     other hand, an access to "sym+addend" where "sym" is not
	     a section symbol should not include the addend; such an
	     access is presumed to be an offset from "sym"; the
	     location of interest is just "sym".  */
	  if (symtype == STT_SECTION)
	    toff += irel->r_addend;

	  toff = _bfd_merged_section_offset (abfd, &tsec,
					     elf_section_data (tsec)->sec_info,
					     toff);

	  if (symtype != STT_SECTION)
	    toff += irel->r_addend;
	}
      else
	toff += irel->r_addend;

      /* Don't convert if R_X86_64_PC32 relocation overflows.  */
      if (tsec->output_section == sec->output_section)
	{
	  if ((toff - roff + 0x80000000) > 0xffffffff)
	    continue;
	}
      else
	{
	  asection *asect;
	  bfd_size_type size;

	  /* At this point, we don't know the load addresses of TSEC
	     section nor SEC section.  We estimate the distance between
	     SEC and TSEC.  */
	  size = 0;
	  for (asect = sec->output_section;
	       asect != NULL && asect != tsec->output_section;
	       asect = asect->next)
	    {
	      asection *i;
	      for (i = asect->output_section->map_head.s;
		   i != NULL;
		   i = i->map_head.s)
		{
		  size = align_power (size, i->alignment_power);
		  size += i->size;
		}
	    }

	  /* Don't convert R_X86_64_GOTPCREL if TSEC isn't placed after
	     SEC.  */
	  if (asect == NULL)
	    continue;

	  /* Take PT_GNU_RELRO segment into account by adding
	     maxpagesize.  */
	  if ((toff + size + maxpagesize - roff + 0x80000000)
	      > 0xffffffff)
	    continue;
	}

      bfd_put_8 (abfd, 0x8d, contents + roff - 2);
      irel->r_info = htab->r_info (r_symndx, R_X86_64_PC32);
      changed_contents = TRUE;
      changed_relocs = TRUE;

      if (convert_mov_to_lea == local)
	{
	  if (local_got_refcounts != NULL
	      && local_got_refcounts[r_symndx] > 0)
	    local_got_refcounts[r_symndx] -= 1;
	}
      else
	{
	  if (h->got.refcount > 0)
	    h->got.refcount -= 1;
	}
    }

  if (contents != NULL
      && elf_section_data (sec)->this_hdr.contents != contents)
    {
      if (!changed_contents && !link_info->keep_memory)
	free (contents);
      else
	{
	  /* Cache the section contents for elf_link_input_bfd.
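	     Keeping the patched buffer in this_hdr.contents makes the
	     final link step write out the modified bytes instead of
	     re-reading the unmodified input section.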
*/ 3131 elf_section_data (sec)->this_hdr.contents = contents; 3132 } 3133 } 3134 3135 if (elf_section_data (sec)->relocs != internal_relocs) 3136 { 3137 if (!changed_relocs) 3138 free (internal_relocs); 3139 else 3140 elf_section_data (sec)->relocs = internal_relocs; 3141 } 3142 3143 return TRUE; 3144 3145 error_return: 3146 if (contents != NULL 3147 && elf_section_data (sec)->this_hdr.contents != contents) 3148 free (contents); 3149 if (internal_relocs != NULL 3150 && elf_section_data (sec)->relocs != internal_relocs) 3151 free (internal_relocs); 3152 return FALSE; 3153 } 3154 3155 /* Set the sizes of the dynamic sections. */ 3156 3157 static bfd_boolean 3158 elf_x86_64_size_dynamic_sections (bfd *output_bfd, 3159 struct bfd_link_info *info) 3160 { 3161 struct elf_x86_64_link_hash_table *htab; 3162 bfd *dynobj; 3163 asection *s; 3164 bfd_boolean relocs; 3165 bfd *ibfd; 3166 const struct elf_backend_data *bed; 3167 3168 htab = elf_x86_64_hash_table (info); 3169 if (htab == NULL) 3170 return FALSE; 3171 bed = get_elf_backend_data (output_bfd); 3172 3173 dynobj = htab->elf.dynobj; 3174 if (dynobj == NULL) 3175 abort (); 3176 3177 if (htab->elf.dynamic_sections_created) 3178 { 3179 /* Set the contents of the .interp section to the interpreter. */ 3180 if (info->executable) 3181 { 3182 s = bfd_get_linker_section (dynobj, ".interp"); 3183 if (s == NULL) 3184 abort (); 3185 s->size = htab->dynamic_interpreter_size; 3186 s->contents = (unsigned char *) htab->dynamic_interpreter; 3187 } 3188 } 3189 3190 /* Set up .got offsets for local syms, and space for local dynamic 3191 relocs. */ 3192 for (ibfd = info->input_bfds; ibfd != NULL; ibfd = ibfd->link.next) 3193 { 3194 bfd_signed_vma *local_got; 3195 bfd_signed_vma *end_local_got; 3196 char *local_tls_type; 3197 bfd_vma *local_tlsdesc_gotent; 3198 bfd_size_type locsymcount; 3199 Elf_Internal_Shdr *symtab_hdr; 3200 asection *srel; 3201 3202 if (! is_x86_64_elf (ibfd)) 3203 continue; 3204 3205 for (s = ibfd->sections; s != NULL; s = s->next) 3206 { 3207 struct elf_dyn_relocs *p; 3208 3209 if (!elf_x86_64_convert_mov_to_lea (ibfd, s, info)) 3210 return FALSE; 3211 3212 for (p = (struct elf_dyn_relocs *) 3213 (elf_section_data (s)->local_dynrel); 3214 p != NULL; 3215 p = p->next) 3216 { 3217 if (!bfd_is_abs_section (p->sec) 3218 && bfd_is_abs_section (p->sec->output_section)) 3219 { 3220 /* Input section has been discarded, either because 3221 it is a copy of a linkonce section or due to 3222 linker script /DISCARD/, so we'll be discarding 3223 the relocs too. 
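		 The tell-tale is that the discarded input section is
		 still a real section but has been mapped to the absolute
		 output section, which is exactly what the test above
		 checks.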
*/ 3224 } 3225 else if (p->count != 0) 3226 { 3227 srel = elf_section_data (p->sec)->sreloc; 3228 srel->size += p->count * bed->s->sizeof_rela; 3229 if ((p->sec->output_section->flags & SEC_READONLY) != 0 3230 && (info->flags & DF_TEXTREL) == 0) 3231 { 3232 info->flags |= DF_TEXTREL; 3233 if ((info->warn_shared_textrel && info->shared) 3234 || info->error_textrel) 3235 info->callbacks->einfo (_("%P: %B: warning: relocation in readonly section `%A'\n"), 3236 p->sec->owner, p->sec); 3237 } 3238 } 3239 } 3240 } 3241 3242 local_got = elf_local_got_refcounts (ibfd); 3243 if (!local_got) 3244 continue; 3245 3246 symtab_hdr = &elf_symtab_hdr (ibfd); 3247 locsymcount = symtab_hdr->sh_info; 3248 end_local_got = local_got + locsymcount; 3249 local_tls_type = elf_x86_64_local_got_tls_type (ibfd); 3250 local_tlsdesc_gotent = elf_x86_64_local_tlsdesc_gotent (ibfd); 3251 s = htab->elf.sgot; 3252 srel = htab->elf.srelgot; 3253 for (; local_got < end_local_got; 3254 ++local_got, ++local_tls_type, ++local_tlsdesc_gotent) 3255 { 3256 *local_tlsdesc_gotent = (bfd_vma) -1; 3257 if (*local_got > 0) 3258 { 3259 if (GOT_TLS_GDESC_P (*local_tls_type)) 3260 { 3261 *local_tlsdesc_gotent = htab->elf.sgotplt->size 3262 - elf_x86_64_compute_jump_table_size (htab); 3263 htab->elf.sgotplt->size += 2 * GOT_ENTRY_SIZE; 3264 *local_got = (bfd_vma) -2; 3265 } 3266 if (! GOT_TLS_GDESC_P (*local_tls_type) 3267 || GOT_TLS_GD_P (*local_tls_type)) 3268 { 3269 *local_got = s->size; 3270 s->size += GOT_ENTRY_SIZE; 3271 if (GOT_TLS_GD_P (*local_tls_type)) 3272 s->size += GOT_ENTRY_SIZE; 3273 } 3274 if (info->shared 3275 || GOT_TLS_GD_ANY_P (*local_tls_type) 3276 || *local_tls_type == GOT_TLS_IE) 3277 { 3278 if (GOT_TLS_GDESC_P (*local_tls_type)) 3279 { 3280 htab->elf.srelplt->size 3281 += bed->s->sizeof_rela; 3282 htab->tlsdesc_plt = (bfd_vma) -1; 3283 } 3284 if (! GOT_TLS_GDESC_P (*local_tls_type) 3285 || GOT_TLS_GD_P (*local_tls_type)) 3286 srel->size += bed->s->sizeof_rela; 3287 } 3288 } 3289 else 3290 *local_got = (bfd_vma) -1; 3291 } 3292 } 3293 3294 if (htab->tls_ld_got.refcount > 0) 3295 { 3296 /* Allocate 2 got entries and 1 dynamic reloc for R_X86_64_TLSLD 3297 relocs. */ 3298 htab->tls_ld_got.offset = htab->elf.sgot->size; 3299 htab->elf.sgot->size += 2 * GOT_ENTRY_SIZE; 3300 htab->elf.srelgot->size += bed->s->sizeof_rela; 3301 } 3302 else 3303 htab->tls_ld_got.offset = -1; 3304 3305 /* Allocate global sym .plt and .got entries, and space for global 3306 sym dynamic relocs. */ 3307 elf_link_hash_traverse (&htab->elf, elf_x86_64_allocate_dynrelocs, 3308 info); 3309 3310 /* Allocate .plt and .got entries, and space for local symbols. */ 3311 htab_traverse (htab->loc_hash_table, 3312 elf_x86_64_allocate_local_dynrelocs, 3313 info); 3314 3315 /* For every jump slot reserved in the sgotplt, reloc_count is 3316 incremented. However, when we reserve space for TLS descriptors, 3317 it's not incremented, so in order to compute the space reserved 3318 for them, it suffices to multiply the reloc count by the jump 3319 slot size. 3320 3321 PR ld/13302: We start next_irelative_index at the end of .rela.plt 3322 so that R_X86_64_IRELATIVE entries come last. 
*/ 3323 if (htab->elf.srelplt) 3324 { 3325 htab->sgotplt_jump_table_size 3326 = elf_x86_64_compute_jump_table_size (htab); 3327 htab->next_irelative_index = htab->elf.srelplt->reloc_count - 1; 3328 } 3329 else if (htab->elf.irelplt) 3330 htab->next_irelative_index = htab->elf.irelplt->reloc_count - 1; 3331 3332 if (htab->tlsdesc_plt) 3333 { 3334 /* If we're not using lazy TLS relocations, don't generate the 3335 PLT and GOT entries they require. */ 3336 if ((info->flags & DF_BIND_NOW)) 3337 htab->tlsdesc_plt = 0; 3338 else 3339 { 3340 htab->tlsdesc_got = htab->elf.sgot->size; 3341 htab->elf.sgot->size += GOT_ENTRY_SIZE; 3342 /* Reserve room for the initial entry. 3343 FIXME: we could probably do away with it in this case. */ 3344 if (htab->elf.splt->size == 0) 3345 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd); 3346 htab->tlsdesc_plt = htab->elf.splt->size; 3347 htab->elf.splt->size += GET_PLT_ENTRY_SIZE (output_bfd); 3348 } 3349 } 3350 3351 if (htab->elf.sgotplt) 3352 { 3353 /* Don't allocate .got.plt section if there are no GOT nor PLT 3354 entries and there is no refeence to _GLOBAL_OFFSET_TABLE_. */ 3355 if ((htab->elf.hgot == NULL 3356 || !htab->elf.hgot->ref_regular_nonweak) 3357 && (htab->elf.sgotplt->size 3358 == get_elf_backend_data (output_bfd)->got_header_size) 3359 && (htab->elf.splt == NULL 3360 || htab->elf.splt->size == 0) 3361 && (htab->elf.sgot == NULL 3362 || htab->elf.sgot->size == 0) 3363 && (htab->elf.iplt == NULL 3364 || htab->elf.iplt->size == 0) 3365 && (htab->elf.igotplt == NULL 3366 || htab->elf.igotplt->size == 0)) 3367 htab->elf.sgotplt->size = 0; 3368 } 3369 3370 if (htab->plt_eh_frame != NULL 3371 && htab->elf.splt != NULL 3372 && htab->elf.splt->size != 0 3373 && !bfd_is_abs_section (htab->elf.splt->output_section) 3374 && _bfd_elf_eh_frame_present (info)) 3375 { 3376 const struct elf_x86_64_backend_data *arch_data 3377 = get_elf_x86_64_arch_data (bed); 3378 htab->plt_eh_frame->size = arch_data->eh_frame_plt_size; 3379 } 3380 3381 /* We now have determined the sizes of the various dynamic sections. 3382 Allocate memory for them. */ 3383 relocs = FALSE; 3384 for (s = dynobj->sections; s != NULL; s = s->next) 3385 { 3386 if ((s->flags & SEC_LINKER_CREATED) == 0) 3387 continue; 3388 3389 if (s == htab->elf.splt 3390 || s == htab->elf.sgot 3391 || s == htab->elf.sgotplt 3392 || s == htab->elf.iplt 3393 || s == htab->elf.igotplt 3394 || s == htab->plt_bnd 3395 || s == htab->plt_got 3396 || s == htab->plt_eh_frame 3397 || s == htab->sdynbss) 3398 { 3399 /* Strip this section if we don't need it; see the 3400 comment below. */ 3401 } 3402 else if (CONST_STRNEQ (bfd_get_section_name (dynobj, s), ".rela")) 3403 { 3404 if (s->size != 0 && s != htab->elf.srelplt) 3405 relocs = TRUE; 3406 3407 /* We use the reloc_count field as a counter if we need 3408 to copy relocs into the output file. */ 3409 if (s != htab->elf.srelplt) 3410 s->reloc_count = 0; 3411 } 3412 else 3413 { 3414 /* It's not one of our sections, so don't allocate space. */ 3415 continue; 3416 } 3417 3418 if (s->size == 0) 3419 { 3420 /* If we don't need this section, strip it from the 3421 output file. This is mostly to handle .rela.bss and 3422 .rela.plt. We must create both sections in 3423 create_dynamic_sections, because they must be created 3424 before the linker maps input sections to output 3425 sections. The linker does that before 3426 adjust_dynamic_symbol is called, and it is that 3427 function which decides whether anything needs to go 3428 into these sections. 
*/ 3429 3430 s->flags |= SEC_EXCLUDE; 3431 continue; 3432 } 3433 3434 if ((s->flags & SEC_HAS_CONTENTS) == 0) 3435 continue; 3436 3437 /* Allocate memory for the section contents. We use bfd_zalloc 3438 here in case unused entries are not reclaimed before the 3439 section's contents are written out. This should not happen, 3440 but this way if it does, we get a R_X86_64_NONE reloc instead 3441 of garbage. */ 3442 s->contents = (bfd_byte *) bfd_zalloc (dynobj, s->size); 3443 if (s->contents == NULL) 3444 return FALSE; 3445 } 3446 3447 if (htab->plt_eh_frame != NULL 3448 && htab->plt_eh_frame->contents != NULL) 3449 { 3450 const struct elf_x86_64_backend_data *arch_data 3451 = get_elf_x86_64_arch_data (bed); 3452 3453 memcpy (htab->plt_eh_frame->contents, 3454 arch_data->eh_frame_plt, htab->plt_eh_frame->size); 3455 bfd_put_32 (dynobj, htab->elf.splt->size, 3456 htab->plt_eh_frame->contents + PLT_FDE_LEN_OFFSET); 3457 } 3458 3459 if (htab->elf.dynamic_sections_created) 3460 { 3461 /* Add some entries to the .dynamic section. We fill in the 3462 values later, in elf_x86_64_finish_dynamic_sections, but we 3463 must add the entries now so that we get the correct size for 3464 the .dynamic section. The DT_DEBUG entry is filled in by the 3465 dynamic linker and used by the debugger. */ 3466 #define add_dynamic_entry(TAG, VAL) \ 3467 _bfd_elf_add_dynamic_entry (info, TAG, VAL) 3468 3469 if (info->executable) 3470 { 3471 if (!add_dynamic_entry (DT_DEBUG, 0)) 3472 return FALSE; 3473 } 3474 3475 if (htab->elf.splt->size != 0) 3476 { 3477 /* DT_PLTGOT is used by prelink even if there is no PLT 3478 relocation. */ 3479 if (!add_dynamic_entry (DT_PLTGOT, 0)) 3480 return FALSE; 3481 3482 if (htab->elf.srelplt->size != 0) 3483 { 3484 if (!add_dynamic_entry (DT_PLTRELSZ, 0) 3485 || !add_dynamic_entry (DT_PLTREL, DT_RELA) 3486 || !add_dynamic_entry (DT_JMPREL, 0)) 3487 return FALSE; 3488 } 3489 3490 if (htab->tlsdesc_plt 3491 && (!add_dynamic_entry (DT_TLSDESC_PLT, 0) 3492 || !add_dynamic_entry (DT_TLSDESC_GOT, 0))) 3493 return FALSE; 3494 } 3495 3496 if (relocs) 3497 { 3498 if (!add_dynamic_entry (DT_RELA, 0) 3499 || !add_dynamic_entry (DT_RELASZ, 0) 3500 || !add_dynamic_entry (DT_RELAENT, bed->s->sizeof_rela)) 3501 return FALSE; 3502 3503 /* If any dynamic relocs apply to a read-only section, 3504 then we need a DT_TEXTREL entry. 
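	 DT_TEXTREL tells the dynamic linker that it must temporarily make
	 normally read-only segments writable in order to apply those
	 relocations, which prevents the affected pages from being shared
	 between processes; that is also why elf_x86_64_readonly_dynrelocs
	 above warns about them.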
*/ 3505 if ((info->flags & DF_TEXTREL) == 0) 3506 elf_link_hash_traverse (&htab->elf, 3507 elf_x86_64_readonly_dynrelocs, 3508 info); 3509 3510 if ((info->flags & DF_TEXTREL) != 0) 3511 { 3512 if (!add_dynamic_entry (DT_TEXTREL, 0)) 3513 return FALSE; 3514 } 3515 } 3516 } 3517 #undef add_dynamic_entry 3518 3519 return TRUE; 3520 } 3521 3522 static bfd_boolean 3523 elf_x86_64_always_size_sections (bfd *output_bfd, 3524 struct bfd_link_info *info) 3525 { 3526 asection *tls_sec = elf_hash_table (info)->tls_sec; 3527 3528 if (tls_sec) 3529 { 3530 struct elf_link_hash_entry *tlsbase; 3531 3532 tlsbase = elf_link_hash_lookup (elf_hash_table (info), 3533 "_TLS_MODULE_BASE_", 3534 FALSE, FALSE, FALSE); 3535 3536 if (tlsbase && tlsbase->type == STT_TLS) 3537 { 3538 struct elf_x86_64_link_hash_table *htab; 3539 struct bfd_link_hash_entry *bh = NULL; 3540 const struct elf_backend_data *bed 3541 = get_elf_backend_data (output_bfd); 3542 3543 htab = elf_x86_64_hash_table (info); 3544 if (htab == NULL) 3545 return FALSE; 3546 3547 if (!(_bfd_generic_link_add_one_symbol 3548 (info, output_bfd, "_TLS_MODULE_BASE_", BSF_LOCAL, 3549 tls_sec, 0, NULL, FALSE, 3550 bed->collect, &bh))) 3551 return FALSE; 3552 3553 htab->tls_module_base = bh; 3554 3555 tlsbase = (struct elf_link_hash_entry *)bh; 3556 tlsbase->def_regular = 1; 3557 tlsbase->other = STV_HIDDEN; 3558 tlsbase->root.linker_def = 1; 3559 (*bed->elf_backend_hide_symbol) (info, tlsbase, TRUE); 3560 } 3561 } 3562 3563 return TRUE; 3564 } 3565 3566 /* _TLS_MODULE_BASE_ needs to be treated especially when linking 3567 executables. Rather than setting it to the beginning of the TLS 3568 section, we have to set it to the end. This function may be called 3569 multiple times, it is idempotent. */ 3570 3571 static void 3572 elf_x86_64_set_tls_module_base (struct bfd_link_info *info) 3573 { 3574 struct elf_x86_64_link_hash_table *htab; 3575 struct bfd_link_hash_entry *base; 3576 3577 if (!info->executable) 3578 return; 3579 3580 htab = elf_x86_64_hash_table (info); 3581 if (htab == NULL) 3582 return; 3583 3584 base = htab->tls_module_base; 3585 if (base == NULL) 3586 return; 3587 3588 base->u.def.value = htab->elf.tls_size; 3589 } 3590 3591 /* Return the base VMA address which should be subtracted from real addresses 3592 when resolving @dtpoff relocation. 3593 This is PT_TLS segment p_vaddr. */ 3594 3595 static bfd_vma 3596 elf_x86_64_dtpoff_base (struct bfd_link_info *info) 3597 { 3598 /* If tls_sec is NULL, we should have signalled an error already. */ 3599 if (elf_hash_table (info)->tls_sec == NULL) 3600 return 0; 3601 return elf_hash_table (info)->tls_sec->vma; 3602 } 3603 3604 /* Return the relocation value for @tpoff relocation 3605 if STT_TLS virtual address is ADDRESS. */ 3606 3607 static bfd_vma 3608 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address) 3609 { 3610 struct elf_link_hash_table *htab = elf_hash_table (info); 3611 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd); 3612 bfd_vma static_tls_size; 3613 3614 /* If tls_segment is NULL, we should have signalled an error already. */ 3615 if (htab->tls_sec == NULL) 3616 return 0; 3617 3618 /* Consider special static TLS alignment requirements. */ 3619 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment); 3620 return address - static_tls_size - htab->tls_sec->vma; 3621 } 3622 3623 /* Is the instruction before OFFSET in CONTENTS a 32bit relative 3624 branch? 
*/ 3625 3626 static bfd_boolean 3627 is_32bit_relative_branch (bfd_byte *contents, bfd_vma offset) 3628 { 3629 /* Opcode Instruction 3630 0xe8 call 3631 0xe9 jump 3632 0x0f 0x8x conditional jump */ 3633 return ((offset > 0 3634 && (contents [offset - 1] == 0xe8 3635 || contents [offset - 1] == 0xe9)) 3636 || (offset > 1 3637 && contents [offset - 2] == 0x0f 3638 && (contents [offset - 1] & 0xf0) == 0x80)); 3639 } 3640 3641 /* Relocate an x86_64 ELF section. */ 3642 3643 static bfd_boolean 3644 elf_x86_64_relocate_section (bfd *output_bfd, 3645 struct bfd_link_info *info, 3646 bfd *input_bfd, 3647 asection *input_section, 3648 bfd_byte *contents, 3649 Elf_Internal_Rela *relocs, 3650 Elf_Internal_Sym *local_syms, 3651 asection **local_sections) 3652 { 3653 struct elf_x86_64_link_hash_table *htab; 3654 Elf_Internal_Shdr *symtab_hdr; 3655 struct elf_link_hash_entry **sym_hashes; 3656 bfd_vma *local_got_offsets; 3657 bfd_vma *local_tlsdesc_gotents; 3658 Elf_Internal_Rela *rel; 3659 Elf_Internal_Rela *relend; 3660 const unsigned int plt_entry_size = GET_PLT_ENTRY_SIZE (info->output_bfd); 3661 3662 BFD_ASSERT (is_x86_64_elf (input_bfd)); 3663 3664 htab = elf_x86_64_hash_table (info); 3665 if (htab == NULL) 3666 return FALSE; 3667 symtab_hdr = &elf_symtab_hdr (input_bfd); 3668 sym_hashes = elf_sym_hashes (input_bfd); 3669 local_got_offsets = elf_local_got_offsets (input_bfd); 3670 local_tlsdesc_gotents = elf_x86_64_local_tlsdesc_gotent (input_bfd); 3671 3672 elf_x86_64_set_tls_module_base (info); 3673 3674 rel = relocs; 3675 relend = relocs + input_section->reloc_count; 3676 for (; rel < relend; rel++) 3677 { 3678 unsigned int r_type; 3679 reloc_howto_type *howto; 3680 unsigned long r_symndx; 3681 struct elf_link_hash_entry *h; 3682 struct elf_x86_64_link_hash_entry *eh; 3683 Elf_Internal_Sym *sym; 3684 asection *sec; 3685 bfd_vma off, offplt, plt_offset; 3686 bfd_vma relocation; 3687 bfd_boolean unresolved_reloc; 3688 bfd_reloc_status_type r; 3689 int tls_type; 3690 asection *base_got, *resolved_plt; 3691 bfd_vma st_size; 3692 3693 r_type = ELF32_R_TYPE (rel->r_info); 3694 if (r_type == (int) R_X86_64_GNU_VTINHERIT 3695 || r_type == (int) R_X86_64_GNU_VTENTRY) 3696 continue; 3697 3698 if (r_type >= (int) R_X86_64_standard) 3699 { 3700 (*_bfd_error_handler) 3701 (_("%B: unrecognized relocation (0x%x) in section `%A'"), 3702 input_bfd, input_section, r_type); 3703 bfd_set_error (bfd_error_bad_value); 3704 return FALSE; 3705 } 3706 3707 if (r_type != (int) R_X86_64_32 3708 || ABI_64_P (output_bfd)) 3709 howto = x86_64_elf_howto_table + r_type; 3710 else 3711 howto = (x86_64_elf_howto_table 3712 + ARRAY_SIZE (x86_64_elf_howto_table) - 1); 3713 r_symndx = htab->r_sym (rel->r_info); 3714 h = NULL; 3715 sym = NULL; 3716 sec = NULL; 3717 unresolved_reloc = FALSE; 3718 if (r_symndx < symtab_hdr->sh_info) 3719 { 3720 sym = local_syms + r_symndx; 3721 sec = local_sections[r_symndx]; 3722 3723 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, 3724 &sec, rel); 3725 st_size = sym->st_size; 3726 3727 /* Relocate against local STT_GNU_IFUNC symbol. */ 3728 if (!info->relocatable 3729 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC) 3730 { 3731 h = elf_x86_64_get_local_sym_hash (htab, input_bfd, 3732 rel, FALSE); 3733 if (h == NULL) 3734 abort (); 3735 3736 /* Set STT_GNU_IFUNC symbol value. 
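		 The fake hash table entry created for this local IFUNC in
		 check_relocs is given the symbol's section and value here,
		 so the PLT and GOT handling below can treat it like a
		 global STT_GNU_IFUNC definition.  */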
*/ 3737 h->root.u.def.value = sym->st_value; 3738 h->root.u.def.section = sec; 3739 } 3740 } 3741 else 3742 { 3743 bfd_boolean warned ATTRIBUTE_UNUSED; 3744 bfd_boolean ignored ATTRIBUTE_UNUSED; 3745 3746 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, 3747 r_symndx, symtab_hdr, sym_hashes, 3748 h, sec, relocation, 3749 unresolved_reloc, warned, ignored); 3750 st_size = h->size; 3751 } 3752 3753 if (sec != NULL && discarded_section (sec)) 3754 RELOC_AGAINST_DISCARDED_SECTION (info, input_bfd, input_section, 3755 rel, 1, relend, howto, 0, contents); 3756 3757 if (info->relocatable) 3758 continue; 3759 3760 if (rel->r_addend == 0 && !ABI_64_P (output_bfd)) 3761 { 3762 if (r_type == R_X86_64_64) 3763 { 3764 /* For x32, treat R_X86_64_64 like R_X86_64_32 and 3765 zero-extend it to 64bit if addend is zero. */ 3766 r_type = R_X86_64_32; 3767 memset (contents + rel->r_offset + 4, 0, 4); 3768 } 3769 else if (r_type == R_X86_64_SIZE64) 3770 { 3771 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and 3772 zero-extend it to 64bit if addend is zero. */ 3773 r_type = R_X86_64_SIZE32; 3774 memset (contents + rel->r_offset + 4, 0, 4); 3775 } 3776 } 3777 3778 eh = (struct elf_x86_64_link_hash_entry *) h; 3779 3780 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle 3781 it here if it is defined in a non-shared object. */ 3782 if (h != NULL 3783 && h->type == STT_GNU_IFUNC 3784 && h->def_regular) 3785 { 3786 bfd_vma plt_index; 3787 const char *name; 3788 3789 if ((input_section->flags & SEC_ALLOC) == 0 3790 || h->plt.offset == (bfd_vma) -1) 3791 abort (); 3792 3793 /* STT_GNU_IFUNC symbol must go through PLT. */ 3794 if (htab->elf.splt != NULL) 3795 { 3796 if (htab->plt_bnd != NULL) 3797 { 3798 resolved_plt = htab->plt_bnd; 3799 plt_offset = eh->plt_bnd.offset; 3800 } 3801 else 3802 { 3803 resolved_plt = htab->elf.splt; 3804 plt_offset = h->plt.offset; 3805 } 3806 } 3807 else 3808 { 3809 resolved_plt = htab->elf.iplt; 3810 plt_offset = h->plt.offset; 3811 } 3812 3813 relocation = (resolved_plt->output_section->vma 3814 + resolved_plt->output_offset + plt_offset); 3815 3816 switch (r_type) 3817 { 3818 default: 3819 if (h->root.root.string) 3820 name = h->root.root.string; 3821 else 3822 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, 3823 NULL); 3824 (*_bfd_error_handler) 3825 (_("%B: relocation %s against STT_GNU_IFUNC " 3826 "symbol `%s' isn't handled by %s"), input_bfd, 3827 x86_64_elf_howto_table[r_type].name, 3828 name, __FUNCTION__); 3829 bfd_set_error (bfd_error_bad_value); 3830 return FALSE; 3831 3832 case R_X86_64_32S: 3833 if (info->shared) 3834 abort (); 3835 goto do_relocation; 3836 3837 case R_X86_64_32: 3838 if (ABI_64_P (output_bfd)) 3839 goto do_relocation; 3840 /* FALLTHROUGH */ 3841 case R_X86_64_64: 3842 if (rel->r_addend != 0) 3843 { 3844 if (h->root.root.string) 3845 name = h->root.root.string; 3846 else 3847 name = bfd_elf_sym_name (input_bfd, symtab_hdr, 3848 sym, NULL); 3849 (*_bfd_error_handler) 3850 (_("%B: relocation %s against STT_GNU_IFUNC " 3851 "symbol `%s' has non-zero addend: %d"), 3852 input_bfd, x86_64_elf_howto_table[r_type].name, 3853 name, rel->r_addend); 3854 bfd_set_error (bfd_error_bad_value); 3855 return FALSE; 3856 } 3857 3858 /* Generate dynamic relcoation only when there is a 3859 non-GOT reference in a shared object. */ 3860 if (info->shared && h->non_got_ref) 3861 { 3862 Elf_Internal_Rela outrel; 3863 asection *sreloc; 3864 3865 /* Need a dynamic relocation to get the real function 3866 address. 
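   At run time an R_X86_64_IRELATIVE entry makes ld.so call the
   IFUNC resolver whose address is in r_addend and store the value
   it returns at r_offset, conceptually

     *(Elf64_Addr *) r_offset = ((Elf64_Addr (*) (void)) r_addend) ();

   When the symbol is resolved locally the IRELATIVE form is used
   below; otherwise an ordinary relocation against the symbol's
   dynamic index is emitted.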
*/ 3867 outrel.r_offset = _bfd_elf_section_offset (output_bfd, 3868 info, 3869 input_section, 3870 rel->r_offset); 3871 if (outrel.r_offset == (bfd_vma) -1 3872 || outrel.r_offset == (bfd_vma) -2) 3873 abort (); 3874 3875 outrel.r_offset += (input_section->output_section->vma 3876 + input_section->output_offset); 3877 3878 if (h->dynindx == -1 3879 || h->forced_local 3880 || info->executable) 3881 { 3882 /* This symbol is resolved locally. */ 3883 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE); 3884 outrel.r_addend = (h->root.u.def.value 3885 + h->root.u.def.section->output_section->vma 3886 + h->root.u.def.section->output_offset); 3887 } 3888 else 3889 { 3890 outrel.r_info = htab->r_info (h->dynindx, r_type); 3891 outrel.r_addend = 0; 3892 } 3893 3894 sreloc = htab->elf.irelifunc; 3895 elf_append_rela (output_bfd, sreloc, &outrel); 3896 3897 /* If this reloc is against an external symbol, we 3898 do not want to fiddle with the addend. Otherwise, 3899 we need to include the symbol value so that it 3900 becomes an addend for the dynamic reloc. For an 3901 internal symbol, we have updated addend. */ 3902 continue; 3903 } 3904 /* FALLTHROUGH */ 3905 case R_X86_64_PC32: 3906 case R_X86_64_PC32_BND: 3907 case R_X86_64_PC64: 3908 case R_X86_64_PLT32: 3909 case R_X86_64_PLT32_BND: 3910 goto do_relocation; 3911 3912 case R_X86_64_GOTPCREL: 3913 case R_X86_64_GOTPCREL64: 3914 base_got = htab->elf.sgot; 3915 off = h->got.offset; 3916 3917 if (base_got == NULL) 3918 abort (); 3919 3920 if (off == (bfd_vma) -1) 3921 { 3922 /* We can't use h->got.offset here to save state, or 3923 even just remember the offset, as finish_dynamic_symbol 3924 would use that as offset into .got. */ 3925 3926 if (htab->elf.splt != NULL) 3927 { 3928 plt_index = h->plt.offset / plt_entry_size - 1; 3929 off = (plt_index + 3) * GOT_ENTRY_SIZE; 3930 base_got = htab->elf.sgotplt; 3931 } 3932 else 3933 { 3934 plt_index = h->plt.offset / plt_entry_size; 3935 off = plt_index * GOT_ENTRY_SIZE; 3936 base_got = htab->elf.igotplt; 3937 } 3938 3939 if (h->dynindx == -1 3940 || h->forced_local 3941 || info->symbolic) 3942 { 3943 /* This references the local defitionion. We must 3944 initialize this entry in the global offset table. 3945 Since the offset must always be a multiple of 8, 3946 we use the least significant bit to record 3947 whether we have initialized it already. 3948 3949 When doing a dynamic link, we create a .rela.got 3950 relocation entry to initialize the value. This 3951 is done in the finish_dynamic_symbol routine. */ 3952 if ((off & 1) != 0) 3953 off &= ~1; 3954 else 3955 { 3956 bfd_put_64 (output_bfd, relocation, 3957 base_got->contents + off); 3958 /* Note that this is harmless for the GOTPLT64 3959 case, as -1 | 1 still is -1. */ 3960 h->got.offset |= 1; 3961 } 3962 } 3963 } 3964 3965 relocation = (base_got->output_section->vma 3966 + base_got->output_offset + off); 3967 3968 goto do_relocation; 3969 } 3970 } 3971 3972 /* When generating a shared object, the relocations handled here are 3973 copied into the output file to be resolved at run time. */ 3974 switch (r_type) 3975 { 3976 case R_X86_64_GOT32: 3977 case R_X86_64_GOT64: 3978 /* Relocation is to the entry for this symbol in the global 3979 offset table. */ 3980 case R_X86_64_GOTPCREL: 3981 case R_X86_64_GOTPCREL64: 3982 /* Use global offset table entry as symbol value. */ 3983 case R_X86_64_GOTPLT64: 3984 /* This is obsolete and treated the the same as GOT64. 
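   The code below shares one convention for all of these GOT
   relocations: the slot offset lives in h->got.offset (or in
   local_got_offsets[] for local symbols), and since GOT slots are
   8 bytes the least significant bit is borrowed as an "already
   initialized" flag.  Worked example, assuming the default 16-byte
   PLT entries and 8-byte GOT entries: a symbol that has a PLT entry
   but no GOT slot of its own, with h->plt.offset == 16, gets
   plt_index == 16 / 16 - 1 == 0 and reuses .got.plt slot
   (0 + 3) * 8 == 24, just past the three entries reserved for the
   dynamic linker.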
*/ 3985 base_got = htab->elf.sgot; 3986 3987 if (htab->elf.sgot == NULL) 3988 abort (); 3989 3990 if (h != NULL) 3991 { 3992 bfd_boolean dyn; 3993 3994 off = h->got.offset; 3995 if (h->needs_plt 3996 && h->plt.offset != (bfd_vma)-1 3997 && off == (bfd_vma)-1) 3998 { 3999 /* We can't use h->got.offset here to save 4000 state, or even just remember the offset, as 4001 finish_dynamic_symbol would use that as offset into 4002 .got. */ 4003 bfd_vma plt_index = h->plt.offset / plt_entry_size - 1; 4004 off = (plt_index + 3) * GOT_ENTRY_SIZE; 4005 base_got = htab->elf.sgotplt; 4006 } 4007 4008 dyn = htab->elf.dynamic_sections_created; 4009 4010 if (! WILL_CALL_FINISH_DYNAMIC_SYMBOL (dyn, info->shared, h) 4011 || (info->shared 4012 && SYMBOL_REFERENCES_LOCAL (info, h)) 4013 || (ELF_ST_VISIBILITY (h->other) 4014 && h->root.type == bfd_link_hash_undefweak)) 4015 { 4016 /* This is actually a static link, or it is a -Bsymbolic 4017 link and the symbol is defined locally, or the symbol 4018 was forced to be local because of a version file. We 4019 must initialize this entry in the global offset table. 4020 Since the offset must always be a multiple of 8, we 4021 use the least significant bit to record whether we 4022 have initialized it already. 4023 4024 When doing a dynamic link, we create a .rela.got 4025 relocation entry to initialize the value. This is 4026 done in the finish_dynamic_symbol routine. */ 4027 if ((off & 1) != 0) 4028 off &= ~1; 4029 else 4030 { 4031 bfd_put_64 (output_bfd, relocation, 4032 base_got->contents + off); 4033 /* Note that this is harmless for the GOTPLT64 case, 4034 as -1 | 1 still is -1. */ 4035 h->got.offset |= 1; 4036 } 4037 } 4038 else 4039 unresolved_reloc = FALSE; 4040 } 4041 else 4042 { 4043 if (local_got_offsets == NULL) 4044 abort (); 4045 4046 off = local_got_offsets[r_symndx]; 4047 4048 /* The offset must always be a multiple of 8. We use 4049 the least significant bit to record whether we have 4050 already generated the necessary reloc. */ 4051 if ((off & 1) != 0) 4052 off &= ~1; 4053 else 4054 { 4055 bfd_put_64 (output_bfd, relocation, 4056 base_got->contents + off); 4057 4058 if (info->shared) 4059 { 4060 asection *s; 4061 Elf_Internal_Rela outrel; 4062 4063 /* We need to generate a R_X86_64_RELATIVE reloc 4064 for the dynamic linker. */ 4065 s = htab->elf.srelgot; 4066 if (s == NULL) 4067 abort (); 4068 4069 outrel.r_offset = (base_got->output_section->vma 4070 + base_got->output_offset 4071 + off); 4072 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); 4073 outrel.r_addend = relocation; 4074 elf_append_rela (output_bfd, s, &outrel); 4075 } 4076 4077 local_got_offsets[r_symndx] |= 1; 4078 } 4079 } 4080 4081 if (off >= (bfd_vma) -2) 4082 abort (); 4083 4084 relocation = base_got->output_section->vma 4085 + base_got->output_offset + off; 4086 if (r_type != R_X86_64_GOTPCREL && r_type != R_X86_64_GOTPCREL64) 4087 relocation -= htab->elf.sgotplt->output_section->vma 4088 - htab->elf.sgotplt->output_offset; 4089 4090 break; 4091 4092 case R_X86_64_GOTOFF64: 4093 /* Relocation is relative to the start of the global offset 4094 table. */ 4095 4096 /* Check to make sure it isn't a protected function or data 4097 symbol for shared library since it may not be local when 4098 used as function address or with copy relocation. We also 4099 need to make sure that a symbol is referenced locally. 
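   When the checks pass, the value computed further down is simply

     S + A - _GLOBAL_OFFSET_TABLE_

   with the GOT base taken as the start of .got.plt, where this
   backend places _GLOBAL_OFFSET_TABLE_ (see the note after the
   error paths).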
*/ 4100 if (info->shared && h) 4101 { 4102 if (!h->def_regular) 4103 { 4104 const char *v; 4105 4106 switch (ELF_ST_VISIBILITY (h->other)) 4107 { 4108 case STV_HIDDEN: 4109 v = _("hidden symbol"); 4110 break; 4111 case STV_INTERNAL: 4112 v = _("internal symbol"); 4113 break; 4114 case STV_PROTECTED: 4115 v = _("protected symbol"); 4116 break; 4117 default: 4118 v = _("symbol"); 4119 break; 4120 } 4121 4122 (*_bfd_error_handler) 4123 (_("%B: relocation R_X86_64_GOTOFF64 against undefined %s `%s' can not be used when making a shared object"), 4124 input_bfd, v, h->root.root.string); 4125 bfd_set_error (bfd_error_bad_value); 4126 return FALSE; 4127 } 4128 else if (!info->executable 4129 && !SYMBOL_REFERENCES_LOCAL (info, h) 4130 && (h->type == STT_FUNC 4131 || h->type == STT_OBJECT) 4132 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED) 4133 { 4134 (*_bfd_error_handler) 4135 (_("%B: relocation R_X86_64_GOTOFF64 against protected %s `%s' can not be used when making a shared object"), 4136 input_bfd, 4137 h->type == STT_FUNC ? "function" : "data", 4138 h->root.root.string); 4139 bfd_set_error (bfd_error_bad_value); 4140 return FALSE; 4141 } 4142 } 4143 4144 /* Note that sgot is not involved in this 4145 calculation. We always want the start of .got.plt. If we 4146 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is 4147 permitted by the ABI, we might have to change this 4148 calculation. */ 4149 relocation -= htab->elf.sgotplt->output_section->vma 4150 + htab->elf.sgotplt->output_offset; 4151 break; 4152 4153 case R_X86_64_GOTPC32: 4154 case R_X86_64_GOTPC64: 4155 /* Use global offset table as symbol value. */ 4156 relocation = htab->elf.sgotplt->output_section->vma 4157 + htab->elf.sgotplt->output_offset; 4158 unresolved_reloc = FALSE; 4159 break; 4160 4161 case R_X86_64_PLTOFF64: 4162 /* Relocation is PLT entry relative to GOT. For local 4163 symbols it's the symbol itself relative to GOT. */ 4164 if (h != NULL 4165 /* See PLT32 handling. */ 4166 && h->plt.offset != (bfd_vma) -1 4167 && htab->elf.splt != NULL) 4168 { 4169 if (htab->plt_bnd != NULL) 4170 { 4171 resolved_plt = htab->plt_bnd; 4172 plt_offset = eh->plt_bnd.offset; 4173 } 4174 else 4175 { 4176 resolved_plt = htab->elf.splt; 4177 plt_offset = h->plt.offset; 4178 } 4179 4180 relocation = (resolved_plt->output_section->vma 4181 + resolved_plt->output_offset 4182 + plt_offset); 4183 unresolved_reloc = FALSE; 4184 } 4185 4186 relocation -= htab->elf.sgotplt->output_section->vma 4187 + htab->elf.sgotplt->output_offset; 4188 break; 4189 4190 case R_X86_64_PLT32: 4191 case R_X86_64_PLT32_BND: 4192 /* Relocation is to the entry for this symbol in the 4193 procedure linkage table. */ 4194 4195 /* Resolve a PLT32 reloc against a local symbol directly, 4196 without using the procedure linkage table. */ 4197 if (h == NULL) 4198 break; 4199 4200 if ((h->plt.offset == (bfd_vma) -1 4201 && eh->plt_got.offset == (bfd_vma) -1) 4202 || htab->elf.splt == NULL) 4203 { 4204 /* We didn't make a PLT entry for this symbol. This 4205 happens when statically linking PIC code, or when 4206 using -Bsymbolic. */ 4207 break; 4208 } 4209 4210 if (h->plt.offset != (bfd_vma) -1) 4211 { 4212 if (htab->plt_bnd != NULL) 4213 { 4214 resolved_plt = htab->plt_bnd; 4215 plt_offset = eh->plt_bnd.offset; 4216 } 4217 else 4218 { 4219 resolved_plt = htab->elf.splt; 4220 plt_offset = h->plt.offset; 4221 } 4222 } 4223 else 4224 { 4225 /* Use the GOT PLT. 
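   Entries in .plt.got are short "jmp *sym@GOTPCREL(%rip)" stubs
   that go through the symbol's regular .got slot rather than
   .got.plt, so they need no .rela.plt slot and no lazy binding; the
   branch relocation is simply resolved to the address of that stub.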
*/ 4226 resolved_plt = htab->plt_got; 4227 plt_offset = eh->plt_got.offset; 4228 } 4229 4230 relocation = (resolved_plt->output_section->vma 4231 + resolved_plt->output_offset 4232 + plt_offset); 4233 unresolved_reloc = FALSE; 4234 break; 4235 4236 case R_X86_64_SIZE32: 4237 case R_X86_64_SIZE64: 4238 /* Set to symbol size. */ 4239 relocation = st_size; 4240 goto direct; 4241 4242 case R_X86_64_PC8: 4243 case R_X86_64_PC16: 4244 case R_X86_64_PC32: 4245 case R_X86_64_PC32_BND: 4246 /* Don't complain about -fPIC if the symbol is undefined when 4247 building executable. */ 4248 if (info->shared 4249 && (input_section->flags & SEC_ALLOC) != 0 4250 && (input_section->flags & SEC_READONLY) != 0 4251 && h != NULL 4252 && !(info->executable 4253 && h->root.type == bfd_link_hash_undefined)) 4254 { 4255 bfd_boolean fail = FALSE; 4256 bfd_boolean branch 4257 = ((r_type == R_X86_64_PC32 4258 || r_type == R_X86_64_PC32_BND) 4259 && is_32bit_relative_branch (contents, rel->r_offset)); 4260 4261 if (SYMBOL_REFERENCES_LOCAL (info, h)) 4262 { 4263 /* Symbol is referenced locally. Make sure it is 4264 defined locally or for a branch. */ 4265 fail = !h->def_regular && !branch; 4266 } 4267 else if (!(info->executable 4268 && (h->needs_copy || eh->needs_copy))) 4269 { 4270 /* Symbol doesn't need copy reloc and isn't referenced 4271 locally. We only allow branch to symbol with 4272 non-default visibility. */ 4273 fail = (!branch 4274 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT); 4275 } 4276 4277 if (fail) 4278 { 4279 const char *fmt; 4280 const char *v; 4281 const char *pic = ""; 4282 4283 switch (ELF_ST_VISIBILITY (h->other)) 4284 { 4285 case STV_HIDDEN: 4286 v = _("hidden symbol"); 4287 break; 4288 case STV_INTERNAL: 4289 v = _("internal symbol"); 4290 break; 4291 case STV_PROTECTED: 4292 v = _("protected symbol"); 4293 break; 4294 default: 4295 v = _("symbol"); 4296 pic = _("; recompile with -fPIC"); 4297 break; 4298 } 4299 4300 if (h->def_regular) 4301 fmt = _("%B: relocation %s against %s `%s' can not be used when making a shared object%s"); 4302 else 4303 fmt = _("%B: relocation %s against undefined %s `%s' can not be used when making a shared object%s"); 4304 4305 (*_bfd_error_handler) (fmt, input_bfd, 4306 x86_64_elf_howto_table[r_type].name, 4307 v, h->root.root.string, pic); 4308 bfd_set_error (bfd_error_bad_value); 4309 return FALSE; 4310 } 4311 } 4312 /* Fall through. */ 4313 4314 case R_X86_64_8: 4315 case R_X86_64_16: 4316 case R_X86_64_32: 4317 case R_X86_64_PC64: 4318 case R_X86_64_64: 4319 /* FIXME: The ABI says the linker should make sure the value is 4320 the same when it's zeroextended to 64 bit. */ 4321 4322 direct: 4323 if ((input_section->flags & SEC_ALLOC) == 0) 4324 break; 4325 4326 /* Don't copy a pc-relative relocation into the output file 4327 if the symbol needs copy reloc or the symbol is undefined 4328 when building executable. */ 4329 if ((info->shared 4330 && !(info->executable 4331 && h != NULL 4332 && (h->needs_copy 4333 || eh->needs_copy 4334 || h->root.type == bfd_link_hash_undefined) 4335 && IS_X86_64_PCREL_TYPE (r_type)) 4336 && (h == NULL 4337 || ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 4338 || h->root.type != bfd_link_hash_undefweak) 4339 && ((! IS_X86_64_PCREL_TYPE (r_type) 4340 && r_type != R_X86_64_SIZE32 4341 && r_type != R_X86_64_SIZE64) 4342 || ! 
SYMBOL_CALLS_LOCAL (info, h))) 4343 || (ELIMINATE_COPY_RELOCS 4344 && !info->shared 4345 && h != NULL 4346 && h->dynindx != -1 4347 && !h->non_got_ref 4348 && ((h->def_dynamic 4349 && !h->def_regular) 4350 || h->root.type == bfd_link_hash_undefweak 4351 || h->root.type == bfd_link_hash_undefined))) 4352 { 4353 Elf_Internal_Rela outrel; 4354 bfd_boolean skip, relocate; 4355 asection *sreloc; 4356 4357 /* When generating a shared object, these relocations 4358 are copied into the output file to be resolved at run 4359 time. */ 4360 skip = FALSE; 4361 relocate = FALSE; 4362 4363 outrel.r_offset = 4364 _bfd_elf_section_offset (output_bfd, info, input_section, 4365 rel->r_offset); 4366 if (outrel.r_offset == (bfd_vma) -1) 4367 skip = TRUE; 4368 else if (outrel.r_offset == (bfd_vma) -2) 4369 skip = TRUE, relocate = TRUE; 4370 4371 outrel.r_offset += (input_section->output_section->vma 4372 + input_section->output_offset); 4373 4374 if (skip) 4375 memset (&outrel, 0, sizeof outrel); 4376 4377 /* h->dynindx may be -1 if this symbol was marked to 4378 become local. */ 4379 else if (h != NULL 4380 && h->dynindx != -1 4381 && (IS_X86_64_PCREL_TYPE (r_type) 4382 || ! info->shared 4383 || ! SYMBOLIC_BIND (info, h) 4384 || ! h->def_regular)) 4385 { 4386 outrel.r_info = htab->r_info (h->dynindx, r_type); 4387 outrel.r_addend = rel->r_addend; 4388 } 4389 else 4390 { 4391 /* This symbol is local, or marked to become local. */ 4392 if (r_type == htab->pointer_r_type) 4393 { 4394 relocate = TRUE; 4395 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); 4396 outrel.r_addend = relocation + rel->r_addend; 4397 } 4398 else if (r_type == R_X86_64_64 4399 && !ABI_64_P (output_bfd)) 4400 { 4401 relocate = TRUE; 4402 outrel.r_info = htab->r_info (0, 4403 R_X86_64_RELATIVE64); 4404 outrel.r_addend = relocation + rel->r_addend; 4405 /* Check addend overflow. */ 4406 if ((outrel.r_addend & 0x80000000) 4407 != (rel->r_addend & 0x80000000)) 4408 { 4409 const char *name; 4410 int addend = rel->r_addend; 4411 if (h && h->root.root.string) 4412 name = h->root.root.string; 4413 else 4414 name = bfd_elf_sym_name (input_bfd, symtab_hdr, 4415 sym, NULL); 4416 if (addend < 0) 4417 (*_bfd_error_handler) 4418 (_("%B: addend -0x%x in relocation %s against " 4419 "symbol `%s' at 0x%lx in section `%A' is " 4420 "out of range"), 4421 input_bfd, input_section, addend, 4422 x86_64_elf_howto_table[r_type].name, 4423 name, (unsigned long) rel->r_offset); 4424 else 4425 (*_bfd_error_handler) 4426 (_("%B: addend 0x%x in relocation %s against " 4427 "symbol `%s' at 0x%lx in section `%A' is " 4428 "out of range"), 4429 input_bfd, input_section, addend, 4430 x86_64_elf_howto_table[r_type].name, 4431 name, (unsigned long) rel->r_offset); 4432 bfd_set_error (bfd_error_bad_value); 4433 return FALSE; 4434 } 4435 } 4436 else 4437 { 4438 long sindx; 4439 4440 if (bfd_is_abs_section (sec)) 4441 sindx = 0; 4442 else if (sec == NULL || sec->owner == NULL) 4443 { 4444 bfd_set_error (bfd_error_bad_value); 4445 return FALSE; 4446 } 4447 else 4448 { 4449 asection *osec; 4450 4451 /* We are turning this relocation into one 4452 against a section symbol. It would be 4453 proper to subtract the symbol's value, 4454 osec->vma, from the emitted reloc addend, 4455 but ld.so expects buggy relocs. 
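   As a result the entry appended below carries
   r_addend = relocation + rel->r_addend, i.e. the full link-time
   value of the place the symbol refers to, not its offset from
   osec->vma.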
*/ 4456 osec = sec->output_section; 4457 sindx = elf_section_data (osec)->dynindx; 4458 if (sindx == 0) 4459 { 4460 asection *oi = htab->elf.text_index_section; 4461 sindx = elf_section_data (oi)->dynindx; 4462 } 4463 BFD_ASSERT (sindx != 0); 4464 } 4465 4466 outrel.r_info = htab->r_info (sindx, r_type); 4467 outrel.r_addend = relocation + rel->r_addend; 4468 } 4469 } 4470 4471 sreloc = elf_section_data (input_section)->sreloc; 4472 4473 if (sreloc == NULL || sreloc->contents == NULL) 4474 { 4475 r = bfd_reloc_notsupported; 4476 goto check_relocation_error; 4477 } 4478 4479 elf_append_rela (output_bfd, sreloc, &outrel); 4480 4481 /* If this reloc is against an external symbol, we do 4482 not want to fiddle with the addend. Otherwise, we 4483 need to include the symbol value so that it becomes 4484 an addend for the dynamic reloc. */ 4485 if (! relocate) 4486 continue; 4487 } 4488 4489 break; 4490 4491 case R_X86_64_TLSGD: 4492 case R_X86_64_GOTPC32_TLSDESC: 4493 case R_X86_64_TLSDESC_CALL: 4494 case R_X86_64_GOTTPOFF: 4495 tls_type = GOT_UNKNOWN; 4496 if (h == NULL && local_got_offsets) 4497 tls_type = elf_x86_64_local_got_tls_type (input_bfd) [r_symndx]; 4498 else if (h != NULL) 4499 tls_type = elf_x86_64_hash_entry (h)->tls_type; 4500 4501 if (! elf_x86_64_tls_transition (info, input_bfd, 4502 input_section, contents, 4503 symtab_hdr, sym_hashes, 4504 &r_type, tls_type, rel, 4505 relend, h, r_symndx)) 4506 return FALSE; 4507 4508 if (r_type == R_X86_64_TPOFF32) 4509 { 4510 bfd_vma roff = rel->r_offset; 4511 4512 BFD_ASSERT (! unresolved_reloc); 4513 4514 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD) 4515 { 4516 /* GD->LE transition. For 64bit, change 4517 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 4518 .word 0x6666; rex64; call __tls_get_addr 4519 into: 4520 movq %fs:0, %rax 4521 leaq foo@tpoff(%rax), %rax 4522 For 32bit, change 4523 leaq foo@tlsgd(%rip), %rdi 4524 .word 0x6666; rex64; call __tls_get_addr 4525 into: 4526 movl %fs:0, %eax 4527 leaq foo@tpoff(%rax), %rax 4528 For largepic, change: 4529 leaq foo@tlsgd(%rip), %rdi 4530 movabsq $__tls_get_addr@pltoff, %rax 4531 addq %rbx, %rax 4532 call *%rax 4533 into: 4534 movq %fs:0, %rax 4535 leaq foo@tpoff(%rax), %rax 4536 nopw 0x0(%rax,%rax,1) */ 4537 int largepic = 0; 4538 if (ABI_64_P (output_bfd) 4539 && contents[roff + 5] == (bfd_byte) '\xb8') 4540 { 4541 memcpy (contents + roff - 3, 4542 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" 4543 "\0\0\0\0\x66\x0f\x1f\x44\0", 22); 4544 largepic = 1; 4545 } 4546 else if (ABI_64_P (output_bfd)) 4547 memcpy (contents + roff - 4, 4548 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", 4549 16); 4550 else 4551 memcpy (contents + roff - 3, 4552 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", 4553 15); 4554 bfd_put_32 (output_bfd, 4555 elf_x86_64_tpoff (info, relocation), 4556 contents + roff + 8 + largepic); 4557 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */ 4558 rel++; 4559 continue; 4560 } 4561 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC) 4562 { 4563 /* GDesc -> LE transition. 4564 It's originally something like: 4565 leaq x@tlsdesc(%rip), %rax 4566 4567 Change it to: 4568 movl $x@tpoff, %rax. 
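   At the byte level (a sketch for the %rax case; the code below
   handles any destination register by moving the ModRM reg field
   into the r/m field and folding the old REX.R bit into REX.B):

     48 8d 05 xx xx xx xx    leaq  x@tlsdesc(%rip), %rax
   becomes
     48 c7 c0 xx xx xx xx    movq  $x@tpoff, %rax

   with the @tpoff value written over the 4-byte immediate at ROFF.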
*/ 4569 4570 unsigned int val, type; 4571 4572 type = bfd_get_8 (input_bfd, contents + roff - 3); 4573 val = bfd_get_8 (input_bfd, contents + roff - 1); 4574 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1), 4575 contents + roff - 3); 4576 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2); 4577 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7), 4578 contents + roff - 1); 4579 bfd_put_32 (output_bfd, 4580 elf_x86_64_tpoff (info, relocation), 4581 contents + roff); 4582 continue; 4583 } 4584 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL) 4585 { 4586 /* GDesc -> LE transition. 4587 It's originally: 4588 call *(%rax) 4589 Turn it into: 4590 xchg %ax,%ax. */ 4591 bfd_put_8 (output_bfd, 0x66, contents + roff); 4592 bfd_put_8 (output_bfd, 0x90, contents + roff + 1); 4593 continue; 4594 } 4595 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTTPOFF) 4596 { 4597 /* IE->LE transition: 4598 For 64bit, originally it can be one of: 4599 movq foo@gottpoff(%rip), %reg 4600 addq foo@gottpoff(%rip), %reg 4601 We change it into: 4602 movq $foo, %reg 4603 leaq foo(%reg), %reg 4604 addq $foo, %reg. 4605 For 32bit, originally it can be one of: 4606 movq foo@gottpoff(%rip), %reg 4607 addl foo@gottpoff(%rip), %reg 4608 We change it into: 4609 movq $foo, %reg 4610 leal foo(%reg), %reg 4611 addl $foo, %reg. */ 4612 4613 unsigned int val, type, reg; 4614 4615 if (roff >= 3) 4616 val = bfd_get_8 (input_bfd, contents + roff - 3); 4617 else 4618 val = 0; 4619 type = bfd_get_8 (input_bfd, contents + roff - 2); 4620 reg = bfd_get_8 (input_bfd, contents + roff - 1); 4621 reg >>= 3; 4622 if (type == 0x8b) 4623 { 4624 /* movq */ 4625 if (val == 0x4c) 4626 bfd_put_8 (output_bfd, 0x49, 4627 contents + roff - 3); 4628 else if (!ABI_64_P (output_bfd) && val == 0x44) 4629 bfd_put_8 (output_bfd, 0x41, 4630 contents + roff - 3); 4631 bfd_put_8 (output_bfd, 0xc7, 4632 contents + roff - 2); 4633 bfd_put_8 (output_bfd, 0xc0 | reg, 4634 contents + roff - 1); 4635 } 4636 else if (reg == 4) 4637 { 4638 /* addq/addl -> addq/addl - addressing with %rsp/%r12 4639 is special */ 4640 if (val == 0x4c) 4641 bfd_put_8 (output_bfd, 0x49, 4642 contents + roff - 3); 4643 else if (!ABI_64_P (output_bfd) && val == 0x44) 4644 bfd_put_8 (output_bfd, 0x41, 4645 contents + roff - 3); 4646 bfd_put_8 (output_bfd, 0x81, 4647 contents + roff - 2); 4648 bfd_put_8 (output_bfd, 0xc0 | reg, 4649 contents + roff - 1); 4650 } 4651 else 4652 { 4653 /* addq/addl -> leaq/leal */ 4654 if (val == 0x4c) 4655 bfd_put_8 (output_bfd, 0x4d, 4656 contents + roff - 3); 4657 else if (!ABI_64_P (output_bfd) && val == 0x44) 4658 bfd_put_8 (output_bfd, 0x45, 4659 contents + roff - 3); 4660 bfd_put_8 (output_bfd, 0x8d, 4661 contents + roff - 2); 4662 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3), 4663 contents + roff - 1); 4664 } 4665 bfd_put_32 (output_bfd, 4666 elf_x86_64_tpoff (info, relocation), 4667 contents + roff); 4668 continue; 4669 } 4670 else 4671 BFD_ASSERT (FALSE); 4672 } 4673 4674 if (htab->elf.sgot == NULL) 4675 abort (); 4676 4677 if (h != NULL) 4678 { 4679 off = h->got.offset; 4680 offplt = elf_x86_64_hash_entry (h)->tlsdesc_got; 4681 } 4682 else 4683 { 4684 if (local_got_offsets == NULL) 4685 abort (); 4686 4687 off = local_got_offsets[r_symndx]; 4688 offplt = local_tlsdesc_gotents[r_symndx]; 4689 } 4690 4691 if ((off & 1) != 0) 4692 off &= ~1; 4693 else 4694 { 4695 Elf_Internal_Rela outrel; 4696 int dr_type, indx; 4697 asection *sreloc; 4698 4699 if (htab->elf.srelgot == NULL) 4700 abort (); 4701 4702 indx = h && h->dynindx != -1 ? 
h->dynindx : 0; 4703 4704 if (GOT_TLS_GDESC_P (tls_type)) 4705 { 4706 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC); 4707 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt 4708 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size); 4709 outrel.r_offset = (htab->elf.sgotplt->output_section->vma 4710 + htab->elf.sgotplt->output_offset 4711 + offplt 4712 + htab->sgotplt_jump_table_size); 4713 sreloc = htab->elf.srelplt; 4714 if (indx == 0) 4715 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info); 4716 else 4717 outrel.r_addend = 0; 4718 elf_append_rela (output_bfd, sreloc, &outrel); 4719 } 4720 4721 sreloc = htab->elf.srelgot; 4722 4723 outrel.r_offset = (htab->elf.sgot->output_section->vma 4724 + htab->elf.sgot->output_offset + off); 4725 4726 if (GOT_TLS_GD_P (tls_type)) 4727 dr_type = R_X86_64_DTPMOD64; 4728 else if (GOT_TLS_GDESC_P (tls_type)) 4729 goto dr_done; 4730 else 4731 dr_type = R_X86_64_TPOFF64; 4732 4733 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off); 4734 outrel.r_addend = 0; 4735 if ((dr_type == R_X86_64_TPOFF64 4736 || dr_type == R_X86_64_TLSDESC) && indx == 0) 4737 outrel.r_addend = relocation - elf_x86_64_dtpoff_base (info); 4738 outrel.r_info = htab->r_info (indx, dr_type); 4739 4740 elf_append_rela (output_bfd, sreloc, &outrel); 4741 4742 if (GOT_TLS_GD_P (tls_type)) 4743 { 4744 if (indx == 0) 4745 { 4746 BFD_ASSERT (! unresolved_reloc); 4747 bfd_put_64 (output_bfd, 4748 relocation - elf_x86_64_dtpoff_base (info), 4749 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 4750 } 4751 else 4752 { 4753 bfd_put_64 (output_bfd, 0, 4754 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 4755 outrel.r_info = htab->r_info (indx, 4756 R_X86_64_DTPOFF64); 4757 outrel.r_offset += GOT_ENTRY_SIZE; 4758 elf_append_rela (output_bfd, sreloc, 4759 &outrel); 4760 } 4761 } 4762 4763 dr_done: 4764 if (h != NULL) 4765 h->got.offset |= 1; 4766 else 4767 local_got_offsets[r_symndx] |= 1; 4768 } 4769 4770 if (off >= (bfd_vma) -2 4771 && ! GOT_TLS_GDESC_P (tls_type)) 4772 abort (); 4773 if (r_type == ELF32_R_TYPE (rel->r_info)) 4774 { 4775 if (r_type == R_X86_64_GOTPC32_TLSDESC 4776 || r_type == R_X86_64_TLSDESC_CALL) 4777 relocation = htab->elf.sgotplt->output_section->vma 4778 + htab->elf.sgotplt->output_offset 4779 + offplt + htab->sgotplt_jump_table_size; 4780 else 4781 relocation = htab->elf.sgot->output_section->vma 4782 + htab->elf.sgot->output_offset + off; 4783 unresolved_reloc = FALSE; 4784 } 4785 else 4786 { 4787 bfd_vma roff = rel->r_offset; 4788 4789 if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSGD) 4790 { 4791 /* GD->IE transition. 
For 64bit, change 4792 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 4793 .word 0x6666; rex64; call __tls_get_addr@plt 4794 into: 4795 movq %fs:0, %rax 4796 addq foo@gottpoff(%rip), %rax 4797 For 32bit, change 4798 leaq foo@tlsgd(%rip), %rdi 4799 .word 0x6666; rex64; call __tls_get_addr@plt 4800 into: 4801 movl %fs:0, %eax 4802 addq foo@gottpoff(%rip), %rax 4803 For largepic, change: 4804 leaq foo@tlsgd(%rip), %rdi 4805 movabsq $__tls_get_addr@pltoff, %rax 4806 addq %rbx, %rax 4807 call *%rax 4808 into: 4809 movq %fs:0, %rax 4810 addq foo@gottpoff(%rax), %rax 4811 nopw 0x0(%rax,%rax,1) */ 4812 int largepic = 0; 4813 if (ABI_64_P (output_bfd) 4814 && contents[roff + 5] == (bfd_byte) '\xb8') 4815 { 4816 memcpy (contents + roff - 3, 4817 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" 4818 "\0\0\0\0\x66\x0f\x1f\x44\0", 22); 4819 largepic = 1; 4820 } 4821 else if (ABI_64_P (output_bfd)) 4822 memcpy (contents + roff - 4, 4823 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", 4824 16); 4825 else 4826 memcpy (contents + roff - 3, 4827 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", 4828 15); 4829 4830 relocation = (htab->elf.sgot->output_section->vma 4831 + htab->elf.sgot->output_offset + off 4832 - roff 4833 - largepic 4834 - input_section->output_section->vma 4835 - input_section->output_offset 4836 - 12); 4837 bfd_put_32 (output_bfd, relocation, 4838 contents + roff + 8 + largepic); 4839 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */ 4840 rel++; 4841 continue; 4842 } 4843 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_GOTPC32_TLSDESC) 4844 { 4845 /* GDesc -> IE transition. 4846 It's originally something like: 4847 leaq x@tlsdesc(%rip), %rax 4848 4849 Change it to: 4850 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */ 4851 4852 /* Now modify the instruction as appropriate. To 4853 turn a leaq into a movq in the form we use it, it 4854 suffices to change the second byte from 0x8d to 4855 0x8b. */ 4856 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2); 4857 4858 bfd_put_32 (output_bfd, 4859 htab->elf.sgot->output_section->vma 4860 + htab->elf.sgot->output_offset + off 4861 - rel->r_offset 4862 - input_section->output_section->vma 4863 - input_section->output_offset 4864 - 4, 4865 contents + roff); 4866 continue; 4867 } 4868 else if (ELF32_R_TYPE (rel->r_info) == R_X86_64_TLSDESC_CALL) 4869 { 4870 /* GDesc -> IE transition. 4871 It's originally: 4872 call *(%rax) 4873 4874 Change it to: 4875 xchg %ax, %ax. */ 4876 4877 bfd_put_8 (output_bfd, 0x66, contents + roff); 4878 bfd_put_8 (output_bfd, 0x90, contents + roff + 1); 4879 continue; 4880 } 4881 else 4882 BFD_ASSERT (FALSE); 4883 } 4884 break; 4885 4886 case R_X86_64_TLSLD: 4887 if (! elf_x86_64_tls_transition (info, input_bfd, 4888 input_section, contents, 4889 symtab_hdr, sym_hashes, 4890 &r_type, GOT_UNKNOWN, 4891 rel, relend, h, r_symndx)) 4892 return FALSE; 4893 4894 if (r_type != R_X86_64_TLSLD) 4895 { 4896 /* LD->LE transition: 4897 leaq foo@tlsld(%rip), %rdi; call __tls_get_addr. 4898 For 64bit, we change it into: 4899 .word 0x6666; .byte 0x66; movq %fs:0, %rax. 4900 For 32bit, we change it into: 4901 nopl 0x0(%rax); movl %fs:0, %eax. 
4902 For largepic, change: 4903 leaq foo@tlsgd(%rip), %rdi 4904 movabsq $__tls_get_addr@pltoff, %rax 4905 addq %rbx, %rax 4906 call *%rax 4907 into: 4908 data32 data32 data32 nopw %cs:0x0(%rax,%rax,1) 4909 movq %fs:0, %eax */ 4910 4911 BFD_ASSERT (r_type == R_X86_64_TPOFF32); 4912 if (ABI_64_P (output_bfd) 4913 && contents[rel->r_offset + 5] == (bfd_byte) '\xb8') 4914 memcpy (contents + rel->r_offset - 3, 4915 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" 4916 "\x64\x48\x8b\x04\x25\0\0\0", 22); 4917 else if (ABI_64_P (output_bfd)) 4918 memcpy (contents + rel->r_offset - 3, 4919 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); 4920 else 4921 memcpy (contents + rel->r_offset - 3, 4922 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); 4923 /* Skip R_X86_64_PC32/R_X86_64_PLT32/R_X86_64_PLTOFF64. */ 4924 rel++; 4925 continue; 4926 } 4927 4928 if (htab->elf.sgot == NULL) 4929 abort (); 4930 4931 off = htab->tls_ld_got.offset; 4932 if (off & 1) 4933 off &= ~1; 4934 else 4935 { 4936 Elf_Internal_Rela outrel; 4937 4938 if (htab->elf.srelgot == NULL) 4939 abort (); 4940 4941 outrel.r_offset = (htab->elf.sgot->output_section->vma 4942 + htab->elf.sgot->output_offset + off); 4943 4944 bfd_put_64 (output_bfd, 0, 4945 htab->elf.sgot->contents + off); 4946 bfd_put_64 (output_bfd, 0, 4947 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 4948 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64); 4949 outrel.r_addend = 0; 4950 elf_append_rela (output_bfd, htab->elf.srelgot, 4951 &outrel); 4952 htab->tls_ld_got.offset |= 1; 4953 } 4954 relocation = htab->elf.sgot->output_section->vma 4955 + htab->elf.sgot->output_offset + off; 4956 unresolved_reloc = FALSE; 4957 break; 4958 4959 case R_X86_64_DTPOFF32: 4960 if (!info->executable|| (input_section->flags & SEC_CODE) == 0) 4961 relocation -= elf_x86_64_dtpoff_base (info); 4962 else 4963 relocation = elf_x86_64_tpoff (info, relocation); 4964 break; 4965 4966 case R_X86_64_TPOFF32: 4967 case R_X86_64_TPOFF64: 4968 BFD_ASSERT (info->executable); 4969 relocation = elf_x86_64_tpoff (info, relocation); 4970 break; 4971 4972 case R_X86_64_DTPOFF64: 4973 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0); 4974 relocation -= elf_x86_64_dtpoff_base (info); 4975 break; 4976 4977 default: 4978 break; 4979 } 4980 4981 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections 4982 because such sections are not SEC_ALLOC and thus ld.so will 4983 not process them. */ 4984 if (unresolved_reloc 4985 && !((input_section->flags & SEC_DEBUGGING) != 0 4986 && h->def_dynamic) 4987 && _bfd_elf_section_offset (output_bfd, info, input_section, 4988 rel->r_offset) != (bfd_vma) -1) 4989 { 4990 (*_bfd_error_handler) 4991 (_("%B(%A+0x%lx): unresolvable %s relocation against symbol `%s'"), 4992 input_bfd, 4993 input_section, 4994 (long) rel->r_offset, 4995 howto->name, 4996 h->root.root.string); 4997 return FALSE; 4998 } 4999 5000 do_relocation: 5001 r = _bfd_final_link_relocate (howto, input_bfd, input_section, 5002 contents, rel->r_offset, 5003 relocation, rel->r_addend); 5004 5005 check_relocation_error: 5006 if (r != bfd_reloc_ok) 5007 { 5008 const char *name; 5009 5010 if (h != NULL) 5011 name = h->root.root.string; 5012 else 5013 { 5014 name = bfd_elf_string_from_elf_section (input_bfd, 5015 symtab_hdr->sh_link, 5016 sym->st_name); 5017 if (name == NULL) 5018 return FALSE; 5019 if (*name == '\0') 5020 name = bfd_section_name (input_bfd, sec); 5021 } 5022 5023 if (r == bfd_reloc_overflow) 5024 { 5025 if (! ((*info->callbacks->reloc_overflow) 5026 (info, (h ? 
&h->root : NULL), name, howto->name, 5027 (bfd_vma) 0, input_bfd, input_section, 5028 rel->r_offset))) 5029 return FALSE; 5030 } 5031 else 5032 { 5033 (*_bfd_error_handler) 5034 (_("%B(%A+0x%lx): reloc against `%s': error %d"), 5035 input_bfd, input_section, 5036 (long) rel->r_offset, name, (int) r); 5037 return FALSE; 5038 } 5039 } 5040 } 5041 5042 return TRUE; 5043 } 5044 5045 /* Finish up dynamic symbol handling. We set the contents of various 5046 dynamic sections here. */ 5047 5048 static bfd_boolean 5049 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, 5050 struct bfd_link_info *info, 5051 struct elf_link_hash_entry *h, 5052 Elf_Internal_Sym *sym ATTRIBUTE_UNUSED) 5053 { 5054 struct elf_x86_64_link_hash_table *htab; 5055 const struct elf_x86_64_backend_data *abed; 5056 bfd_boolean use_plt_bnd; 5057 struct elf_x86_64_link_hash_entry *eh; 5058 5059 htab = elf_x86_64_hash_table (info); 5060 if (htab == NULL) 5061 return FALSE; 5062 5063 /* Use MPX backend data in case of BND relocation. Use .plt_bnd 5064 section only if there is .plt section. */ 5065 use_plt_bnd = htab->elf.splt != NULL && htab->plt_bnd != NULL; 5066 abed = (use_plt_bnd 5067 ? &elf_x86_64_bnd_arch_bed 5068 : get_elf_x86_64_backend_data (output_bfd)); 5069 5070 eh = (struct elf_x86_64_link_hash_entry *) h; 5071 5072 if (h->plt.offset != (bfd_vma) -1) 5073 { 5074 bfd_vma plt_index; 5075 bfd_vma got_offset, plt_offset, plt_plt_offset, plt_got_offset; 5076 bfd_vma plt_plt_insn_end, plt_got_insn_size; 5077 Elf_Internal_Rela rela; 5078 bfd_byte *loc; 5079 asection *plt, *gotplt, *relplt, *resolved_plt; 5080 const struct elf_backend_data *bed; 5081 bfd_vma plt_got_pcrel_offset; 5082 5083 /* When building a static executable, use .iplt, .igot.plt and 5084 .rela.iplt sections for STT_GNU_IFUNC symbols. */ 5085 if (htab->elf.splt != NULL) 5086 { 5087 plt = htab->elf.splt; 5088 gotplt = htab->elf.sgotplt; 5089 relplt = htab->elf.srelplt; 5090 } 5091 else 5092 { 5093 plt = htab->elf.iplt; 5094 gotplt = htab->elf.igotplt; 5095 relplt = htab->elf.irelplt; 5096 } 5097 5098 /* This symbol has an entry in the procedure linkage table. Set 5099 it up. */ 5100 if ((h->dynindx == -1 5101 && !((h->forced_local || info->executable) 5102 && h->def_regular 5103 && h->type == STT_GNU_IFUNC)) 5104 || plt == NULL 5105 || gotplt == NULL 5106 || relplt == NULL) 5107 abort (); 5108 5109 /* Get the index in the procedure linkage table which 5110 corresponds to this symbol. This is the index of this symbol 5111 in all the symbols for which we are making plt entries. The 5112 first entry in the procedure linkage table is reserved. 5113 5114 Get the offset into the .got table of the entry that 5115 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE 5116 bytes. The first three are reserved for the dynamic linker. 5117 5118 For static executables, we don't reserve anything. */ 5119 5120 if (plt == htab->elf.splt) 5121 { 5122 got_offset = h->plt.offset / abed->plt_entry_size - 1; 5123 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE; 5124 } 5125 else 5126 { 5127 got_offset = h->plt.offset / abed->plt_entry_size; 5128 got_offset = got_offset * GOT_ENTRY_SIZE; 5129 } 5130 5131 plt_plt_insn_end = abed->plt_plt_insn_end; 5132 plt_plt_offset = abed->plt_plt_offset; 5133 plt_got_insn_size = abed->plt_got_insn_size; 5134 plt_got_offset = abed->plt_got_offset; 5135 if (use_plt_bnd) 5136 { 5137 /* Use the second PLT with BND relocations. 
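   With MPX the procedure linkage table is split in two: each
   16-byte .plt entry pushes its relocation index and (bnd) jumps to
   PLT0 for lazy resolution, while the matching 8-byte .plt.bnd
   entry performs the indirect (bnd) jump through the .got.plt slot,
   and branch relocations are resolved to the .plt.bnd entry.  The
   non-BND ("legacy") templates used below differ only in lacking
   the one-byte bnd prefix, hence the -1 adjustments to the
   precomputed offsets.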
*/ 5138 const bfd_byte *plt_entry, *plt2_entry; 5139 5140 if (eh->has_bnd_reloc) 5141 { 5142 plt_entry = elf_x86_64_bnd_plt_entry; 5143 plt2_entry = elf_x86_64_bnd_plt2_entry; 5144 } 5145 else 5146 { 5147 plt_entry = elf_x86_64_legacy_plt_entry; 5148 plt2_entry = elf_x86_64_legacy_plt2_entry; 5149 5150 /* Subtract 1 since there is no BND prefix. */ 5151 plt_plt_insn_end -= 1; 5152 plt_plt_offset -= 1; 5153 plt_got_insn_size -= 1; 5154 plt_got_offset -= 1; 5155 } 5156 5157 BFD_ASSERT (sizeof (elf_x86_64_bnd_plt_entry) 5158 == sizeof (elf_x86_64_legacy_plt_entry)); 5159 5160 /* Fill in the entry in the procedure linkage table. */ 5161 memcpy (plt->contents + h->plt.offset, 5162 plt_entry, sizeof (elf_x86_64_legacy_plt_entry)); 5163 /* Fill in the entry in the second PLT. */ 5164 memcpy (htab->plt_bnd->contents + eh->plt_bnd.offset, 5165 plt2_entry, sizeof (elf_x86_64_legacy_plt2_entry)); 5166 5167 resolved_plt = htab->plt_bnd; 5168 plt_offset = eh->plt_bnd.offset; 5169 } 5170 else 5171 { 5172 /* Fill in the entry in the procedure linkage table. */ 5173 memcpy (plt->contents + h->plt.offset, abed->plt_entry, 5174 abed->plt_entry_size); 5175 5176 resolved_plt = plt; 5177 plt_offset = h->plt.offset; 5178 } 5179 5180 /* Insert the relocation positions of the plt section. */ 5181 5182 /* Put offset the PC-relative instruction referring to the GOT entry, 5183 subtracting the size of that instruction. */ 5184 plt_got_pcrel_offset = (gotplt->output_section->vma 5185 + gotplt->output_offset 5186 + got_offset 5187 - resolved_plt->output_section->vma 5188 - resolved_plt->output_offset 5189 - plt_offset 5190 - plt_got_insn_size); 5191 5192 /* Check PC-relative offset overflow in PLT entry. */ 5193 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff) 5194 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in PLT entry for `%s'\n"), 5195 output_bfd, h->root.root.string); 5196 5197 bfd_put_32 (output_bfd, plt_got_pcrel_offset, 5198 resolved_plt->contents + plt_offset + plt_got_offset); 5199 5200 /* Fill in the entry in the global offset table, initially this 5201 points to the second part of the PLT entry. */ 5202 bfd_put_64 (output_bfd, (plt->output_section->vma 5203 + plt->output_offset 5204 + h->plt.offset + abed->plt_lazy_offset), 5205 gotplt->contents + got_offset); 5206 5207 /* Fill in the entry in the .rela.plt section. */ 5208 rela.r_offset = (gotplt->output_section->vma 5209 + gotplt->output_offset 5210 + got_offset); 5211 if (h->dynindx == -1 5212 || ((info->executable 5213 || ELF_ST_VISIBILITY (h->other) != STV_DEFAULT) 5214 && h->def_regular 5215 && h->type == STT_GNU_IFUNC)) 5216 { 5217 /* If an STT_GNU_IFUNC symbol is locally defined, generate 5218 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */ 5219 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE); 5220 rela.r_addend = (h->root.u.def.value 5221 + h->root.u.def.section->output_section->vma 5222 + h->root.u.def.section->output_offset); 5223 /* R_X86_64_IRELATIVE comes last. */ 5224 plt_index = htab->next_irelative_index--; 5225 } 5226 else 5227 { 5228 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT); 5229 rela.r_addend = 0; 5230 plt_index = htab->next_jump_slot_index++; 5231 } 5232 5233 /* Don't fill PLT entry for static executables. */ 5234 if (plt == htab->elf.splt) 5235 { 5236 bfd_vma plt0_offset = h->plt.offset + plt_plt_insn_end; 5237 5238 /* Put relocation index. 
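   For the default (non-MPX) 16-byte entry this fills the pushq
   immediate of

     ff 25 xx xx xx xx    jmpq  *name@GOTPCREL(%rip)
     68 xx xx xx xx       pushq $plt_index
     e9 xx xx xx xx       jmpq  PLT0

   so that on the first call PLT0 can pass the index of the
   .rela.plt entry written further down to the dynamic linker.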
*/ 5239 bfd_put_32 (output_bfd, plt_index, 5240 plt->contents + h->plt.offset + abed->plt_reloc_offset); 5241 5242 /* Put offset for jmp .PLT0 and check for overflow. We don't 5243 check relocation index for overflow since branch displacement 5244 will overflow first. */ 5245 if (plt0_offset > 0x80000000) 5246 info->callbacks->einfo (_("%F%B: branch displacement overflow in PLT entry for `%s'\n"), 5247 output_bfd, h->root.root.string); 5248 bfd_put_32 (output_bfd, - plt0_offset, 5249 plt->contents + h->plt.offset + plt_plt_offset); 5250 } 5251 5252 bed = get_elf_backend_data (output_bfd); 5253 loc = relplt->contents + plt_index * bed->s->sizeof_rela; 5254 bed->s->swap_reloca_out (output_bfd, &rela, loc); 5255 } 5256 else if (eh->plt_got.offset != (bfd_vma) -1) 5257 { 5258 bfd_vma got_offset, plt_offset, plt_got_offset, plt_got_insn_size; 5259 asection *plt, *got; 5260 bfd_boolean got_after_plt; 5261 int32_t got_pcrel_offset; 5262 const bfd_byte *got_plt_entry; 5263 5264 /* Set the entry in the GOT procedure linkage table. */ 5265 plt = htab->plt_got; 5266 got = htab->elf.sgot; 5267 got_offset = h->got.offset; 5268 5269 if (got_offset == (bfd_vma) -1 5270 || h->type == STT_GNU_IFUNC 5271 || plt == NULL 5272 || got == NULL) 5273 abort (); 5274 5275 /* Use the second PLT entry template for the GOT PLT since they 5276 are the identical. */ 5277 plt_got_insn_size = elf_x86_64_bnd_arch_bed.plt_got_insn_size; 5278 plt_got_offset = elf_x86_64_bnd_arch_bed.plt_got_offset; 5279 if (eh->has_bnd_reloc) 5280 got_plt_entry = elf_x86_64_bnd_plt2_entry; 5281 else 5282 { 5283 got_plt_entry = elf_x86_64_legacy_plt2_entry; 5284 5285 /* Subtract 1 since there is no BND prefix. */ 5286 plt_got_insn_size -= 1; 5287 plt_got_offset -= 1; 5288 } 5289 5290 /* Fill in the entry in the GOT procedure linkage table. */ 5291 plt_offset = eh->plt_got.offset; 5292 memcpy (plt->contents + plt_offset, 5293 got_plt_entry, sizeof (elf_x86_64_legacy_plt2_entry)); 5294 5295 /* Put offset the PC-relative instruction referring to the GOT 5296 entry, subtracting the size of that instruction. */ 5297 got_pcrel_offset = (got->output_section->vma 5298 + got->output_offset 5299 + got_offset 5300 - plt->output_section->vma 5301 - plt->output_offset 5302 - plt_offset 5303 - plt_got_insn_size); 5304 5305 /* Check PC-relative offset overflow in GOT PLT entry. */ 5306 got_after_plt = got->output_section->vma > plt->output_section->vma; 5307 if ((got_after_plt && got_pcrel_offset < 0) 5308 || (!got_after_plt && got_pcrel_offset > 0)) 5309 info->callbacks->einfo (_("%F%B: PC-relative offset overflow in GOT PLT entry for `%s'\n"), 5310 output_bfd, h->root.root.string); 5311 5312 bfd_put_32 (output_bfd, got_pcrel_offset, 5313 plt->contents + plt_offset + plt_got_offset); 5314 } 5315 5316 if (!h->def_regular 5317 && (h->plt.offset != (bfd_vma) -1 5318 || eh->plt_got.offset != (bfd_vma) -1)) 5319 { 5320 /* Mark the symbol as undefined, rather than as defined in 5321 the .plt section. Leave the value if there were any 5322 relocations where pointer equality matters (this is a clue 5323 for the dynamic linker, to make function pointer 5324 comparisons work between an application and shared 5325 library), otherwise set it to zero. If a function is only 5326 called from a binary, there is no need to slow down 5327 shared libraries because of that. */ 5328 sym->st_shndx = SHN_UNDEF; 5329 if (!h->pointer_equality_needed) 5330 sym->st_value = 0; 5331 } 5332 5333 if (h->got.offset != (bfd_vma) -1 5334 && ! 
GOT_TLS_GD_ANY_P (elf_x86_64_hash_entry (h)->tls_type) 5335 && elf_x86_64_hash_entry (h)->tls_type != GOT_TLS_IE) 5336 { 5337 Elf_Internal_Rela rela; 5338 5339 /* This symbol has an entry in the global offset table. Set it 5340 up. */ 5341 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL) 5342 abort (); 5343 5344 rela.r_offset = (htab->elf.sgot->output_section->vma 5345 + htab->elf.sgot->output_offset 5346 + (h->got.offset &~ (bfd_vma) 1)); 5347 5348 /* If this is a static link, or it is a -Bsymbolic link and the 5349 symbol is defined locally or was forced to be local because 5350 of a version file, we just want to emit a RELATIVE reloc. 5351 The entry in the global offset table will already have been 5352 initialized in the relocate_section function. */ 5353 if (h->def_regular 5354 && h->type == STT_GNU_IFUNC) 5355 { 5356 if (info->shared) 5357 { 5358 /* Generate R_X86_64_GLOB_DAT. */ 5359 goto do_glob_dat; 5360 } 5361 else 5362 { 5363 asection *plt; 5364 5365 if (!h->pointer_equality_needed) 5366 abort (); 5367 5368 /* For non-shared object, we can't use .got.plt, which 5369 contains the real function addres if we need pointer 5370 equality. We load the GOT entry with the PLT entry. */ 5371 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt; 5372 bfd_put_64 (output_bfd, (plt->output_section->vma 5373 + plt->output_offset 5374 + h->plt.offset), 5375 htab->elf.sgot->contents + h->got.offset); 5376 return TRUE; 5377 } 5378 } 5379 else if (info->shared 5380 && SYMBOL_REFERENCES_LOCAL (info, h)) 5381 { 5382 if (!h->def_regular) 5383 return FALSE; 5384 BFD_ASSERT((h->got.offset & 1) != 0); 5385 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE); 5386 rela.r_addend = (h->root.u.def.value 5387 + h->root.u.def.section->output_section->vma 5388 + h->root.u.def.section->output_offset); 5389 } 5390 else 5391 { 5392 BFD_ASSERT((h->got.offset & 1) == 0); 5393 do_glob_dat: 5394 bfd_put_64 (output_bfd, (bfd_vma) 0, 5395 htab->elf.sgot->contents + h->got.offset); 5396 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT); 5397 rela.r_addend = 0; 5398 } 5399 5400 elf_append_rela (output_bfd, htab->elf.srelgot, &rela); 5401 } 5402 5403 if (h->needs_copy) 5404 { 5405 Elf_Internal_Rela rela; 5406 5407 /* This symbol needs a copy reloc. Set it up. */ 5408 5409 if (h->dynindx == -1 5410 || (h->root.type != bfd_link_hash_defined 5411 && h->root.type != bfd_link_hash_defweak) 5412 || htab->srelbss == NULL) 5413 abort (); 5414 5415 rela.r_offset = (h->root.u.def.value 5416 + h->root.u.def.section->output_section->vma 5417 + h->root.u.def.section->output_offset); 5418 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY); 5419 rela.r_addend = 0; 5420 elf_append_rela (output_bfd, htab->srelbss, &rela); 5421 } 5422 5423 return TRUE; 5424 } 5425 5426 /* Finish up local dynamic symbol handling. We set the contents of 5427 various dynamic sections here. */ 5428 5429 static bfd_boolean 5430 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf) 5431 { 5432 struct elf_link_hash_entry *h 5433 = (struct elf_link_hash_entry *) *slot; 5434 struct bfd_link_info *info 5435 = (struct bfd_link_info *) inf; 5436 5437 return elf_x86_64_finish_dynamic_symbol (info->output_bfd, 5438 info, h, NULL); 5439 } 5440 5441 /* Used to decide how to sort relocs in an optimal manner for the 5442 dynamic linker, before writing them out. 
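   Classifying R_X86_64_RELATIVE(64) as reloc_class_relative lets
   the generic -z combreloc sorting place all relative relocations
   first in .rela.dyn and emit DT_RELACOUNT, so ld.so can apply them
   in a tight loop without symbol lookups; JUMP_SLOT and COPY
   relocations get their own classes for similar grouping reasons.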
*/ 5443 5444 static enum elf_reloc_type_class 5445 elf_x86_64_reloc_type_class (const struct bfd_link_info *info ATTRIBUTE_UNUSED, 5446 const asection *rel_sec ATTRIBUTE_UNUSED, 5447 const Elf_Internal_Rela *rela) 5448 { 5449 switch ((int) ELF32_R_TYPE (rela->r_info)) 5450 { 5451 case R_X86_64_RELATIVE: 5452 case R_X86_64_RELATIVE64: 5453 return reloc_class_relative; 5454 case R_X86_64_JUMP_SLOT: 5455 return reloc_class_plt; 5456 case R_X86_64_COPY: 5457 return reloc_class_copy; 5458 default: 5459 return reloc_class_normal; 5460 } 5461 } 5462 5463 /* Finish up the dynamic sections. */ 5464 5465 static bfd_boolean 5466 elf_x86_64_finish_dynamic_sections (bfd *output_bfd, 5467 struct bfd_link_info *info) 5468 { 5469 struct elf_x86_64_link_hash_table *htab; 5470 bfd *dynobj; 5471 asection *sdyn; 5472 const struct elf_x86_64_backend_data *abed; 5473 5474 htab = elf_x86_64_hash_table (info); 5475 if (htab == NULL) 5476 return FALSE; 5477 5478 /* Use MPX backend data in case of BND relocation. Use .plt_bnd 5479 section only if there is .plt section. */ 5480 abed = (htab->elf.splt != NULL && htab->plt_bnd != NULL 5481 ? &elf_x86_64_bnd_arch_bed 5482 : get_elf_x86_64_backend_data (output_bfd)); 5483 5484 dynobj = htab->elf.dynobj; 5485 sdyn = bfd_get_linker_section (dynobj, ".dynamic"); 5486 5487 if (htab->elf.dynamic_sections_created) 5488 { 5489 bfd_byte *dyncon, *dynconend; 5490 const struct elf_backend_data *bed; 5491 bfd_size_type sizeof_dyn; 5492 5493 if (sdyn == NULL || htab->elf.sgot == NULL) 5494 abort (); 5495 5496 bed = get_elf_backend_data (dynobj); 5497 sizeof_dyn = bed->s->sizeof_dyn; 5498 dyncon = sdyn->contents; 5499 dynconend = sdyn->contents + sdyn->size; 5500 for (; dyncon < dynconend; dyncon += sizeof_dyn) 5501 { 5502 Elf_Internal_Dyn dyn; 5503 asection *s; 5504 5505 (*bed->s->swap_dyn_in) (dynobj, dyncon, &dyn); 5506 5507 switch (dyn.d_tag) 5508 { 5509 default: 5510 continue; 5511 5512 case DT_PLTGOT: 5513 s = htab->elf.sgotplt; 5514 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset; 5515 break; 5516 5517 case DT_JMPREL: 5518 dyn.d_un.d_ptr = htab->elf.srelplt->output_section->vma; 5519 break; 5520 5521 case DT_PLTRELSZ: 5522 s = htab->elf.srelplt->output_section; 5523 dyn.d_un.d_val = s->size; 5524 break; 5525 5526 case DT_RELASZ: 5527 /* The procedure linkage table relocs (DT_JMPREL) should 5528 not be included in the overall relocs (DT_RELA). 5529 Therefore, we override the DT_RELASZ entry here to 5530 make it not include the JMPREL relocs. Since the 5531 linker script arranges for .rela.plt to follow all 5532 other relocation sections, we don't have to worry 5533 about changing the DT_RELA entry. */ 5534 if (htab->elf.srelplt != NULL) 5535 { 5536 s = htab->elf.srelplt->output_section; 5537 dyn.d_un.d_val -= s->size; 5538 } 5539 break; 5540 5541 case DT_TLSDESC_PLT: 5542 s = htab->elf.splt; 5543 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset 5544 + htab->tlsdesc_plt; 5545 break; 5546 5547 case DT_TLSDESC_GOT: 5548 s = htab->elf.sgot; 5549 dyn.d_un.d_ptr = s->output_section->vma + s->output_offset 5550 + htab->tlsdesc_got; 5551 break; 5552 } 5553 5554 (*bed->s->swap_dyn_out) (output_bfd, &dyn, dyncon); 5555 } 5556 5557 /* Fill in the special first entry in the procedure linkage table. */ 5558 if (htab->elf.splt && htab->elf.splt->size > 0) 5559 { 5560 /* Fill in the first entry in the procedure linkage table. 
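   For the default layout PLT0 is

     ff 35 xx xx xx xx    pushq GOT+8(%rip)
     ff 25 xx xx xx xx    jmpq  *GOT+16(%rip)
     0f 1f 40 00          nopl  0x0(%rax)

   and the two 32-bit displacements patched below (at
   plt0_got1_offset and plt0_got2_offset) are PC-relative, which is
   why each is computed against the end of its instruction.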
*/ 5561 memcpy (htab->elf.splt->contents, 5562 abed->plt0_entry, abed->plt_entry_size); 5563 /* Add offset for pushq GOT+8(%rip), since the instruction 5564 uses 6 bytes subtract this value. */ 5565 bfd_put_32 (output_bfd, 5566 (htab->elf.sgotplt->output_section->vma 5567 + htab->elf.sgotplt->output_offset 5568 + 8 5569 - htab->elf.splt->output_section->vma 5570 - htab->elf.splt->output_offset 5571 - 6), 5572 htab->elf.splt->contents + abed->plt0_got1_offset); 5573 /* Add offset for the PC-relative instruction accessing GOT+16, 5574 subtracting the offset to the end of that instruction. */ 5575 bfd_put_32 (output_bfd, 5576 (htab->elf.sgotplt->output_section->vma 5577 + htab->elf.sgotplt->output_offset 5578 + 16 5579 - htab->elf.splt->output_section->vma 5580 - htab->elf.splt->output_offset 5581 - abed->plt0_got2_insn_end), 5582 htab->elf.splt->contents + abed->plt0_got2_offset); 5583 5584 elf_section_data (htab->elf.splt->output_section) 5585 ->this_hdr.sh_entsize = abed->plt_entry_size; 5586 5587 if (htab->tlsdesc_plt) 5588 { 5589 bfd_put_64 (output_bfd, (bfd_vma) 0, 5590 htab->elf.sgot->contents + htab->tlsdesc_got); 5591 5592 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt, 5593 abed->plt0_entry, abed->plt_entry_size); 5594 5595 /* Add offset for pushq GOT+8(%rip), since the 5596 instruction uses 6 bytes subtract this value. */ 5597 bfd_put_32 (output_bfd, 5598 (htab->elf.sgotplt->output_section->vma 5599 + htab->elf.sgotplt->output_offset 5600 + 8 5601 - htab->elf.splt->output_section->vma 5602 - htab->elf.splt->output_offset 5603 - htab->tlsdesc_plt 5604 - 6), 5605 htab->elf.splt->contents 5606 + htab->tlsdesc_plt + abed->plt0_got1_offset); 5607 /* Add offset for the PC-relative instruction accessing GOT+TDG, 5608 where TGD stands for htab->tlsdesc_got, subtracting the offset 5609 to the end of that instruction. */ 5610 bfd_put_32 (output_bfd, 5611 (htab->elf.sgot->output_section->vma 5612 + htab->elf.sgot->output_offset 5613 + htab->tlsdesc_got 5614 - htab->elf.splt->output_section->vma 5615 - htab->elf.splt->output_offset 5616 - htab->tlsdesc_plt 5617 - abed->plt0_got2_insn_end), 5618 htab->elf.splt->contents 5619 + htab->tlsdesc_plt + abed->plt0_got2_offset); 5620 } 5621 } 5622 } 5623 5624 if (htab->plt_bnd != NULL) 5625 elf_section_data (htab->plt_bnd->output_section) 5626 ->this_hdr.sh_entsize = sizeof (elf_x86_64_bnd_plt2_entry); 5627 5628 if (htab->elf.sgotplt) 5629 { 5630 if (bfd_is_abs_section (htab->elf.sgotplt->output_section)) 5631 { 5632 (*_bfd_error_handler) 5633 (_("discarded output section: `%A'"), htab->elf.sgotplt); 5634 return FALSE; 5635 } 5636 5637 /* Fill in the first three entries in the global offset table. */ 5638 if (htab->elf.sgotplt->size > 0) 5639 { 5640 /* Set the first entry in the global offset table to the address of 5641 the dynamic section. */ 5642 if (sdyn == NULL) 5643 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents); 5644 else 5645 bfd_put_64 (output_bfd, 5646 sdyn->output_section->vma + sdyn->output_offset, 5647 htab->elf.sgotplt->contents); 5648 /* Write GOT[1] and GOT[2], needed for the dynamic linker. */ 5649 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE); 5650 bfd_put_64 (output_bfd, (bfd_vma) 0, htab->elf.sgotplt->contents + GOT_ENTRY_SIZE*2); 5651 } 5652 5653 elf_section_data (htab->elf.sgotplt->output_section)->this_hdr.sh_entsize = 5654 GOT_ENTRY_SIZE; 5655 } 5656 5657 /* Adjust .eh_frame for .plt section. 
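   htab->plt_eh_frame was sized and filled from a canned CIE/FDE
   template earlier; the only field that must wait for final section
   addresses is the FDE's PC-begin, patched below at
   PLT_FDE_START_OFFSET as a signed 32-bit PC-relative value
   pointing at the start of .plt.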
*/ 5658 if (htab->plt_eh_frame != NULL 5659 && htab->plt_eh_frame->contents != NULL) 5660 { 5661 if (htab->elf.splt != NULL 5662 && htab->elf.splt->size != 0 5663 && (htab->elf.splt->flags & SEC_EXCLUDE) == 0 5664 && htab->elf.splt->output_section != NULL 5665 && htab->plt_eh_frame->output_section != NULL) 5666 { 5667 bfd_vma plt_start = htab->elf.splt->output_section->vma; 5668 bfd_vma eh_frame_start = htab->plt_eh_frame->output_section->vma 5669 + htab->plt_eh_frame->output_offset 5670 + PLT_FDE_START_OFFSET; 5671 bfd_put_signed_32 (dynobj, plt_start - eh_frame_start, 5672 htab->plt_eh_frame->contents 5673 + PLT_FDE_START_OFFSET); 5674 } 5675 if (htab->plt_eh_frame->sec_info_type == SEC_INFO_TYPE_EH_FRAME) 5676 { 5677 if (! _bfd_elf_write_section_eh_frame (output_bfd, info, 5678 htab->plt_eh_frame, 5679 htab->plt_eh_frame->contents)) 5680 return FALSE; 5681 } 5682 } 5683 5684 if (htab->elf.sgot && htab->elf.sgot->size > 0) 5685 elf_section_data (htab->elf.sgot->output_section)->this_hdr.sh_entsize 5686 = GOT_ENTRY_SIZE; 5687 5688 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */ 5689 htab_traverse (htab->loc_hash_table, 5690 elf_x86_64_finish_local_dynamic_symbol, 5691 info); 5692 5693 return TRUE; 5694 } 5695 5696 /* Return an array of PLT entry symbol values. */ 5697 5698 static bfd_vma * 5699 elf_x86_64_get_plt_sym_val (bfd *abfd, asymbol **dynsyms, asection *plt, 5700 asection *relplt) 5701 { 5702 bfd_boolean (*slurp_relocs) (bfd *, asection *, asymbol **, bfd_boolean); 5703 arelent *p; 5704 long count, i; 5705 bfd_vma *plt_sym_val; 5706 bfd_vma plt_offset; 5707 bfd_byte *plt_contents; 5708 const struct elf_x86_64_backend_data *bed; 5709 Elf_Internal_Shdr *hdr; 5710 asection *plt_bnd; 5711 5712 /* Get the .plt section contents. PLT passed down may point to the 5713 .plt.bnd section. Make sure that PLT always points to the .plt 5714 section. */ 5715 plt_bnd = bfd_get_section_by_name (abfd, ".plt.bnd"); 5716 if (plt_bnd) 5717 { 5718 if (plt != plt_bnd) 5719 abort (); 5720 plt = bfd_get_section_by_name (abfd, ".plt"); 5721 if (plt == NULL) 5722 abort (); 5723 bed = &elf_x86_64_bnd_arch_bed; 5724 } 5725 else 5726 bed = get_elf_x86_64_backend_data (abfd); 5727 5728 plt_contents = (bfd_byte *) bfd_malloc (plt->size); 5729 if (plt_contents == NULL) 5730 return NULL; 5731 if (!bfd_get_section_contents (abfd, (asection *) plt, 5732 plt_contents, 0, plt->size)) 5733 { 5734 bad_return: 5735 free (plt_contents); 5736 return NULL; 5737 } 5738 5739 slurp_relocs = get_elf_backend_data (abfd)->s->slurp_reloc_table; 5740 if (! (*slurp_relocs) (abfd, relplt, dynsyms, TRUE)) 5741 goto bad_return; 5742 5743 hdr = &elf_section_data (relplt)->this_hdr; 5744 count = relplt->size / hdr->sh_entsize; 5745 5746 plt_sym_val = (bfd_vma *) bfd_malloc (sizeof (bfd_vma) * count); 5747 if (plt_sym_val == NULL) 5748 goto bad_return; 5749 5750 for (i = 0; i < count; i++) 5751 plt_sym_val[i] = -1; 5752 5753 plt_offset = bed->plt_entry_size; 5754 p = relplt->relocation; 5755 for (i = 0; i < count; i++, p++) 5756 { 5757 long reloc_index; 5758 5759 /* Skip unknown relocation. */ 5760 if (p->howto == NULL) 5761 continue; 5762 5763 if (p->howto->type != R_X86_64_JUMP_SLOT 5764 && p->howto->type != R_X86_64_IRELATIVE) 5765 continue; 5766 5767 reloc_index = H_GET_32 (abfd, (plt_contents + plt_offset 5768 + bed->plt_reloc_offset)); 5769 if (reloc_index >= count) 5770 abort (); 5771 if (plt_bnd) 5772 { 5773 /* This is the index in .plt section. 
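   plt_offset starts just past the special PLT0 header, so dividing it
   by the .plt entry size yields this entry's index; entry 0 (PLT0) has
   no counterpart in .plt.bnd, hence plt_index - 1 selects the matching
   .plt.bnd stub, each of which is
   sizeof (elf_x86_64_legacy_plt2_entry) bytes long.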
*/ 5774 long plt_index = plt_offset / bed->plt_entry_size; 5775 /* Store VMA + the offset in .plt.bnd section. */ 5776 plt_sym_val[reloc_index] = 5777 (plt_bnd->vma 5778 + (plt_index - 1) * sizeof (elf_x86_64_legacy_plt2_entry)); 5779 } 5780 else 5781 plt_sym_val[reloc_index] = plt->vma + plt_offset; 5782 plt_offset += bed->plt_entry_size; 5783 5784 /* PR binutils/18437: Skip extra relocations in the .rela.plt 5785 section. */ 5786 if (plt_offset >= plt->size) 5787 break; 5788 } 5789 5790 free (plt_contents); 5791 5792 return plt_sym_val; 5793 } 5794 5795 /* Similar to _bfd_elf_get_synthetic_symtab, with .plt.bnd section 5796 support. */ 5797 5798 static long 5799 elf_x86_64_get_synthetic_symtab (bfd *abfd, 5800 long symcount, 5801 asymbol **syms, 5802 long dynsymcount, 5803 asymbol **dynsyms, 5804 asymbol **ret) 5805 { 5806 /* Pass the .plt.bnd section to _bfd_elf_ifunc_get_synthetic_symtab 5807 as PLT if it exists. */ 5808 asection *plt = bfd_get_section_by_name (abfd, ".plt.bnd"); 5809 if (plt == NULL) 5810 plt = bfd_get_section_by_name (abfd, ".plt"); 5811 return _bfd_elf_ifunc_get_synthetic_symtab (abfd, symcount, syms, 5812 dynsymcount, dynsyms, ret, 5813 plt, 5814 elf_x86_64_get_plt_sym_val); 5815 } 5816 5817 /* Handle an x86-64 specific section when reading an object file. This 5818 is called when elfcode.h finds a section with an unknown type. */ 5819 5820 static bfd_boolean 5821 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr, 5822 const char *name, int shindex) 5823 { 5824 if (hdr->sh_type != SHT_X86_64_UNWIND) 5825 return FALSE; 5826 5827 if (! _bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) 5828 return FALSE; 5829 5830 return TRUE; 5831 } 5832 5833 /* Hook called by the linker routine which adds symbols from an object 5834 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead 5835 of .bss. */ 5836 5837 static bfd_boolean 5838 elf_x86_64_add_symbol_hook (bfd *abfd, 5839 struct bfd_link_info *info, 5840 Elf_Internal_Sym *sym, 5841 const char **namep ATTRIBUTE_UNUSED, 5842 flagword *flagsp ATTRIBUTE_UNUSED, 5843 asection **secp, 5844 bfd_vma *valp) 5845 { 5846 asection *lcomm; 5847 5848 switch (sym->st_shndx) 5849 { 5850 case SHN_X86_64_LCOMMON: 5851 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON"); 5852 if (lcomm == NULL) 5853 { 5854 lcomm = bfd_make_section_with_flags (abfd, 5855 "LARGE_COMMON", 5856 (SEC_ALLOC 5857 | SEC_IS_COMMON 5858 | SEC_LINKER_CREATED)); 5859 if (lcomm == NULL) 5860 return FALSE; 5861 elf_section_flags (lcomm) |= SHF_X86_64_LARGE; 5862 } 5863 *secp = lcomm; 5864 *valp = sym->st_size; 5865 return TRUE; 5866 } 5867 5868 if ((ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC 5869 || ELF_ST_BIND (sym->st_info) == STB_GNU_UNIQUE) 5870 && (abfd->flags & DYNAMIC) == 0 5871 && bfd_get_flavour (info->output_bfd) == bfd_target_elf_flavour) 5872 elf_tdata (info->output_bfd)->has_gnu_symbols = TRUE; 5873 5874 return TRUE; 5875 } 5876 5877 5878 /* Given a BFD section, try to locate the corresponding ELF section 5879 index. */ 5880 5881 static bfd_boolean 5882 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED, 5883 asection *sec, int *index_return) 5884 { 5885 if (sec == &_bfd_elf_large_com_section) 5886 { 5887 *index_return = SHN_X86_64_LCOMMON; 5888 return TRUE; 5889 } 5890 return FALSE; 5891 } 5892 5893 /* Process a symbol. 
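   Symbols carrying the x86-64 specific section index SHN_X86_64_LCOMMON
   are redirected to the generic large common section, with the symbol
   size exposed as the value, mirroring the way ordinary SHN_COMMON
   symbols are presented.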
*/ 5894 5895 static void 5896 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, 5897 asymbol *asym) 5898 { 5899 elf_symbol_type *elfsym = (elf_symbol_type *) asym; 5900 5901 switch (elfsym->internal_elf_sym.st_shndx) 5902 { 5903 case SHN_X86_64_LCOMMON: 5904 asym->section = &_bfd_elf_large_com_section; 5905 asym->value = elfsym->internal_elf_sym.st_size; 5906 /* Common symbol doesn't set BSF_GLOBAL. */ 5907 asym->flags &= ~BSF_GLOBAL; 5908 break; 5909 } 5910 } 5911 5912 static bfd_boolean 5913 elf_x86_64_common_definition (Elf_Internal_Sym *sym) 5914 { 5915 return (sym->st_shndx == SHN_COMMON 5916 || sym->st_shndx == SHN_X86_64_LCOMMON); 5917 } 5918 5919 static unsigned int 5920 elf_x86_64_common_section_index (asection *sec) 5921 { 5922 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0) 5923 return SHN_COMMON; 5924 else 5925 return SHN_X86_64_LCOMMON; 5926 } 5927 5928 static asection * 5929 elf_x86_64_common_section (asection *sec) 5930 { 5931 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0) 5932 return bfd_com_section_ptr; 5933 else 5934 return &_bfd_elf_large_com_section; 5935 } 5936 5937 static bfd_boolean 5938 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h, 5939 const Elf_Internal_Sym *sym, 5940 asection **psec, 5941 bfd_boolean newdef, 5942 bfd_boolean olddef, 5943 bfd *oldbfd, 5944 const asection *oldsec) 5945 { 5946 /* A normal common symbol and a large common symbol result in a 5947 normal common symbol. We turn the large common symbol into a 5948 normal one. */ 5949 if (!olddef 5950 && h->root.type == bfd_link_hash_common 5951 && !newdef 5952 && bfd_is_com_section (*psec) 5953 && oldsec != *psec) 5954 { 5955 if (sym->st_shndx == SHN_COMMON 5956 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0) 5957 { 5958 h->root.u.c.p->section 5959 = bfd_make_section_old_way (oldbfd, "COMMON"); 5960 h->root.u.c.p->section->flags = SEC_ALLOC; 5961 } 5962 else if (sym->st_shndx == SHN_X86_64_LCOMMON 5963 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0) 5964 *psec = bfd_com_section_ptr; 5965 } 5966 5967 return TRUE; 5968 } 5969 5970 static int 5971 elf_x86_64_additional_program_headers (bfd *abfd, 5972 struct bfd_link_info *info ATTRIBUTE_UNUSED) 5973 { 5974 asection *s; 5975 int count = 0; 5976 5977 /* Check to see if we need a large readonly segment. */ 5978 s = bfd_get_section_by_name (abfd, ".lrodata"); 5979 if (s && (s->flags & SEC_LOAD)) 5980 count++; 5981 5982 /* Check to see if we need a large data segment. Since .lbss sections 5983 is placed right after the .bss section, there should be no need for 5984 a large data segment just because of .lbss. */ 5985 s = bfd_get_section_by_name (abfd, ".ldata"); 5986 if (s && (s->flags & SEC_LOAD)) 5987 count++; 5988 5989 return count; 5990 } 5991 5992 /* Return TRUE if symbol should be hashed in the `.gnu.hash' section. */ 5993 5994 static bfd_boolean 5995 elf_x86_64_hash_symbol (struct elf_link_hash_entry *h) 5996 { 5997 if (h->plt.offset != (bfd_vma) -1 5998 && !h->def_regular 5999 && !h->pointer_equality_needed) 6000 return FALSE; 6001 6002 return _bfd_elf_hash_symbol (h); 6003 } 6004 6005 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. 
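   The 64-bit and x32 backends use identical relocation numbers, so the
   generic _bfd_elf_relocs_compatible check alone would treat them as
   interchangeable; additionally requiring the same ELF class keeps
   ELFCLASS32 and ELFCLASS64 objects apart.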
*/ 6006 6007 static bfd_boolean 6008 elf_x86_64_relocs_compatible (const bfd_target *input, 6009 const bfd_target *output) 6010 { 6011 return ((xvec_get_elf_backend_data (input)->s->elfclass 6012 == xvec_get_elf_backend_data (output)->s->elfclass) 6013 && _bfd_elf_relocs_compatible (input, output)); 6014 } 6015 6016 static const struct bfd_elf_special_section 6017 elf_x86_64_special_sections[]= 6018 { 6019 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 6020 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, 6021 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE}, 6022 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 6023 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 6024 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, 6025 { NULL, 0, 0, 0, 0 } 6026 }; 6027 6028 #define TARGET_LITTLE_SYM x86_64_elf64_vec 6029 #define TARGET_LITTLE_NAME "elf64-x86-64" 6030 #define ELF_ARCH bfd_arch_i386 6031 #define ELF_TARGET_ID X86_64_ELF_DATA 6032 #define ELF_MACHINE_CODE EM_X86_64 6033 #define ELF_MAXPAGESIZE 0x200000 6034 #define ELF_MINPAGESIZE 0x1000 6035 #define ELF_COMMONPAGESIZE 0x1000 6036 6037 #define elf_backend_can_gc_sections 1 6038 #define elf_backend_can_refcount 1 6039 #define elf_backend_want_got_plt 1 6040 #define elf_backend_plt_readonly 1 6041 #define elf_backend_want_plt_sym 0 6042 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3) 6043 #define elf_backend_rela_normal 1 6044 #define elf_backend_plt_alignment 4 6045 #define elf_backend_extern_protected_data 1 6046 6047 #define elf_info_to_howto elf_x86_64_info_to_howto 6048 6049 #define bfd_elf64_bfd_link_hash_table_create \ 6050 elf_x86_64_link_hash_table_create 6051 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup 6052 #define bfd_elf64_bfd_reloc_name_lookup \ 6053 elf_x86_64_reloc_name_lookup 6054 6055 #define elf_backend_adjust_dynamic_symbol elf_x86_64_adjust_dynamic_symbol 6056 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible 6057 #define elf_backend_check_relocs elf_x86_64_check_relocs 6058 #define elf_backend_copy_indirect_symbol elf_x86_64_copy_indirect_symbol 6059 #define elf_backend_create_dynamic_sections elf_x86_64_create_dynamic_sections 6060 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections 6061 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol 6062 #define elf_backend_gc_mark_hook elf_x86_64_gc_mark_hook 6063 #define elf_backend_gc_sweep_hook elf_x86_64_gc_sweep_hook 6064 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus 6065 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo 6066 #ifdef CORE_HEADER 6067 #define elf_backend_write_core_note elf_x86_64_write_core_note 6068 #endif 6069 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class 6070 #define elf_backend_relocate_section elf_x86_64_relocate_section 6071 #define elf_backend_size_dynamic_sections elf_x86_64_size_dynamic_sections 6072 #define elf_backend_always_size_sections elf_x86_64_always_size_sections 6073 #define elf_backend_init_index_section _bfd_elf_init_1_index_section 6074 #define elf_backend_object_p elf64_x86_64_elf_object_p 6075 #define bfd_elf64_mkobject elf_x86_64_mkobject 6076 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab 6077 6078 #define 
elf_backend_section_from_shdr \ 6079 elf_x86_64_section_from_shdr 6080 6081 #define elf_backend_section_from_bfd_section \ 6082 elf_x86_64_elf_section_from_bfd_section 6083 #define elf_backend_add_symbol_hook \ 6084 elf_x86_64_add_symbol_hook 6085 #define elf_backend_symbol_processing \ 6086 elf_x86_64_symbol_processing 6087 #define elf_backend_common_section_index \ 6088 elf_x86_64_common_section_index 6089 #define elf_backend_common_section \ 6090 elf_x86_64_common_section 6091 #define elf_backend_common_definition \ 6092 elf_x86_64_common_definition 6093 #define elf_backend_merge_symbol \ 6094 elf_x86_64_merge_symbol 6095 #define elf_backend_special_sections \ 6096 elf_x86_64_special_sections 6097 #define elf_backend_additional_program_headers \ 6098 elf_x86_64_additional_program_headers 6099 #define elf_backend_hash_symbol \ 6100 elf_x86_64_hash_symbol 6101 6102 #include "elf64-target.h" 6103 6104 /* CloudABI support. */ 6105 6106 #undef TARGET_LITTLE_SYM 6107 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec 6108 #undef TARGET_LITTLE_NAME 6109 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi" 6110 6111 #undef ELF_OSABI 6112 #define ELF_OSABI ELFOSABI_CLOUDABI 6113 6114 #undef elf64_bed 6115 #define elf64_bed elf64_x86_64_cloudabi_bed 6116 6117 #include "elf64-target.h" 6118 6119 /* FreeBSD support. */ 6120 6121 #undef TARGET_LITTLE_SYM 6122 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec 6123 #undef TARGET_LITTLE_NAME 6124 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd" 6125 6126 #undef ELF_OSABI 6127 #define ELF_OSABI ELFOSABI_FREEBSD 6128 6129 #undef elf64_bed 6130 #define elf64_bed elf64_x86_64_fbsd_bed 6131 6132 #include "elf64-target.h" 6133 6134 /* Solaris 2 support. */ 6135 6136 #undef TARGET_LITTLE_SYM 6137 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec 6138 #undef TARGET_LITTLE_NAME 6139 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2" 6140 6141 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE 6142 objects won't be recognized. */ 6143 #undef ELF_OSABI 6144 6145 #undef elf64_bed 6146 #define elf64_bed elf64_x86_64_sol2_bed 6147 6148 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte 6149 boundary. */ 6150 #undef elf_backend_static_tls_alignment 6151 #define elf_backend_static_tls_alignment 16 6152 6153 /* The Solaris 2 ABI requires a plt symbol on all platforms. 6154 6155 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output 6156 File, p.63. */ 6157 #undef elf_backend_want_plt_sym 6158 #define elf_backend_want_plt_sym 1 6159 6160 #include "elf64-target.h" 6161 6162 /* Native Client support. */ 6163 6164 static bfd_boolean 6165 elf64_x86_64_nacl_elf_object_p (bfd *abfd) 6166 { 6167 /* Set the right machine number for a NaCl x86-64 ELF64 file. */ 6168 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl); 6169 return TRUE; 6170 } 6171 6172 #undef TARGET_LITTLE_SYM 6173 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec 6174 #undef TARGET_LITTLE_NAME 6175 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl" 6176 #undef elf64_bed 6177 #define elf64_bed elf64_x86_64_nacl_bed 6178 6179 #undef ELF_MAXPAGESIZE 6180 #undef ELF_MINPAGESIZE 6181 #undef ELF_COMMONPAGESIZE 6182 #define ELF_MAXPAGESIZE 0x10000 6183 #define ELF_MINPAGESIZE 0x10000 6184 #define ELF_COMMONPAGESIZE 0x10000 6185 6186 /* Restore defaults. 
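   That is, drop the Solaris-specific 16-byte static TLS alignment and
   the mandatory PLT symbol, and clear any OSABI override, before the
   NaCl-specific PLT layout that follows.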
*/ 6187 #undef ELF_OSABI 6188 #undef elf_backend_static_tls_alignment 6189 #undef elf_backend_want_plt_sym 6190 #define elf_backend_want_plt_sym 0 6191 6192 /* NaCl uses substantially different PLT entries for the same effects. */ 6193 6194 #undef elf_backend_plt_alignment 6195 #define elf_backend_plt_alignment 5 6196 #define NACL_PLT_ENTRY_SIZE 64 6197 #define NACLMASK 0xe0 /* 32-byte alignment mask. */ 6198 6199 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] = 6200 { 6201 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 6202 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */ 6203 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */ 6204 0x4d, 0x01, 0xfb, /* add %r15, %r11 */ 6205 0x41, 0xff, 0xe3, /* jmpq *%r11 */ 6206 6207 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */ 6208 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */ 6209 6210 /* 32 bytes of nop to pad out to the standard size. */ 6211 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ 6212 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 6213 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ 6214 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 6215 0x66, /* excess data32 prefix */ 6216 0x90 /* nop */ 6217 }; 6218 6219 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] = 6220 { 6221 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */ 6222 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */ 6223 0x4d, 0x01, 0xfb, /* add %r15, %r11 */ 6224 0x41, 0xff, 0xe3, /* jmpq *%r11 */ 6225 6226 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */ 6227 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ 6228 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 6229 6230 /* Lazy GOT entries point here (32-byte aligned). */ 6231 0x68, /* pushq immediate */ 6232 0, 0, 0, 0, /* replaced with index into relocation table. */ 6233 0xe9, /* jmp relative */ 6234 0, 0, 0, 0, /* replaced with offset to start of .plt0. */ 6235 6236 /* 22 bytes of nop to pad out to the standard size. */ 6237 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data32 prefixes */ 6238 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 6239 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */ 6240 }; 6241 6242 /* .eh_frame covering the .plt section. */ 6243 6244 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] = 6245 { 6246 #if (PLT_CIE_LENGTH != 20 \ 6247 || PLT_FDE_LENGTH != 36 \ 6248 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \ 6249 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12) 6250 # error "Need elf_x86_64_backend_data parameters for eh_frame_plt offsets!" 
6251 #endif 6252 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 6253 0, 0, 0, 0, /* CIE ID */ 6254 1, /* CIE version */ 6255 'z', 'R', 0, /* Augmentation string */ 6256 1, /* Code alignment factor */ 6257 0x78, /* Data alignment factor */ 6258 16, /* Return address column */ 6259 1, /* Augmentation size */ 6260 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 6261 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 6262 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 6263 DW_CFA_nop, DW_CFA_nop, 6264 6265 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 6266 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */ 6267 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 6268 0, 0, 0, 0, /* .plt size goes here */ 6269 0, /* Augmentation size */ 6270 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 6271 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 6272 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 6273 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */ 6274 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 6275 13, /* Block length */ 6276 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 6277 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 6278 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge, 6279 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 6280 DW_CFA_nop, DW_CFA_nop 6281 }; 6282 6283 static const struct elf_x86_64_backend_data elf_x86_64_nacl_arch_bed = 6284 { 6285 elf_x86_64_nacl_plt0_entry, /* plt0_entry */ 6286 elf_x86_64_nacl_plt_entry, /* plt_entry */ 6287 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */ 6288 2, /* plt0_got1_offset */ 6289 9, /* plt0_got2_offset */ 6290 13, /* plt0_got2_insn_end */ 6291 3, /* plt_got_offset */ 6292 33, /* plt_reloc_offset */ 6293 38, /* plt_plt_offset */ 6294 7, /* plt_got_insn_size */ 6295 42, /* plt_plt_insn_end */ 6296 32, /* plt_lazy_offset */ 6297 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */ 6298 sizeof (elf_x86_64_nacl_eh_frame_plt), /* eh_frame_plt_size */ 6299 }; 6300 6301 #undef elf_backend_arch_data 6302 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed 6303 6304 #undef elf_backend_object_p 6305 #define elf_backend_object_p elf64_x86_64_nacl_elf_object_p 6306 #undef elf_backend_modify_segment_map 6307 #define elf_backend_modify_segment_map nacl_modify_segment_map 6308 #undef elf_backend_modify_program_headers 6309 #define elf_backend_modify_program_headers nacl_modify_program_headers 6310 #undef elf_backend_final_write_processing 6311 #define elf_backend_final_write_processing nacl_final_write_processing 6312 6313 #include "elf64-target.h" 6314 6315 /* Native Client x32 support. */ 6316 6317 static bfd_boolean 6318 elf32_x86_64_nacl_elf_object_p (bfd *abfd) 6319 { 6320 /* Set the right machine number for a NaCl x86-64 ELF32 file. 
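   bfd_mach_x64_32_nacl keeps NaCl x32 objects distinct both from plain
   x32 objects and from 64-bit NaCl objects (bfd_mach_x86_64_nacl).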
*/ 6321 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl); 6322 return TRUE; 6323 } 6324 6325 #undef TARGET_LITTLE_SYM 6326 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec 6327 #undef TARGET_LITTLE_NAME 6328 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl" 6329 #undef elf32_bed 6330 #define elf32_bed elf32_x86_64_nacl_bed 6331 6332 #define bfd_elf32_bfd_link_hash_table_create \ 6333 elf_x86_64_link_hash_table_create 6334 #define bfd_elf32_bfd_reloc_type_lookup \ 6335 elf_x86_64_reloc_type_lookup 6336 #define bfd_elf32_bfd_reloc_name_lookup \ 6337 elf_x86_64_reloc_name_lookup 6338 #define bfd_elf32_mkobject \ 6339 elf_x86_64_mkobject 6340 #define bfd_elf32_get_synthetic_symtab \ 6341 elf_x86_64_get_synthetic_symtab 6342 6343 #undef elf_backend_object_p 6344 #define elf_backend_object_p \ 6345 elf32_x86_64_nacl_elf_object_p 6346 6347 #undef elf_backend_bfd_from_remote_memory 6348 #define elf_backend_bfd_from_remote_memory \ 6349 _bfd_elf32_bfd_from_remote_memory 6350 6351 #undef elf_backend_size_info 6352 #define elf_backend_size_info \ 6353 _bfd_elf32_size_info 6354 6355 #include "elf32-target.h" 6356 6357 /* Restore defaults. */ 6358 #undef elf_backend_object_p 6359 #define elf_backend_object_p elf64_x86_64_elf_object_p 6360 #undef elf_backend_bfd_from_remote_memory 6361 #undef elf_backend_size_info 6362 #undef elf_backend_modify_segment_map 6363 #undef elf_backend_modify_program_headers 6364 #undef elf_backend_final_write_processing 6365 6366 /* Intel L1OM support. */ 6367 6368 static bfd_boolean 6369 elf64_l1om_elf_object_p (bfd *abfd) 6370 { 6371 /* Set the right machine number for an L1OM elf64 file. */ 6372 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om); 6373 return TRUE; 6374 } 6375 6376 #undef TARGET_LITTLE_SYM 6377 #define TARGET_LITTLE_SYM l1om_elf64_vec 6378 #undef TARGET_LITTLE_NAME 6379 #define TARGET_LITTLE_NAME "elf64-l1om" 6380 #undef ELF_ARCH 6381 #define ELF_ARCH bfd_arch_l1om 6382 6383 #undef ELF_MACHINE_CODE 6384 #define ELF_MACHINE_CODE EM_L1OM 6385 6386 #undef ELF_OSABI 6387 6388 #undef elf64_bed 6389 #define elf64_bed elf64_l1om_bed 6390 6391 #undef elf_backend_object_p 6392 #define elf_backend_object_p elf64_l1om_elf_object_p 6393 6394 /* Restore defaults. */ 6395 #undef ELF_MAXPAGESIZE 6396 #undef ELF_MINPAGESIZE 6397 #undef ELF_COMMONPAGESIZE 6398 #define ELF_MAXPAGESIZE 0x200000 6399 #define ELF_MINPAGESIZE 0x1000 6400 #define ELF_COMMONPAGESIZE 0x1000 6401 #undef elf_backend_plt_alignment 6402 #define elf_backend_plt_alignment 4 6403 #undef elf_backend_arch_data 6404 #define elf_backend_arch_data &elf_x86_64_arch_bed 6405 6406 #include "elf64-target.h" 6407 6408 /* FreeBSD L1OM support. */ 6409 6410 #undef TARGET_LITTLE_SYM 6411 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec 6412 #undef TARGET_LITTLE_NAME 6413 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd" 6414 6415 #undef ELF_OSABI 6416 #define ELF_OSABI ELFOSABI_FREEBSD 6417 6418 #undef elf64_bed 6419 #define elf64_bed elf64_l1om_fbsd_bed 6420 6421 #include "elf64-target.h" 6422 6423 /* Intel K1OM support. */ 6424 6425 static bfd_boolean 6426 elf64_k1om_elf_object_p (bfd *abfd) 6427 { 6428 /* Set the right machine number for an K1OM elf64 file. 
*/ 6429 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om); 6430 return TRUE; 6431 } 6432 6433 #undef TARGET_LITTLE_SYM 6434 #define TARGET_LITTLE_SYM k1om_elf64_vec 6435 #undef TARGET_LITTLE_NAME 6436 #define TARGET_LITTLE_NAME "elf64-k1om" 6437 #undef ELF_ARCH 6438 #define ELF_ARCH bfd_arch_k1om 6439 6440 #undef ELF_MACHINE_CODE 6441 #define ELF_MACHINE_CODE EM_K1OM 6442 6443 #undef ELF_OSABI 6444 6445 #undef elf64_bed 6446 #define elf64_bed elf64_k1om_bed 6447 6448 #undef elf_backend_object_p 6449 #define elf_backend_object_p elf64_k1om_elf_object_p 6450 6451 #undef elf_backend_static_tls_alignment 6452 6453 #undef elf_backend_want_plt_sym 6454 #define elf_backend_want_plt_sym 0 6455 6456 #include "elf64-target.h" 6457 6458 /* FreeBSD K1OM support. */ 6459 6460 #undef TARGET_LITTLE_SYM 6461 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec 6462 #undef TARGET_LITTLE_NAME 6463 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd" 6464 6465 #undef ELF_OSABI 6466 #define ELF_OSABI ELFOSABI_FREEBSD 6467 6468 #undef elf64_bed 6469 #define elf64_bed elf64_k1om_fbsd_bed 6470 6471 #include "elf64-target.h" 6472 6473 /* 32bit x86-64 support. */ 6474 6475 #undef TARGET_LITTLE_SYM 6476 #define TARGET_LITTLE_SYM x86_64_elf32_vec 6477 #undef TARGET_LITTLE_NAME 6478 #define TARGET_LITTLE_NAME "elf32-x86-64" 6479 #undef elf32_bed 6480 6481 #undef ELF_ARCH 6482 #define ELF_ARCH bfd_arch_i386 6483 6484 #undef ELF_MACHINE_CODE 6485 #define ELF_MACHINE_CODE EM_X86_64 6486 6487 #undef ELF_OSABI 6488 6489 #undef elf_backend_object_p 6490 #define elf_backend_object_p \ 6491 elf32_x86_64_elf_object_p 6492 6493 #undef elf_backend_bfd_from_remote_memory 6494 #define elf_backend_bfd_from_remote_memory \ 6495 _bfd_elf32_bfd_from_remote_memory 6496 6497 #undef elf_backend_size_info 6498 #define elf_backend_size_info \ 6499 _bfd_elf32_size_info 6500 6501 #include "elf32-target.h" 6502