1 /* X86-64 specific support for ELF 2 Copyright (C) 2000-2018 Free Software Foundation, Inc. 3 Contributed by Jan Hubicka <jh@suse.cz>. 4 5 This file is part of BFD, the Binary File Descriptor library. 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License as published by 9 the Free Software Foundation; either version 3 of the License, or 10 (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, 13 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 GNU General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program; if not, write to the Free Software 19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 20 MA 02110-1301, USA. */ 21 22 #include "elfxx-x86.h" 23 #include "elf-nacl.h" 24 #include "dwarf2.h" 25 #include "libiberty.h" 26 27 #include "opcode/i386.h" 28 #include "elf/x86-64.h" 29 30 #ifdef CORE_HEADER 31 #include <stdarg.h> 32 #include CORE_HEADER 33 #endif 34 35 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */ 36 #define MINUS_ONE (~ (bfd_vma) 0) 37 38 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the 39 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get 40 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE 41 since they are the same. */ 42 43 /* The relocation "howto" table. Order of fields: 44 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow, 45 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */ 46 static reloc_howto_type x86_64_elf_howto_table[] = 47 { 48 HOWTO(R_X86_64_NONE, 0, 3, 0, FALSE, 0, complain_overflow_dont, 49 bfd_elf_generic_reloc, "R_X86_64_NONE", FALSE, 0x00000000, 0x00000000, 50 FALSE), 51 HOWTO(R_X86_64_64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 52 bfd_elf_generic_reloc, "R_X86_64_64", FALSE, MINUS_ONE, MINUS_ONE, 53 FALSE), 54 HOWTO(R_X86_64_PC32, 0, 2, 32, TRUE, 0, complain_overflow_signed, 55 bfd_elf_generic_reloc, "R_X86_64_PC32", FALSE, 0xffffffff, 0xffffffff, 56 TRUE), 57 HOWTO(R_X86_64_GOT32, 0, 2, 32, FALSE, 0, complain_overflow_signed, 58 bfd_elf_generic_reloc, "R_X86_64_GOT32", FALSE, 0xffffffff, 0xffffffff, 59 FALSE), 60 HOWTO(R_X86_64_PLT32, 0, 2, 32, TRUE, 0, complain_overflow_signed, 61 bfd_elf_generic_reloc, "R_X86_64_PLT32", FALSE, 0xffffffff, 0xffffffff, 62 TRUE), 63 HOWTO(R_X86_64_COPY, 0, 2, 32, FALSE, 0, complain_overflow_bitfield, 64 bfd_elf_generic_reloc, "R_X86_64_COPY", FALSE, 0xffffffff, 0xffffffff, 65 FALSE), 66 HOWTO(R_X86_64_GLOB_DAT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 67 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", FALSE, MINUS_ONE, 68 MINUS_ONE, FALSE), 69 HOWTO(R_X86_64_JUMP_SLOT, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 70 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", FALSE, MINUS_ONE, 71 MINUS_ONE, FALSE), 72 HOWTO(R_X86_64_RELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 73 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", FALSE, MINUS_ONE, 74 MINUS_ONE, FALSE), 75 HOWTO(R_X86_64_GOTPCREL, 0, 2, 32, TRUE, 0, complain_overflow_signed, 76 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", FALSE, 0xffffffff, 77 0xffffffff, TRUE), 78 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned, 79 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff, 80 FALSE), 81 
HOWTO(R_X86_64_32S, 0, 2, 32, FALSE, 0, complain_overflow_signed, 82 bfd_elf_generic_reloc, "R_X86_64_32S", FALSE, 0xffffffff, 0xffffffff, 83 FALSE), 84 HOWTO(R_X86_64_16, 0, 1, 16, FALSE, 0, complain_overflow_bitfield, 85 bfd_elf_generic_reloc, "R_X86_64_16", FALSE, 0xffff, 0xffff, FALSE), 86 HOWTO(R_X86_64_PC16,0, 1, 16, TRUE, 0, complain_overflow_bitfield, 87 bfd_elf_generic_reloc, "R_X86_64_PC16", FALSE, 0xffff, 0xffff, TRUE), 88 HOWTO(R_X86_64_8, 0, 0, 8, FALSE, 0, complain_overflow_bitfield, 89 bfd_elf_generic_reloc, "R_X86_64_8", FALSE, 0xff, 0xff, FALSE), 90 HOWTO(R_X86_64_PC8, 0, 0, 8, TRUE, 0, complain_overflow_signed, 91 bfd_elf_generic_reloc, "R_X86_64_PC8", FALSE, 0xff, 0xff, TRUE), 92 HOWTO(R_X86_64_DTPMOD64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 93 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", FALSE, MINUS_ONE, 94 MINUS_ONE, FALSE), 95 HOWTO(R_X86_64_DTPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 96 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", FALSE, MINUS_ONE, 97 MINUS_ONE, FALSE), 98 HOWTO(R_X86_64_TPOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 99 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", FALSE, MINUS_ONE, 100 MINUS_ONE, FALSE), 101 HOWTO(R_X86_64_TLSGD, 0, 2, 32, TRUE, 0, complain_overflow_signed, 102 bfd_elf_generic_reloc, "R_X86_64_TLSGD", FALSE, 0xffffffff, 103 0xffffffff, TRUE), 104 HOWTO(R_X86_64_TLSLD, 0, 2, 32, TRUE, 0, complain_overflow_signed, 105 bfd_elf_generic_reloc, "R_X86_64_TLSLD", FALSE, 0xffffffff, 106 0xffffffff, TRUE), 107 HOWTO(R_X86_64_DTPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed, 108 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", FALSE, 0xffffffff, 109 0xffffffff, FALSE), 110 HOWTO(R_X86_64_GOTTPOFF, 0, 2, 32, TRUE, 0, complain_overflow_signed, 111 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", FALSE, 0xffffffff, 112 0xffffffff, TRUE), 113 HOWTO(R_X86_64_TPOFF32, 0, 2, 32, FALSE, 0, complain_overflow_signed, 114 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", FALSE, 0xffffffff, 115 0xffffffff, FALSE), 116 HOWTO(R_X86_64_PC64, 0, 4, 64, TRUE, 0, complain_overflow_bitfield, 117 bfd_elf_generic_reloc, "R_X86_64_PC64", FALSE, MINUS_ONE, MINUS_ONE, 118 TRUE), 119 HOWTO(R_X86_64_GOTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 120 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", 121 FALSE, MINUS_ONE, MINUS_ONE, FALSE), 122 HOWTO(R_X86_64_GOTPC32, 0, 2, 32, TRUE, 0, complain_overflow_signed, 123 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", 124 FALSE, 0xffffffff, 0xffffffff, TRUE), 125 HOWTO(R_X86_64_GOT64, 0, 4, 64, FALSE, 0, complain_overflow_signed, 126 bfd_elf_generic_reloc, "R_X86_64_GOT64", FALSE, MINUS_ONE, MINUS_ONE, 127 FALSE), 128 HOWTO(R_X86_64_GOTPCREL64, 0, 4, 64, TRUE, 0, complain_overflow_signed, 129 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", FALSE, MINUS_ONE, 130 MINUS_ONE, TRUE), 131 HOWTO(R_X86_64_GOTPC64, 0, 4, 64, TRUE, 0, complain_overflow_signed, 132 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", 133 FALSE, MINUS_ONE, MINUS_ONE, TRUE), 134 HOWTO(R_X86_64_GOTPLT64, 0, 4, 64, FALSE, 0, complain_overflow_signed, 135 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", FALSE, MINUS_ONE, 136 MINUS_ONE, FALSE), 137 HOWTO(R_X86_64_PLTOFF64, 0, 4, 64, FALSE, 0, complain_overflow_signed, 138 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", FALSE, MINUS_ONE, 139 MINUS_ONE, FALSE), 140 HOWTO(R_X86_64_SIZE32, 0, 2, 32, FALSE, 0, complain_overflow_unsigned, 141 bfd_elf_generic_reloc, "R_X86_64_SIZE32", FALSE, 0xffffffff, 0xffffffff, 142 FALSE), 143 HOWTO(R_X86_64_SIZE64, 0, 4, 64, FALSE, 0, complain_overflow_unsigned, 144 
bfd_elf_generic_reloc, "R_X86_64_SIZE64", FALSE, MINUS_ONE, MINUS_ONE, 145 FALSE), 146 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 2, 32, TRUE, 0, 147 complain_overflow_bitfield, bfd_elf_generic_reloc, 148 "R_X86_64_GOTPC32_TLSDESC", 149 FALSE, 0xffffffff, 0xffffffff, TRUE), 150 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, FALSE, 0, 151 complain_overflow_dont, bfd_elf_generic_reloc, 152 "R_X86_64_TLSDESC_CALL", 153 FALSE, 0, 0, FALSE), 154 HOWTO(R_X86_64_TLSDESC, 0, 4, 64, FALSE, 0, 155 complain_overflow_bitfield, bfd_elf_generic_reloc, 156 "R_X86_64_TLSDESC", 157 FALSE, MINUS_ONE, MINUS_ONE, FALSE), 158 HOWTO(R_X86_64_IRELATIVE, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 159 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", FALSE, MINUS_ONE, 160 MINUS_ONE, FALSE), 161 HOWTO(R_X86_64_RELATIVE64, 0, 4, 64, FALSE, 0, complain_overflow_bitfield, 162 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", FALSE, MINUS_ONE, 163 MINUS_ONE, FALSE), 164 HOWTO(R_X86_64_PC32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed, 165 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", FALSE, 0xffffffff, 0xffffffff, 166 TRUE), 167 HOWTO(R_X86_64_PLT32_BND, 0, 2, 32, TRUE, 0, complain_overflow_signed, 168 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", FALSE, 0xffffffff, 0xffffffff, 169 TRUE), 170 HOWTO(R_X86_64_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed, 171 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", FALSE, 0xffffffff, 172 0xffffffff, TRUE), 173 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 2, 32, TRUE, 0, complain_overflow_signed, 174 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", FALSE, 0xffffffff, 175 0xffffffff, TRUE), 176 177 /* We have a gap in the reloc numbers here. 178 R_X86_64_standard counts the number up to this point, and 179 R_X86_64_vt_offset is the value to subtract from a reloc type of 180 R_X86_64_GNU_VT* to form an index into this table. */ 181 #define R_X86_64_standard (R_X86_64_REX_GOTPCRELX + 1) 182 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard) 183 184 /* GNU extension to record C++ vtable hierarchy. */ 185 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 4, 0, FALSE, 0, complain_overflow_dont, 186 NULL, "R_X86_64_GNU_VTINHERIT", FALSE, 0, 0, FALSE), 187 188 /* GNU extension to record C++ vtable member usage. */ 189 HOWTO (R_X86_64_GNU_VTENTRY, 0, 4, 0, FALSE, 0, complain_overflow_dont, 190 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", FALSE, 0, 0, 191 FALSE), 192 193 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */ 194 HOWTO(R_X86_64_32, 0, 2, 32, FALSE, 0, complain_overflow_bitfield, 195 bfd_elf_generic_reloc, "R_X86_64_32", FALSE, 0xffffffff, 0xffffffff, 196 FALSE) 197 }; 198 199 /* Set if a relocation is converted from a GOTPCREL relocation. */ 200 #define R_X86_64_converted_reloc_bit (1 << 7) 201 202 #define X86_PCREL_TYPE_P(TYPE) \ 203 ( ((TYPE) == R_X86_64_PC8) \ 204 || ((TYPE) == R_X86_64_PC16) \ 205 || ((TYPE) == R_X86_64_PC32) \ 206 || ((TYPE) == R_X86_64_PC32_BND) \ 207 || ((TYPE) == R_X86_64_PC64)) 208 209 #define X86_SIZE_TYPE_P(TYPE) \ 210 ((TYPE) == R_X86_64_SIZE32 || (TYPE) == R_X86_64_SIZE64) 211 212 /* Map BFD relocs to the x86_64 elf relocs. 
*/ 213 struct elf_reloc_map 214 { 215 bfd_reloc_code_real_type bfd_reloc_val; 216 unsigned char elf_reloc_val; 217 }; 218 219 static const struct elf_reloc_map x86_64_reloc_map[] = 220 { 221 { BFD_RELOC_NONE, R_X86_64_NONE, }, 222 { BFD_RELOC_64, R_X86_64_64, }, 223 { BFD_RELOC_32_PCREL, R_X86_64_PC32, }, 224 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,}, 225 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,}, 226 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, }, 227 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, }, 228 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, }, 229 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, }, 230 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, }, 231 { BFD_RELOC_32, R_X86_64_32, }, 232 { BFD_RELOC_X86_64_32S, R_X86_64_32S, }, 233 { BFD_RELOC_16, R_X86_64_16, }, 234 { BFD_RELOC_16_PCREL, R_X86_64_PC16, }, 235 { BFD_RELOC_8, R_X86_64_8, }, 236 { BFD_RELOC_8_PCREL, R_X86_64_PC8, }, 237 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, }, 238 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, }, 239 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, }, 240 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, }, 241 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, }, 242 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, }, 243 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, }, 244 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, }, 245 { BFD_RELOC_64_PCREL, R_X86_64_PC64, }, 246 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, }, 247 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, }, 248 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, }, 249 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, }, 250 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, }, 251 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, }, 252 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, }, 253 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, }, 254 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, }, 255 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, }, 256 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, }, 257 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, }, 258 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, }, 259 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, }, 260 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, }, 261 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, }, 262 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, }, 263 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, }, 264 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, }, 265 }; 266 267 static reloc_howto_type * 268 elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type) 269 { 270 unsigned i; 271 272 if (r_type == (unsigned int) R_X86_64_32) 273 { 274 if (ABI_64_P (abfd)) 275 i = r_type; 276 else 277 i = ARRAY_SIZE (x86_64_elf_howto_table) - 1; 278 } 279 else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT 280 || r_type >= (unsigned int) R_X86_64_max) 281 { 282 if (r_type >= (unsigned int) R_X86_64_standard) 283 { 284 /* xgettext:c-format */ 285 _bfd_error_handler (_("%pB: unsupported relocation type %#x"), 286 abfd, r_type); 287 bfd_set_error (bfd_error_bad_value); 288 return NULL; 289 } 290 i = r_type; 291 } 292 else 293 i = r_type - (unsigned int) R_X86_64_vt_offset; 294 BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type); 295 return &x86_64_elf_howto_table[i]; 296 } 297 298 /* Given a BFD reloc type, return a HOWTO structure. 
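The BFD reloc code is looked up in x86_64_reloc_map and the resulting ELF reloc number is then converted with elf_x86_64_rtype_to_howto.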
*/ 299 static reloc_howto_type * 300 elf_x86_64_reloc_type_lookup (bfd *abfd, 301 bfd_reloc_code_real_type code) 302 { 303 unsigned int i; 304 305 for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map); 306 i++) 307 { 308 if (x86_64_reloc_map[i].bfd_reloc_val == code) 309 return elf_x86_64_rtype_to_howto (abfd, 310 x86_64_reloc_map[i].elf_reloc_val); 311 } 312 return NULL; 313 } 314 315 static reloc_howto_type * 316 elf_x86_64_reloc_name_lookup (bfd *abfd, 317 const char *r_name) 318 { 319 unsigned int i; 320 321 if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0) 322 { 323 /* Get x32 R_X86_64_32. */ 324 reloc_howto_type *reloc 325 = &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1]; 326 BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32); 327 return reloc; 328 } 329 330 for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++) 331 if (x86_64_elf_howto_table[i].name != NULL 332 && strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0) 333 return &x86_64_elf_howto_table[i]; 334 335 return NULL; 336 } 337 338 /* Given an x86_64 ELF reloc type, fill in an arelent structure. */ 339 340 static bfd_boolean 341 elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr, 342 Elf_Internal_Rela *dst) 343 { 344 unsigned r_type; 345 346 r_type = ELF32_R_TYPE (dst->r_info); 347 cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type); 348 if (cache_ptr->howto == NULL) 349 return FALSE; 350 BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE); 351 return TRUE; 352 } 353 354 /* Support for core dump NOTE sections. */ 355 static bfd_boolean 356 elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note) 357 { 358 int offset; 359 size_t size; 360 361 switch (note->descsz) 362 { 363 default: 364 return FALSE; 365 366 case 296: /* sizeof(struct elf_prstatus) on Linux/x32 */ 367 /* pr_cursig */ 368 elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12); 369 370 /* pr_pid */ 371 elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24); 372 373 /* pr_reg */ 374 offset = 72; 375 size = 216; 376 377 break; 378 379 case 336: /* sizeof(struct elf_prstatus) on Linux/x86_64 */ 380 /* pr_cursig */ 381 elf_tdata (abfd)->core->signal 382 = bfd_get_16 (abfd, note->descdata + 12); 383 384 /* pr_pid */ 385 elf_tdata (abfd)->core->lwpid 386 = bfd_get_32 (abfd, note->descdata + 32); 387 388 /* pr_reg */ 389 offset = 112; 390 size = 216; 391 392 break; 393 } 394 395 /* Make a ".reg/999" section.
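It holds the general purpose registers taken from pr_reg, using the offset and size determined above.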
*/ 396 return _bfd_elfcore_make_pseudosection (abfd, ".reg", 397 size, note->descpos + offset); 398 } 399 400 static bfd_boolean 401 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) 402 { 403 switch (note->descsz) 404 { 405 default: 406 return FALSE; 407 408 case 124: /* sizeof(struct elf_prpsinfo) on Linux/x32 */ 409 elf_tdata (abfd)->core->pid 410 = bfd_get_32 (abfd, note->descdata + 12); 411 elf_tdata (abfd)->core->program 412 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16); 413 elf_tdata (abfd)->core->command 414 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80); 415 break; 416 417 case 136: /* sizeof(struct elf_prpsinfo) on Linux/x86_64 */ 418 elf_tdata (abfd)->core->pid 419 = bfd_get_32 (abfd, note->descdata + 24); 420 elf_tdata (abfd)->core->program 421 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16); 422 elf_tdata (abfd)->core->command 423 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80); 424 } 425 426 /* Note that for some reason, a spurious space is tacked 427 onto the end of the args in some (at least one anyway) 428 implementations, so strip it off if it exists. */ 429 430 { 431 char *command = elf_tdata (abfd)->core->command; 432 int n = strlen (command); 433 434 if (0 < n && command[n - 1] == ' ') 435 command[n - 1] = '\0'; 436 } 437 438 return TRUE; 439 } 440 441 #ifdef CORE_HEADER 442 # if GCC_VERSION >= 8000 443 # pragma GCC diagnostic push 444 # pragma GCC diagnostic ignored "-Wstringop-truncation" 445 # endif 446 static char * 447 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz, 448 int note_type, ...) 449 { 450 const struct elf_backend_data *bed = get_elf_backend_data (abfd); 451 va_list ap; 452 const char *fname, *psargs; 453 long pid; 454 int cursig; 455 const void *gregs; 456 457 switch (note_type) 458 { 459 default: 460 return NULL; 461 462 case NT_PRPSINFO: 463 va_start (ap, note_type); 464 fname = va_arg (ap, const char *); 465 psargs = va_arg (ap, const char *); 466 va_end (ap); 467 468 if (bed->s->elfclass == ELFCLASS32) 469 { 470 prpsinfo32_t data; 471 memset (&data, 0, sizeof (data)); 472 strncpy (data.pr_fname, fname, sizeof (data.pr_fname)); 473 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs)); 474 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 475 &data, sizeof (data)); 476 } 477 else 478 { 479 prpsinfo64_t data; 480 memset (&data, 0, sizeof (data)); 481 strncpy (data.pr_fname, fname, sizeof (data.pr_fname)); 482 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs)); 483 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 484 &data, sizeof (data)); 485 } 486 /* NOTREACHED */ 487 488 case NT_PRSTATUS: 489 va_start (ap, note_type); 490 pid = va_arg (ap, long); 491 cursig = va_arg (ap, int); 492 gregs = va_arg (ap, const void *); 493 va_end (ap); 494 495 if (bed->s->elfclass == ELFCLASS32) 496 { 497 if (bed->elf_machine_code == EM_X86_64) 498 { 499 prstatusx32_t prstat; 500 memset (&prstat, 0, sizeof (prstat)); 501 prstat.pr_pid = pid; 502 prstat.pr_cursig = cursig; 503 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); 504 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 505 &prstat, sizeof (prstat)); 506 } 507 else 508 { 509 prstatus32_t prstat; 510 memset (&prstat, 0, sizeof (prstat)); 511 prstat.pr_pid = pid; 512 prstat.pr_cursig = cursig; 513 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); 514 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 515 &prstat, sizeof (prstat)); 516 } 517 } 518 else 519 { 520 prstatus64_t 
prstat; 521 memset (&prstat, 0, sizeof (prstat)); 522 prstat.pr_pid = pid; 523 prstat.pr_cursig = cursig; 524 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); 525 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 526 &prstat, sizeof (prstat)); 527 } 528 } 529 /* NOTREACHED */ 530 } 531 # if GCC_VERSION >= 8000 532 # pragma GCC diagnostic pop 533 # endif 534 #endif 535 536 /* Functions for the x86-64 ELF linker. */ 537 538 /* The size in bytes of an entry in the global offset table. */ 539 540 #define GOT_ENTRY_SIZE 8 541 542 /* The size in bytes of an entry in the lazy procedure linkage table. */ 543 544 #define LAZY_PLT_ENTRY_SIZE 16 545 546 /* The size in bytes of an entry in the non-lazy procedure linkage 547 table. */ 548 549 #define NON_LAZY_PLT_ENTRY_SIZE 8 550 551 /* The first entry in a lazy procedure linkage table looks like this. 552 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this 553 works. */ 554 555 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] = 556 { 557 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 558 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */ 559 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */ 560 }; 561 562 /* Subsequent entries in a lazy procedure linkage table look like this. */ 563 564 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] = 565 { 566 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ 567 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 568 0x68, /* pushq immediate */ 569 0, 0, 0, 0, /* replaced with index into relocation table. */ 570 0xe9, /* jmp relative */ 571 0, 0, 0, 0 /* replaced with offset to start of .plt0. */ 572 }; 573 574 /* The first entry in a lazy procedure linkage table with BND prefix 575 looks like this. */ 576 577 static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] = 578 { 579 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 580 0xf2, 0xff, 0x25, 16, 0, 0, 0, /* bnd jmpq *GOT+16(%rip) */ 581 0x0f, 0x1f, 0 /* nopl (%rax) */ 582 }; 583 584 /* Subsequent entries for branches with BND prefix in a lazy procedure 585 linkage table look like this. */ 586 587 static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] = 588 { 589 0x68, 0, 0, 0, 0, /* pushq immediate */ 590 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ 591 0x0f, 0x1f, 0x44, 0, 0 /* nopl 0(%rax,%rax,1) */ 592 }; 593 594 /* The first entry in the IBT-enabled lazy procedure linkage table is the 595 same as the lazy PLT with BND prefix so that bound registers are 596 preserved when control is passed to the dynamic linker. Subsequent 597 entries for an IBT-enabled lazy procedure linkage table look like 598 this. */ 599 600 static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = 601 { 602 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ 603 0x68, 0, 0, 0, 0, /* pushq immediate */ 604 0xf2, 0xe9, 0, 0, 0, 0, /* bnd jmpq relative */ 605 0x90 /* nop */ 606 }; 607 608 /* The first entry in the x32 IBT-enabled lazy procedure linkage table 609 is the same as the normal lazy PLT. Subsequent entries for an 610 x32 IBT-enabled lazy procedure linkage table look like this. */ 611 612 static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = 613 { 614 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ 615 0x68, 0, 0, 0, 0, /* pushq immediate */ 616 0xe9, 0, 0, 0, 0, /* jmpq relative */ 617 0x66, 0x90 /* xchg %ax,%ax */ 618 }; 619 620 /* Entries in the non-lazy procedure linkage table look like this.
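These entries are used when lazy binding is not wanted; each one simply jumps indirectly through its GOT slot, which is resolved at load time.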
*/ 621 622 static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] = 623 { 624 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ 625 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 626 0x66, 0x90 /* xchg %ax,%ax */ 627 }; 628 629 /* Entries for branches with BND prefix in the non-lazy procedure 630 linkage table look like this. */ 631 632 static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] = 633 { 634 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ 635 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 636 0x90 /* nop */ 637 }; 638 639 /* Entries for branches with IBT enabled in the non-lazy procedure 640 linkage table look like this. They have the same size as the lazy 641 PLT entry. */ 642 643 static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = 644 { 645 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ 646 0xf2, 0xff, 0x25, /* bnd jmpq *name@GOTPC(%rip) */ 647 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 648 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopl 0x0(%rax,%rax,1) */ 649 }; 650 651 /* Entries for branches with IBT enabled in the x32 non-lazy procedure 652 linkage table look like this. They have the same size as the lazy 653 PLT entry. */ 654 655 static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] = 656 { 657 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ 658 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ 659 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 660 0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1) */ 661 }; 662 663 /* The TLSDESC entry in a lazy procedure linkage table. */ 664 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] = 665 { 666 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ 667 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 668 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */ 669 }; 670 671 /* .eh_frame covering the lazy .plt section. */ 672 673 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] = 674 { 675 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 676 0, 0, 0, 0, /* CIE ID */ 677 1, /* CIE version */ 678 'z', 'R', 0, /* Augmentation string */ 679 1, /* Code alignment factor */ 680 0x78, /* Data alignment factor */ 681 16, /* Return address column */ 682 1, /* Augmentation size */ 683 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 684 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 685 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 686 DW_CFA_nop, DW_CFA_nop, 687 688 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 689 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 690 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 691 0, 0, 0, 0, /* .plt size goes here */ 692 0, /* Augmentation size */ 693 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 694 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 695 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 696 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 697 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 698 11, /* Block length */ 699 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 700 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 701 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge, 702 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 703 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 704 }; 705 706 /* .eh_frame covering the lazy BND .plt section.
*/ 707 708 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] = 709 { 710 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 711 0, 0, 0, 0, /* CIE ID */ 712 1, /* CIE version */ 713 'z', 'R', 0, /* Augmentation string */ 714 1, /* Code alignment factor */ 715 0x78, /* Data alignment factor */ 716 16, /* Return address column */ 717 1, /* Augmentation size */ 718 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 719 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 720 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 721 DW_CFA_nop, DW_CFA_nop, 722 723 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 724 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 725 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 726 0, 0, 0, 0, /* .plt size goes here */ 727 0, /* Augmentation size */ 728 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 729 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 730 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 731 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 732 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 733 11, /* Block length */ 734 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 735 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 736 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge, 737 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 738 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 739 }; 740 741 /* .eh_frame covering the lazy .plt section with IBT-enabled. */ 742 743 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] = 744 { 745 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 746 0, 0, 0, 0, /* CIE ID */ 747 1, /* CIE version */ 748 'z', 'R', 0, /* Augmentation string */ 749 1, /* Code alignment factor */ 750 0x78, /* Data alignment factor */ 751 16, /* Return address column */ 752 1, /* Augmentation size */ 753 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 754 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 755 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 756 DW_CFA_nop, DW_CFA_nop, 757 758 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 759 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 760 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 761 0, 0, 0, 0, /* .plt size goes here */ 762 0, /* Augmentation size */ 763 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 764 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 765 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 766 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 767 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 768 11, /* Block length */ 769 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 770 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 771 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge, 772 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 773 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 774 }; 775 776 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. 
*/ 777 778 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] = 779 { 780 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 781 0, 0, 0, 0, /* CIE ID */ 782 1, /* CIE version */ 783 'z', 'R', 0, /* Augmentation string */ 784 1, /* Code alignment factor */ 785 0x78, /* Data alignment factor */ 786 16, /* Return address column */ 787 1, /* Augmentation size */ 788 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 789 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 790 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 791 DW_CFA_nop, DW_CFA_nop, 792 793 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 794 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 795 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 796 0, 0, 0, 0, /* .plt size goes here */ 797 0, /* Augmentation size */ 798 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 799 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 800 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 801 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 802 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 803 11, /* Block length */ 804 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 805 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 806 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge, 807 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 808 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 809 }; 810 811 /* .eh_frame covering the non-lazy .plt section. */ 812 813 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] = 814 { 815 #define PLT_GOT_FDE_LENGTH 20 816 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 817 0, 0, 0, 0, /* CIE ID */ 818 1, /* CIE version */ 819 'z', 'R', 0, /* Augmentation string */ 820 1, /* Code alignment factor */ 821 0x78, /* Data alignment factor */ 822 16, /* Return address column */ 823 1, /* Augmentation size */ 824 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 825 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 826 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 827 DW_CFA_nop, DW_CFA_nop, 828 829 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 830 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 831 0, 0, 0, 0, /* the start of non-lazy .plt goes here */ 832 0, 0, 0, 0, /* non-lazy .plt size goes here */ 833 0, /* Augmentation size */ 834 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, 835 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 836 }; 837 838 /* These are the standard parameters. 
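The numeric fields in the layout structures below are byte offsets into the PLT entry templates defined above; they tell the shared elfxx-x86 code where to patch GOT offsets, relocation indices and branch displacements.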
*/ 839 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt = 840 { 841 elf_x86_64_lazy_plt0_entry, /* plt0_entry */ 842 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ 843 elf_x86_64_lazy_plt_entry, /* plt_entry */ 844 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 845 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ 846 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 847 6, /* plt_tlsdesc_got1_offset */ 848 12, /* plt_tlsdesc_got2_offset */ 849 10, /* plt_tlsdesc_got1_insn_end */ 850 16, /* plt_tlsdesc_got2_insn_end */ 851 2, /* plt0_got1_offset */ 852 8, /* plt0_got2_offset */ 853 12, /* plt0_got2_insn_end */ 854 2, /* plt_got_offset */ 855 7, /* plt_reloc_offset */ 856 12, /* plt_plt_offset */ 857 6, /* plt_got_insn_size */ 858 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */ 859 6, /* plt_lazy_offset */ 860 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */ 861 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */ 862 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */ 863 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */ 864 }; 865 866 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt = 867 { 868 elf_x86_64_non_lazy_plt_entry, /* plt_entry */ 869 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */ 870 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 871 2, /* plt_got_offset */ 872 6, /* plt_got_insn_size */ 873 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ 874 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ 875 }; 876 877 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt = 878 { 879 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */ 880 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ 881 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */ 882 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 883 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ 884 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 885 6, /* plt_tlsdesc_got1_offset */ 886 12, /* plt_tlsdesc_got2_offset */ 887 10, /* plt_tlsdesc_got1_insn_end */ 888 16, /* plt_tlsdesc_got2_insn_end */ 889 2, /* plt0_got1_offset */ 890 1+8, /* plt0_got2_offset */ 891 1+12, /* plt0_got2_insn_end */ 892 1+2, /* plt_got_offset */ 893 1, /* plt_reloc_offset */ 894 7, /* plt_plt_offset */ 895 1+6, /* plt_got_insn_size */ 896 11, /* plt_plt_insn_end */ 897 0, /* plt_lazy_offset */ 898 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */ 899 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */ 900 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */ 901 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */ 902 }; 903 904 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt = 905 { 906 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */ 907 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */ 908 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 909 1+2, /* plt_got_offset */ 910 1+6, /* plt_got_insn_size */ 911 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ 912 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ 913 }; 914 915 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt = 916 { 917 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */ 918 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ 919 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */ 920 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 921 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ 922 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 923 6, /* plt_tlsdesc_got1_offset */ 924 12, /* plt_tlsdesc_got2_offset */ 925 10, /* plt_tlsdesc_got1_insn_end */ 926 16, /* 
plt_tlsdesc_got2_insn_end */ 927 2, /* plt0_got1_offset */ 928 1+8, /* plt0_got2_offset */ 929 1+12, /* plt0_got2_insn_end */ 930 4+1+2, /* plt_got_offset */ 931 4+1, /* plt_reloc_offset */ 932 4+1+6, /* plt_plt_offset */ 933 4+1+6, /* plt_got_insn_size */ 934 4+1+5+5, /* plt_plt_insn_end */ 935 0, /* plt_lazy_offset */ 936 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */ 937 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */ 938 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */ 939 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */ 940 }; 941 942 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt = 943 { 944 elf_x86_64_lazy_plt0_entry, /* plt0_entry */ 945 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ 946 elf_x32_lazy_ibt_plt_entry, /* plt_entry */ 947 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 948 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ 949 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 950 6, /* plt_tlsdesc_got1_offset */ 951 12, /* plt_tlsdesc_got2_offset */ 952 10, /* plt_tlsdesc_got1_insn_end */ 953 16, /* plt_tlsdesc_got2_insn_end */ 954 2, /* plt0_got1_offset */ 955 8, /* plt0_got2_offset */ 956 12, /* plt0_got2_insn_end */ 957 4+2, /* plt_got_offset */ 958 4+1, /* plt_reloc_offset */ 959 4+6, /* plt_plt_offset */ 960 4+6, /* plt_got_insn_size */ 961 4+5+5, /* plt_plt_insn_end */ 962 0, /* plt_lazy_offset */ 963 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */ 964 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */ 965 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */ 966 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */ 967 }; 968 969 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt = 970 { 971 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */ 972 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */ 973 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 974 4+1+2, /* plt_got_offset */ 975 4+1+6, /* plt_got_insn_size */ 976 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ 977 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ 978 }; 979 980 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt = 981 { 982 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */ 983 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */ 984 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 985 4+2, /* plt_got_offset */ 986 4+6, /* plt_got_insn_size */ 987 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ 988 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ 989 }; 990 991 static const struct elf_x86_backend_data elf_x86_64_arch_bed = 992 { 993 is_normal /* os */ 994 }; 995 996 #define elf_backend_arch_data &elf_x86_64_arch_bed 997 998 static bfd_boolean 999 elf64_x86_64_elf_object_p (bfd *abfd) 1000 { 1001 /* Set the right machine number for an x86-64 elf64 file. */ 1002 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64); 1003 return TRUE; 1004 } 1005 1006 static bfd_boolean 1007 elf32_x86_64_elf_object_p (bfd *abfd) 1008 { 1009 /* Set the right machine number for an x86-64 elf32 file. */ 1010 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32); 1011 return TRUE; 1012 } 1013 1014 /* Return TRUE if the TLS access code sequence supports transition 1015 from R_TYPE.
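The check matches the instruction bytes around the relocation against the known GD, LD, IE and GDesc code sequences described in the comments below before any transition is applied.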
*/ 1016 1017 static bfd_boolean 1018 elf_x86_64_check_tls_transition (bfd *abfd, 1019 struct bfd_link_info *info, 1020 asection *sec, 1021 bfd_byte *contents, 1022 Elf_Internal_Shdr *symtab_hdr, 1023 struct elf_link_hash_entry **sym_hashes, 1024 unsigned int r_type, 1025 const Elf_Internal_Rela *rel, 1026 const Elf_Internal_Rela *relend) 1027 { 1028 unsigned int val; 1029 unsigned long r_symndx; 1030 bfd_boolean largepic = FALSE; 1031 struct elf_link_hash_entry *h; 1032 bfd_vma offset; 1033 struct elf_x86_link_hash_table *htab; 1034 bfd_byte *call; 1035 bfd_boolean indirect_call; 1036 1037 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 1038 offset = rel->r_offset; 1039 switch (r_type) 1040 { 1041 case R_X86_64_TLSGD: 1042 case R_X86_64_TLSLD: 1043 if ((rel + 1) >= relend) 1044 return FALSE; 1045 1046 if (r_type == R_X86_64_TLSGD) 1047 { 1048 /* Check transition from GD access model. For 64bit, only 1049 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 1050 .word 0x6666; rex64; call __tls_get_addr@PLT 1051 or 1052 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 1053 .byte 0x66; rex64 1054 call *__tls_get_addr@GOTPCREL(%rip) 1055 which may be converted to 1056 addr32 call __tls_get_addr 1057 can transit to different access model. For 32bit, only 1058 leaq foo@tlsgd(%rip), %rdi 1059 .word 0x6666; rex64; call __tls_get_addr@PLT 1060 or 1061 leaq foo@tlsgd(%rip), %rdi 1062 .byte 0x66; rex64 1063 call *__tls_get_addr@GOTPCREL(%rip) 1064 which may be converted to 1065 addr32 call __tls_get_addr 1066 can transit to different access model. For largepic, 1067 we also support: 1068 leaq foo@tlsgd(%rip), %rdi 1069 movabsq $__tls_get_addr@pltoff, %rax 1070 addq $r15, %rax 1071 call *%rax 1072 or 1073 leaq foo@tlsgd(%rip), %rdi 1074 movabsq $__tls_get_addr@pltoff, %rax 1075 addq $rbx, %rax 1076 call *%rax */ 1077 1078 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d }; 1079 1080 if ((offset + 12) > sec->size) 1081 return FALSE; 1082 1083 call = contents + offset + 4; 1084 if (call[0] != 0x66 1085 || !((call[1] == 0x48 1086 && call[2] == 0xff 1087 && call[3] == 0x15) 1088 || (call[1] == 0x48 1089 && call[2] == 0x67 1090 && call[3] == 0xe8) 1091 || (call[1] == 0x66 1092 && call[2] == 0x48 1093 && call[3] == 0xe8))) 1094 { 1095 if (!ABI_64_P (abfd) 1096 || (offset + 19) > sec->size 1097 || offset < 3 1098 || memcmp (call - 7, leaq + 1, 3) != 0 1099 || memcmp (call, "\x48\xb8", 2) != 0 1100 || call[11] != 0x01 1101 || call[13] != 0xff 1102 || call[14] != 0xd0 1103 || !((call[10] == 0x48 && call[12] == 0xd8) 1104 || (call[10] == 0x4c && call[12] == 0xf8))) 1105 return FALSE; 1106 largepic = TRUE; 1107 } 1108 else if (ABI_64_P (abfd)) 1109 { 1110 if (offset < 4 1111 || memcmp (contents + offset - 4, leaq, 4) != 0) 1112 return FALSE; 1113 } 1114 else 1115 { 1116 if (offset < 3 1117 || memcmp (contents + offset - 3, leaq + 1, 3) != 0) 1118 return FALSE; 1119 } 1120 indirect_call = call[2] == 0xff; 1121 } 1122 else 1123 { 1124 /* Check transition from LD access model. Only 1125 leaq foo@tlsld(%rip), %rdi; 1126 call __tls_get_addr@PLT 1127 or 1128 leaq foo@tlsld(%rip), %rdi; 1129 call *__tls_get_addr@GOTPCREL(%rip) 1130 which may be converted to 1131 addr32 call __tls_get_addr 1132 can transit to different access model. 
For largepic 1133 we also support: 1134 leaq foo@tlsld(%rip), %rdi 1135 movabsq $__tls_get_addr@pltoff, %rax 1136 addq $r15, %rax 1137 call *%rax 1138 or 1139 leaq foo@tlsld(%rip), %rdi 1140 movabsq $__tls_get_addr@pltoff, %rax 1141 addq $rbx, %rax 1142 call *%rax */ 1143 1144 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d }; 1145 1146 if (offset < 3 || (offset + 9) > sec->size) 1147 return FALSE; 1148 1149 if (memcmp (contents + offset - 3, lea, 3) != 0) 1150 return FALSE; 1151 1152 call = contents + offset + 4; 1153 if (!(call[0] == 0xe8 1154 || (call[0] == 0xff && call[1] == 0x15) 1155 || (call[0] == 0x67 && call[1] == 0xe8))) 1156 { 1157 if (!ABI_64_P (abfd) 1158 || (offset + 19) > sec->size 1159 || memcmp (call, "\x48\xb8", 2) != 0 1160 || call[11] != 0x01 1161 || call[13] != 0xff 1162 || call[14] != 0xd0 1163 || !((call[10] == 0x48 && call[12] == 0xd8) 1164 || (call[10] == 0x4c && call[12] == 0xf8))) 1165 return FALSE; 1166 largepic = TRUE; 1167 } 1168 indirect_call = call[0] == 0xff; 1169 } 1170 1171 r_symndx = htab->r_sym (rel[1].r_info); 1172 if (r_symndx < symtab_hdr->sh_info) 1173 return FALSE; 1174 1175 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 1176 if (h == NULL 1177 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr) 1178 return FALSE; 1179 else 1180 { 1181 r_type = (ELF32_R_TYPE (rel[1].r_info) 1182 & ~R_X86_64_converted_reloc_bit); 1183 if (largepic) 1184 return r_type == R_X86_64_PLTOFF64; 1185 else if (indirect_call) 1186 return r_type == R_X86_64_GOTPCRELX; 1187 else 1188 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32); 1189 } 1190 1191 case R_X86_64_GOTTPOFF: 1192 /* Check transition from IE access model: 1193 mov foo@gottpoff(%rip), %reg 1194 add foo@gottpoff(%rip), %reg 1195 */ 1196 1197 /* Check REX prefix first. */ 1198 if (offset >= 3 && (offset + 4) <= sec->size) 1199 { 1200 val = bfd_get_8 (abfd, contents + offset - 3); 1201 if (val != 0x48 && val != 0x4c) 1202 { 1203 /* X32 may have 0x44 REX prefix or no REX prefix. */ 1204 if (ABI_64_P (abfd)) 1205 return FALSE; 1206 } 1207 } 1208 else 1209 { 1210 /* X32 may not have any REX prefix. */ 1211 if (ABI_64_P (abfd)) 1212 return FALSE; 1213 if (offset < 2 || (offset + 3) > sec->size) 1214 return FALSE; 1215 } 1216 1217 val = bfd_get_8 (abfd, contents + offset - 2); 1218 if (val != 0x8b && val != 0x03) 1219 return FALSE; 1220 1221 val = bfd_get_8 (abfd, contents + offset - 1); 1222 return (val & 0xc7) == 5; 1223 1224 case R_X86_64_GOTPC32_TLSDESC: 1225 /* Check transition from GDesc access model: 1226 leaq x@tlsdesc(%rip), %rax 1227 1228 Make sure it's a leaq adding rip to a 32-bit offset 1229 into any register, although it's probably almost always 1230 going to be rax. */ 1231 1232 if (offset < 3 || (offset + 4) > sec->size) 1233 return FALSE; 1234 1235 val = bfd_get_8 (abfd, contents + offset - 3); 1236 if ((val & 0xfb) != 0x48) 1237 return FALSE; 1238 1239 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d) 1240 return FALSE; 1241 1242 val = bfd_get_8 (abfd, contents + offset - 1); 1243 return (val & 0xc7) == 0x05; 1244 1245 case R_X86_64_TLSDESC_CALL: 1246 /* Check transition from GDesc access model: 1247 call *x@tlsdesc(%rax) 1248 */ 1249 if (offset + 2 <= sec->size) 1250 { 1251 /* Make sure that it's a call *x@tlsdesc(%rax). 
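That is, opcode 0xff with ModRM byte 0x10, an indirect call through %rax.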
*/ 1252 call = contents + offset; 1253 return call[0] == 0xff && call[1] == 0x10; 1254 } 1255 1256 return FALSE; 1257 1258 default: 1259 abort (); 1260 } 1261 } 1262 1263 /* Return TRUE if the TLS access transition is OK or no transition 1264 will be performed. Update R_TYPE if there is a transition. */ 1265 1266 static bfd_boolean 1267 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, 1268 asection *sec, bfd_byte *contents, 1269 Elf_Internal_Shdr *symtab_hdr, 1270 struct elf_link_hash_entry **sym_hashes, 1271 unsigned int *r_type, int tls_type, 1272 const Elf_Internal_Rela *rel, 1273 const Elf_Internal_Rela *relend, 1274 struct elf_link_hash_entry *h, 1275 unsigned long r_symndx, 1276 bfd_boolean from_relocate_section) 1277 { 1278 unsigned int from_type = *r_type; 1279 unsigned int to_type = from_type; 1280 bfd_boolean check = TRUE; 1281 1282 /* Skip TLS transition for functions. */ 1283 if (h != NULL 1284 && (h->type == STT_FUNC 1285 || h->type == STT_GNU_IFUNC)) 1286 return TRUE; 1287 1288 switch (from_type) 1289 { 1290 case R_X86_64_TLSGD: 1291 case R_X86_64_GOTPC32_TLSDESC: 1292 case R_X86_64_TLSDESC_CALL: 1293 case R_X86_64_GOTTPOFF: 1294 if (bfd_link_executable (info)) 1295 { 1296 if (h == NULL) 1297 to_type = R_X86_64_TPOFF32; 1298 else 1299 to_type = R_X86_64_GOTTPOFF; 1300 } 1301 1302 /* When we are called from elf_x86_64_relocate_section, there may 1303 be additional transitions based on TLS_TYPE. */ 1304 if (from_relocate_section) 1305 { 1306 unsigned int new_to_type = to_type; 1307 1308 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type)) 1309 new_to_type = R_X86_64_TPOFF32; 1310 1311 if (to_type == R_X86_64_TLSGD 1312 || to_type == R_X86_64_GOTPC32_TLSDESC 1313 || to_type == R_X86_64_TLSDESC_CALL) 1314 { 1315 if (tls_type == GOT_TLS_IE) 1316 new_to_type = R_X86_64_GOTTPOFF; 1317 } 1318 1319 /* We checked the transition before when we were called from 1320 elf_x86_64_check_relocs. We only want to check the new 1321 transition which hasn't been checked before. */ 1322 check = new_to_type != to_type && from_type == to_type; 1323 to_type = new_to_type; 1324 } 1325 1326 break; 1327 1328 case R_X86_64_TLSLD: 1329 if (bfd_link_executable (info)) 1330 to_type = R_X86_64_TPOFF32; 1331 break; 1332 1333 default: 1334 return TRUE; 1335 } 1336 1337 /* Return TRUE if there is no transition. */ 1338 if (from_type == to_type) 1339 return TRUE; 1340 1341 /* Check if the transition can be performed. */ 1342 if (check 1343 && ! 
elf_x86_64_check_tls_transition (abfd, info, sec, contents, 1344 symtab_hdr, sym_hashes, 1345 from_type, rel, relend)) 1346 { 1347 reloc_howto_type *from, *to; 1348 const char *name; 1349 1350 from = elf_x86_64_rtype_to_howto (abfd, from_type); 1351 to = elf_x86_64_rtype_to_howto (abfd, to_type); 1352 1353 if (from == NULL || to == NULL) 1354 return FALSE; 1355 1356 if (h) 1357 name = h->root.root.string; 1358 else 1359 { 1360 struct elf_x86_link_hash_table *htab; 1361 1362 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 1363 if (htab == NULL) 1364 name = "*unknown*"; 1365 else 1366 { 1367 Elf_Internal_Sym *isym; 1368 1369 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 1370 abfd, r_symndx); 1371 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); 1372 } 1373 } 1374 1375 _bfd_error_handler 1376 /* xgettext:c-format */ 1377 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64 1378 " in section `%pA' failed"), 1379 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec); 1380 bfd_set_error (bfd_error_bad_value); 1381 return FALSE; 1382 } 1383 1384 *r_type = to_type; 1385 return TRUE; 1386 } 1387 1388 /* Rename some of the generic section flags to better document how they 1389 are used here. */ 1390 #define check_relocs_failed sec_flg0 1391 1392 static bfd_boolean 1393 elf_x86_64_need_pic (struct bfd_link_info *info, 1394 bfd *input_bfd, asection *sec, 1395 struct elf_link_hash_entry *h, 1396 Elf_Internal_Shdr *symtab_hdr, 1397 Elf_Internal_Sym *isym, 1398 reloc_howto_type *howto) 1399 { 1400 const char *v = ""; 1401 const char *und = ""; 1402 const char *pic = ""; 1403 const char *object; 1404 1405 const char *name; 1406 if (h) 1407 { 1408 name = h->root.root.string; 1409 switch (ELF_ST_VISIBILITY (h->other)) 1410 { 1411 case STV_HIDDEN: 1412 v = _("hidden symbol "); 1413 break; 1414 case STV_INTERNAL: 1415 v = _("internal symbol "); 1416 break; 1417 case STV_PROTECTED: 1418 v = _("protected symbol "); 1419 break; 1420 default: 1421 if (((struct elf_x86_link_hash_entry *) h)->def_protected) 1422 v = _("protected symbol "); 1423 else 1424 v = _("symbol "); 1425 pic = _("; recompile with -fPIC"); 1426 break; 1427 } 1428 1429 if (!h->def_regular && !h->def_dynamic) 1430 und = _("undefined "); 1431 } 1432 else 1433 { 1434 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL); 1435 pic = _("; recompile with -fPIC"); 1436 } 1437 1438 if (bfd_link_dll (info)) 1439 object = _("a shared object"); 1440 else if (bfd_link_pie (info)) 1441 object = _("a PIE object"); 1442 else 1443 object = _("a PDE object"); 1444 1445 /* xgettext:c-format */ 1446 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can " 1447 "not be used when making %s%s"), 1448 input_bfd, howto->name, und, v, name, 1449 object, pic); 1450 bfd_set_error (bfd_error_bad_value); 1451 sec->check_relocs_failed = 1; 1452 return FALSE; 1453 } 1454 1455 /* With the local symbol, foo, we convert 1456 mov foo@GOTPCREL(%rip), %reg 1457 to 1458 lea foo(%rip), %reg 1459 and convert 1460 call/jmp *foo@GOTPCREL(%rip) 1461 to 1462 nop call foo/jmp foo nop 1463 When PIC is false, convert 1464 test %reg, foo@GOTPCREL(%rip) 1465 to 1466 test $foo, %reg 1467 and convert 1468 binop foo@GOTPCREL(%rip), %reg 1469 to 1470 binop $foo, %reg 1471 where binop is one of adc, add, and, cmp, or, sbb, sub, xor 1472 instructions. 
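For example, "call *foo@GOTPCREL(%rip)" (ff 15 <rel32>) is rewritten as a direct "call foo" (e8 <rel32>), padded with a one-byte nop or an addr32 prefix so that the instruction keeps its original six-byte length.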
*/ 1473 1474 static bfd_boolean 1475 elf_x86_64_convert_load_reloc (bfd *abfd, 1476 bfd_byte *contents, 1477 unsigned int *r_type_p, 1478 Elf_Internal_Rela *irel, 1479 struct elf_link_hash_entry *h, 1480 bfd_boolean *converted, 1481 struct bfd_link_info *link_info) 1482 { 1483 struct elf_x86_link_hash_table *htab; 1484 bfd_boolean is_pic; 1485 bfd_boolean no_overflow; 1486 bfd_boolean relocx; 1487 bfd_boolean to_reloc_pc32; 1488 asection *tsec; 1489 bfd_signed_vma raddend; 1490 unsigned int opcode; 1491 unsigned int modrm; 1492 unsigned int r_type = *r_type_p; 1493 unsigned int r_symndx; 1494 bfd_vma roff = irel->r_offset; 1495 1496 if (roff < (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2)) 1497 return TRUE; 1498 1499 raddend = irel->r_addend; 1500 /* Addend for 32-bit PC-relative relocation must be -4. */ 1501 if (raddend != -4) 1502 return TRUE; 1503 1504 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA); 1505 is_pic = bfd_link_pic (link_info); 1506 1507 relocx = (r_type == R_X86_64_GOTPCRELX 1508 || r_type == R_X86_64_REX_GOTPCRELX); 1509 1510 /* TRUE if --no-relax is used. */ 1511 no_overflow = link_info->disable_target_specific_optimizations > 1; 1512 1513 r_symndx = htab->r_sym (irel->r_info); 1514 1515 opcode = bfd_get_8 (abfd, contents + roff - 2); 1516 1517 /* Convert mov to lea even for R_X86_64_GOTPCREL; this conversion has long been supported. */ 1518 if (opcode != 0x8b) 1519 { 1520 /* Only convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX 1521 for call, jmp or one of adc, add, and, cmp, or, sbb, sub, 1522 test, xor instructions. */ 1523 if (!relocx) 1524 return TRUE; 1525 } 1526 1527 /* We convert only to R_X86_64_PC32: 1528 1. Branch. 1529 2. R_X86_64_GOTPCREL since we can't modify REX byte. 1530 3. no_overflow is true. 1531 4. PIC. 1532 */ 1533 to_reloc_pc32 = (opcode == 0xff 1534 || !relocx 1535 || no_overflow 1536 || is_pic); 1537 1538 /* Get the symbol referred to by the reloc. */ 1539 if (h == NULL) 1540 { 1541 Elf_Internal_Sym *isym 1542 = bfd_sym_from_r_symndx (&htab->sym_cache, abfd, r_symndx); 1543 1544 /* Skip relocation against undefined symbols. */ 1545 if (isym->st_shndx == SHN_UNDEF) 1546 return TRUE; 1547 1548 if (isym->st_shndx == SHN_ABS) 1549 tsec = bfd_abs_section_ptr; 1550 else if (isym->st_shndx == SHN_COMMON) 1551 tsec = bfd_com_section_ptr; 1552 else if (isym->st_shndx == SHN_X86_64_LCOMMON) 1553 tsec = &_bfd_elf_large_com_section; 1554 else 1555 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx); 1556 } 1557 else 1558 { 1559 /* An undefined weak symbol is only bound locally in an executable 1560 and its reference is resolved as 0 without relocation 1561 overflow. We can only perform this optimization for 1562 GOTPCRELX relocations since we need to modify the REX byte. 1563 It is OK to convert mov with R_X86_64_GOTPCREL to 1564 R_X86_64_PC32. */ 1565 bfd_boolean local_ref; 1566 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h); 1567 1568 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */ 1569 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h); 1570 if ((relocx || opcode == 0x8b) 1571 && (h->root.type == bfd_link_hash_undefweak 1572 && !eh->linker_def 1573 && local_ref)) 1574 { 1575 if (opcode == 0xff) 1576 { 1577 /* Skip for branch instructions since R_X86_64_PC32 1578 may overflow. */ 1579 if (no_overflow) 1580 return TRUE; 1581 } 1582 else if (relocx) 1583 { 1584 /* For non-branch instructions, we can convert to 1585 R_X86_64_32/R_X86_64_32S since we know if there 1586 is a REX byte.
*/ 1587 to_reloc_pc32 = FALSE; 1588 } 1589 1590 /* Since we don't know the current PC when PIC is true, 1591 we can't convert to R_X86_64_PC32. */ 1592 if (to_reloc_pc32 && is_pic) 1593 return TRUE; 1594 1595 goto convert; 1596 } 1597 /* Avoid optimizing GOTPCREL relocations against _DYNAMIC since 1598 ld.so may use its link-time address. */ 1599 else if (h->start_stop 1600 || eh->linker_def 1601 || ((h->def_regular 1602 || h->root.type == bfd_link_hash_defined 1603 || h->root.type == bfd_link_hash_defweak) 1604 && h != htab->elf.hdynamic 1605 && local_ref)) 1606 { 1607 /* bfd_link_hash_new or bfd_link_hash_undefined is 1608 set by an assignment in a linker script in 1609 bfd_elf_record_link_assignment. start_stop is set 1610 on __start_SECNAME/__stop_SECNAME which mark section 1611 SECNAME. */ 1612 if (h->start_stop 1613 || eh->linker_def 1614 || (h->def_regular 1615 && (h->root.type == bfd_link_hash_new 1616 || h->root.type == bfd_link_hash_undefined 1617 || ((h->root.type == bfd_link_hash_defined 1618 || h->root.type == bfd_link_hash_defweak) 1619 && h->root.u.def.section == bfd_und_section_ptr)))) 1620 { 1621 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */ 1622 if (no_overflow) 1623 return TRUE; 1624 goto convert; 1625 } 1626 tsec = h->root.u.def.section; 1627 } 1628 else 1629 return TRUE; 1630 } 1631 1632 /* Don't convert GOTPCREL relocation against a large section. */ 1633 if (elf_section_data (tsec) != NULL 1634 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0) 1635 return TRUE; 1636 1637 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */ 1638 if (no_overflow) 1639 return TRUE; 1640 1641 convert: 1642 if (opcode == 0xff) 1643 { 1644 /* We have "call/jmp *foo@GOTPCREL(%rip)". */ 1645 unsigned int nop; 1646 unsigned int disp; 1647 bfd_vma nop_offset; 1648 1649 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to 1650 R_X86_64_PC32. */ 1651 modrm = bfd_get_8 (abfd, contents + roff - 1); 1652 if (modrm == 0x25) 1653 { 1654 /* Convert to "jmp foo nop". */ 1655 modrm = 0xe9; 1656 nop = NOP_OPCODE; 1657 nop_offset = irel->r_offset + 3; 1658 disp = bfd_get_32 (abfd, contents + irel->r_offset); 1659 irel->r_offset -= 1; 1660 bfd_put_32 (abfd, disp, contents + irel->r_offset); 1661 } 1662 else 1663 { 1664 struct elf_x86_link_hash_entry *eh 1665 = (struct elf_x86_link_hash_entry *) h; 1666 1667 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE 1668 is a nop prefix. */ 1669 modrm = 0xe8; 1670 /* To support TLS optimization, always use addr32 prefix for 1671 "call *__tls_get_addr@GOTPCREL(%rip)". */ 1672 if (eh && eh->tls_get_addr) 1673 { 1674 nop = 0x67; 1675 nop_offset = irel->r_offset - 2; 1676 } 1677 else 1678 { 1679 nop = link_info->call_nop_byte; 1680 if (link_info->call_nop_as_suffix) 1681 { 1682 nop_offset = irel->r_offset + 3; 1683 disp = bfd_get_32 (abfd, contents + irel->r_offset); 1684 irel->r_offset -= 1; 1685 bfd_put_32 (abfd, disp, contents + irel->r_offset); 1686 } 1687 else 1688 nop_offset = irel->r_offset - 2; 1689 } 1690 } 1691 bfd_put_8 (abfd, nop, contents + nop_offset); 1692 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1); 1693 r_type = R_X86_64_PC32; 1694 } 1695 else 1696 { 1697 unsigned int rex; 1698 unsigned int rex_mask = REX_R; 1699 1700 if (r_type == R_X86_64_REX_GOTPCRELX) 1701 rex = bfd_get_8 (abfd, contents + roff - 3); 1702 else 1703 rex = 0; 1704 1705 if (opcode == 0x8b) 1706 { 1707 if (to_reloc_pc32) 1708 { 1709 /* Convert "mov foo@GOTPCREL(%rip), %reg" to 1710 "lea foo(%rip), %reg.
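Only the opcode byte changes (0x8b becomes 0x8d); the ModRM byte and any REX prefix are kept, and the relocation is rewritten as R_X86_64_PC32.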
*/ 1711 opcode = 0x8d; 1712 r_type = R_X86_64_PC32; 1713 } 1714 else 1715 { 1716 /* Convert "mov foo@GOTPCREL(%rip), %reg" to 1717 "mov $foo, %reg". */ 1718 opcode = 0xc7; 1719 modrm = bfd_get_8 (abfd, contents + roff - 1); 1720 modrm = 0xc0 | (modrm & 0x38) >> 3; 1721 if ((rex & REX_W) != 0 1722 && ABI_64_P (link_info->output_bfd)) 1723 { 1724 /* Keep the REX_W bit in REX byte for LP64. */ 1725 r_type = R_X86_64_32S; 1726 goto rewrite_modrm_rex; 1727 } 1728 else 1729 { 1730 /* If the REX_W bit in REX byte isn't needed, 1731 use R_X86_64_32 and clear the W bit to avoid 1732 sign-extend imm32 to imm64. */ 1733 r_type = R_X86_64_32; 1734 /* Clear the W bit in REX byte. */ 1735 rex_mask |= REX_W; 1736 goto rewrite_modrm_rex; 1737 } 1738 } 1739 } 1740 else 1741 { 1742 /* R_X86_64_PC32 isn't supported. */ 1743 if (to_reloc_pc32) 1744 return TRUE; 1745 1746 modrm = bfd_get_8 (abfd, contents + roff - 1); 1747 if (opcode == 0x85) 1748 { 1749 /* Convert "test %reg, foo@GOTPCREL(%rip)" to 1750 "test $foo, %reg". */ 1751 modrm = 0xc0 | (modrm & 0x38) >> 3; 1752 opcode = 0xf7; 1753 } 1754 else 1755 { 1756 /* Convert "binop foo@GOTPCREL(%rip), %reg" to 1757 "binop $foo, %reg". */ 1758 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c); 1759 opcode = 0x81; 1760 } 1761 1762 /* Use R_X86_64_32 with 32-bit operand to avoid relocation 1763 overflow when sign-extending imm32 to imm64. */ 1764 r_type = (rex & REX_W) != 0 ? R_X86_64_32S : R_X86_64_32; 1765 1766 rewrite_modrm_rex: 1767 bfd_put_8 (abfd, modrm, contents + roff - 1); 1768 1769 if (rex) 1770 { 1771 /* Move the R bit to the B bit in REX byte. */ 1772 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2; 1773 bfd_put_8 (abfd, rex, contents + roff - 3); 1774 } 1775 1776 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */ 1777 irel->r_addend = 0; 1778 } 1779 1780 bfd_put_8 (abfd, opcode, contents + roff - 2); 1781 } 1782 1783 *r_type_p = r_type; 1784 irel->r_info = htab->r_info (r_symndx, 1785 r_type | R_X86_64_converted_reloc_bit); 1786 1787 *converted = TRUE; 1788 1789 return TRUE; 1790 } 1791 1792 /* Look through the relocs for a section during the first phase, and 1793 calculate needed space in the global offset table, procedure 1794 linkage table, and dynamic reloc sections. */ 1795 1796 static bfd_boolean 1797 elf_x86_64_check_relocs (bfd *abfd, struct bfd_link_info *info, 1798 asection *sec, 1799 const Elf_Internal_Rela *relocs) 1800 { 1801 struct elf_x86_link_hash_table *htab; 1802 Elf_Internal_Shdr *symtab_hdr; 1803 struct elf_link_hash_entry **sym_hashes; 1804 const Elf_Internal_Rela *rel; 1805 const Elf_Internal_Rela *rel_end; 1806 asection *sreloc; 1807 bfd_byte *contents; 1808 bfd_boolean converted; 1809 1810 if (bfd_link_relocatable (info)) 1811 return TRUE; 1812 1813 /* Don't do anything special with non-loaded, non-alloced sections. 1814 In particular, any relocs in such sections should not affect GOT 1815 and PLT reference counting (ie. we don't allow them to create GOT 1816 or PLT entries), there's no possibility or desire to optimize TLS 1817 relocs, and there's not much point in propagating relocs to shared 1818 libs that the dynamic linker won't relocate. */ 1819 if ((sec->flags & SEC_ALLOC) == 0) 1820 return TRUE; 1821 1822 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 1823 if (htab == NULL) 1824 { 1825 sec->check_relocs_failed = 1; 1826 return FALSE; 1827 } 1828 1829 BFD_ASSERT (is_x86_elf (abfd, htab)); 1830 1831 /* Get the section contents. 
*/ 1832 if (elf_section_data (sec)->this_hdr.contents != NULL) 1833 contents = elf_section_data (sec)->this_hdr.contents; 1834 else if (!bfd_malloc_and_get_section (abfd, sec, &contents)) 1835 { 1836 sec->check_relocs_failed = 1; 1837 return FALSE; 1838 } 1839 1840 symtab_hdr = &elf_symtab_hdr (abfd); 1841 sym_hashes = elf_sym_hashes (abfd); 1842 1843 converted = FALSE; 1844 1845 sreloc = NULL; 1846 1847 rel_end = relocs + sec->reloc_count; 1848 for (rel = relocs; rel < rel_end; rel++) 1849 { 1850 unsigned int r_type; 1851 unsigned int r_symndx; 1852 struct elf_link_hash_entry *h; 1853 struct elf_x86_link_hash_entry *eh; 1854 Elf_Internal_Sym *isym; 1855 const char *name; 1856 bfd_boolean size_reloc; 1857 bfd_boolean converted_reloc; 1858 1859 r_symndx = htab->r_sym (rel->r_info); 1860 r_type = ELF32_R_TYPE (rel->r_info); 1861 1862 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) 1863 { 1864 /* xgettext:c-format */ 1865 _bfd_error_handler (_("%pB: bad symbol index: %d"), 1866 abfd, r_symndx); 1867 goto error_return; 1868 } 1869 1870 if (r_symndx < symtab_hdr->sh_info) 1871 { 1872 /* A local symbol. */ 1873 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 1874 abfd, r_symndx); 1875 if (isym == NULL) 1876 goto error_return; 1877 1878 /* Check relocation against local STT_GNU_IFUNC symbol. */ 1879 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) 1880 { 1881 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel, 1882 TRUE); 1883 if (h == NULL) 1884 goto error_return; 1885 1886 /* Fake a STT_GNU_IFUNC symbol. */ 1887 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr, 1888 isym, NULL); 1889 h->type = STT_GNU_IFUNC; 1890 h->def_regular = 1; 1891 h->ref_regular = 1; 1892 h->forced_local = 1; 1893 h->root.type = bfd_link_hash_defined; 1894 } 1895 else 1896 h = NULL; 1897 } 1898 else 1899 { 1900 isym = NULL; 1901 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 1902 while (h->root.type == bfd_link_hash_indirect 1903 || h->root.type == bfd_link_hash_warning) 1904 h = (struct elf_link_hash_entry *) h->root.u.i.link; 1905 } 1906 1907 /* Check invalid x32 relocations. */ 1908 if (!ABI_64_P (abfd)) 1909 switch (r_type) 1910 { 1911 default: 1912 break; 1913 1914 case R_X86_64_DTPOFF64: 1915 case R_X86_64_TPOFF64: 1916 case R_X86_64_PC64: 1917 case R_X86_64_GOTOFF64: 1918 case R_X86_64_GOT64: 1919 case R_X86_64_GOTPCREL64: 1920 case R_X86_64_GOTPC64: 1921 case R_X86_64_GOTPLT64: 1922 case R_X86_64_PLTOFF64: 1923 { 1924 if (h) 1925 name = h->root.root.string; 1926 else 1927 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, 1928 NULL); 1929 _bfd_error_handler 1930 /* xgettext:c-format */ 1931 (_("%pB: relocation %s against symbol `%s' isn't " 1932 "supported in x32 mode"), abfd, 1933 x86_64_elf_howto_table[r_type].name, name); 1934 bfd_set_error (bfd_error_bad_value); 1935 goto error_return; 1936 } 1937 break; 1938 } 1939 1940 if (h != NULL) 1941 { 1942 /* It is referenced by a non-shared object. 
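Recording ref_regular here matters later on, for instance when adjust_dynamic_symbol decides whether a symbol that is also defined in a shared object needs a PLT entry or a copy relocation.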
*/ 1943 h->ref_regular = 1; 1944 1945 if (h->type == STT_GNU_IFUNC) 1946 elf_tdata (info->output_bfd)->has_gnu_symbols 1947 |= elf_gnu_symbol_ifunc; 1948 } 1949 1950 converted_reloc = FALSE; 1951 if ((r_type == R_X86_64_GOTPCREL 1952 || r_type == R_X86_64_GOTPCRELX 1953 || r_type == R_X86_64_REX_GOTPCRELX) 1954 && (h == NULL || h->type != STT_GNU_IFUNC)) 1955 { 1956 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel; 1957 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type, 1958 irel, h, &converted_reloc, 1959 info)) 1960 goto error_return; 1961 1962 if (converted_reloc) 1963 converted = TRUE; 1964 } 1965 1966 if (! elf_x86_64_tls_transition (info, abfd, sec, contents, 1967 symtab_hdr, sym_hashes, 1968 &r_type, GOT_UNKNOWN, 1969 rel, rel_end, h, r_symndx, FALSE)) 1970 goto error_return; 1971 1972 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */ 1973 if (h == htab->elf.hgot) 1974 htab->got_referenced = TRUE; 1975 1976 eh = (struct elf_x86_link_hash_entry *) h; 1977 switch (r_type) 1978 { 1979 case R_X86_64_TLSLD: 1980 htab->tls_ld_or_ldm_got.refcount = 1; 1981 goto create_got; 1982 1983 case R_X86_64_TPOFF32: 1984 if (!bfd_link_executable (info) && ABI_64_P (abfd)) 1985 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym, 1986 &x86_64_elf_howto_table[r_type]); 1987 if (eh != NULL) 1988 eh->zero_undefweak &= 0x2; 1989 break; 1990 1991 case R_X86_64_GOTTPOFF: 1992 if (!bfd_link_executable (info)) 1993 info->flags |= DF_STATIC_TLS; 1994 /* Fall through */ 1995 1996 case R_X86_64_GOT32: 1997 case R_X86_64_GOTPCREL: 1998 case R_X86_64_GOTPCRELX: 1999 case R_X86_64_REX_GOTPCRELX: 2000 case R_X86_64_TLSGD: 2001 case R_X86_64_GOT64: 2002 case R_X86_64_GOTPCREL64: 2003 case R_X86_64_GOTPLT64: 2004 case R_X86_64_GOTPC32_TLSDESC: 2005 case R_X86_64_TLSDESC_CALL: 2006 /* This symbol requires a global offset table entry. */ 2007 { 2008 int tls_type, old_tls_type; 2009 2010 switch (r_type) 2011 { 2012 default: tls_type = GOT_NORMAL; break; 2013 case R_X86_64_TLSGD: tls_type = GOT_TLS_GD; break; 2014 case R_X86_64_GOTTPOFF: tls_type = GOT_TLS_IE; break; 2015 case R_X86_64_GOTPC32_TLSDESC: 2016 case R_X86_64_TLSDESC_CALL: 2017 tls_type = GOT_TLS_GDESC; break; 2018 } 2019 2020 if (h != NULL) 2021 { 2022 h->got.refcount = 1; 2023 old_tls_type = eh->tls_type; 2024 } 2025 else 2026 { 2027 bfd_signed_vma *local_got_refcounts; 2028 2029 /* This is a global offset table entry for a local symbol. */ 2030 local_got_refcounts = elf_local_got_refcounts (abfd); 2031 if (local_got_refcounts == NULL) 2032 { 2033 bfd_size_type size; 2034 2035 size = symtab_hdr->sh_info; 2036 size *= sizeof (bfd_signed_vma) 2037 + sizeof (bfd_vma) + sizeof (char); 2038 local_got_refcounts = ((bfd_signed_vma *) 2039 bfd_zalloc (abfd, size)); 2040 if (local_got_refcounts == NULL) 2041 goto error_return; 2042 elf_local_got_refcounts (abfd) = local_got_refcounts; 2043 elf_x86_local_tlsdesc_gotent (abfd) 2044 = (bfd_vma *) (local_got_refcounts + symtab_hdr->sh_info); 2045 elf_x86_local_got_tls_type (abfd) 2046 = (char *) (local_got_refcounts + 2 * symtab_hdr->sh_info); 2047 } 2048 local_got_refcounts[r_symndx] = 1; 2049 old_tls_type 2050 = elf_x86_local_got_tls_type (abfd) [r_symndx]; 2051 } 2052 2053 /* If a TLS symbol is accessed using IE at least once, 2054 there is no point to use dynamic model for it. */ 2055 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN 2056 && (! 
GOT_TLS_GD_ANY_P (old_tls_type) 2057 || tls_type != GOT_TLS_IE)) 2058 { 2059 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type)) 2060 tls_type = old_tls_type; 2061 else if (GOT_TLS_GD_ANY_P (old_tls_type) 2062 && GOT_TLS_GD_ANY_P (tls_type)) 2063 tls_type |= old_tls_type; 2064 else 2065 { 2066 if (h) 2067 name = h->root.root.string; 2068 else 2069 name = bfd_elf_sym_name (abfd, symtab_hdr, 2070 isym, NULL); 2071 _bfd_error_handler 2072 /* xgettext:c-format */ 2073 (_("%pB: '%s' accessed both as normal and" 2074 " thread local symbol"), 2075 abfd, name); 2076 bfd_set_error (bfd_error_bad_value); 2077 goto error_return; 2078 } 2079 } 2080 2081 if (old_tls_type != tls_type) 2082 { 2083 if (eh != NULL) 2084 eh->tls_type = tls_type; 2085 else 2086 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type; 2087 } 2088 } 2089 /* Fall through */ 2090 2091 case R_X86_64_GOTOFF64: 2092 case R_X86_64_GOTPC32: 2093 case R_X86_64_GOTPC64: 2094 create_got: 2095 if (eh != NULL) 2096 eh->zero_undefweak &= 0x2; 2097 break; 2098 2099 case R_X86_64_PLT32: 2100 case R_X86_64_PLT32_BND: 2101 /* This symbol requires a procedure linkage table entry. We 2102 actually build the entry in adjust_dynamic_symbol, 2103 because this might be a case of linking PIC code which is 2104 never referenced by a dynamic object, in which case we 2105 don't need to generate a procedure linkage table entry 2106 after all. */ 2107 2108 /* If this is a local symbol, we resolve it directly without 2109 creating a procedure linkage table entry. */ 2110 if (h == NULL) 2111 continue; 2112 2113 eh->zero_undefweak &= 0x2; 2114 h->needs_plt = 1; 2115 h->plt.refcount = 1; 2116 break; 2117 2118 case R_X86_64_PLTOFF64: 2119 /* This tries to form the 'address' of a function relative 2120 to GOT. For global symbols we need a PLT entry. */ 2121 if (h != NULL) 2122 { 2123 h->needs_plt = 1; 2124 h->plt.refcount = 1; 2125 } 2126 goto create_got; 2127 2128 case R_X86_64_SIZE32: 2129 case R_X86_64_SIZE64: 2130 size_reloc = TRUE; 2131 goto do_size; 2132 2133 case R_X86_64_32: 2134 if (!ABI_64_P (abfd)) 2135 goto pointer; 2136 /* Fall through. */ 2137 case R_X86_64_8: 2138 case R_X86_64_16: 2139 case R_X86_64_32S: 2140 /* Check relocation overflow as these relocs may lead to 2141 run-time relocation overflow. Don't error out for 2142 sections we don't care about, such as debug sections or 2143 when relocation overflow check is disabled. */ 2144 if (!info->no_reloc_overflow_check 2145 && !converted_reloc 2146 && (bfd_link_pic (info) 2147 || (bfd_link_executable (info) 2148 && h != NULL 2149 && !h->def_regular 2150 && h->def_dynamic 2151 && (sec->flags & SEC_READONLY) == 0))) 2152 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym, 2153 &x86_64_elf_howto_table[r_type]); 2154 /* Fall through. */ 2155 2156 case R_X86_64_PC8: 2157 case R_X86_64_PC16: 2158 case R_X86_64_PC32: 2159 case R_X86_64_PC32_BND: 2160 case R_X86_64_PC64: 2161 case R_X86_64_64: 2162 pointer: 2163 if (eh != NULL && (sec->flags & SEC_CODE) != 0) 2164 eh->zero_undefweak |= 0x2; 2165 /* We are called after all symbols have been resolved. Only 2166 relocation against STT_GNU_IFUNC symbol must go through 2167 PLT. */ 2168 if (h != NULL 2169 && (bfd_link_executable (info) 2170 || h->type == STT_GNU_IFUNC)) 2171 { 2172 bfd_boolean func_pointer_ref = FALSE; 2173 2174 if (r_type == R_X86_64_PC32) 2175 { 2176 /* Since something like ".long foo - ." may be used 2177 as pointer, make sure that PLT is used if foo is 2178 a function defined in a shared library. 
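A hypothetical instance: a position-independent dispatch table built as ".long handler - ." in .rodata needs "handler" to have a link-time-constant address so that the difference can be computed when the sections are laid out. If handler is defined in a shared library, only a PLT entry in the executable provides such a constant address, hence the pointer_equality_needed and PLT bookkeeping below.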
*/ 2179 if ((sec->flags & SEC_CODE) == 0) 2180 { 2181 h->pointer_equality_needed = 1; 2182 if (bfd_link_pie (info) 2183 && h->type == STT_FUNC 2184 && !h->def_regular 2185 && h->def_dynamic) 2186 { 2187 h->needs_plt = 1; 2188 h->plt.refcount = 1; 2189 } 2190 } 2191 } 2192 else if (r_type != R_X86_64_PC32_BND 2193 && r_type != R_X86_64_PC64) 2194 { 2195 h->pointer_equality_needed = 1; 2196 /* At run-time, R_X86_64_64 can be resolved for both 2197 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S 2198 can only be resolved for x32. */ 2199 if ((sec->flags & SEC_READONLY) == 0 2200 && (r_type == R_X86_64_64 2201 || (!ABI_64_P (abfd) 2202 && (r_type == R_X86_64_32 2203 || r_type == R_X86_64_32S)))) 2204 func_pointer_ref = TRUE; 2205 } 2206 2207 if (!func_pointer_ref) 2208 { 2209 /* If this reloc is in a read-only section, we might 2210 need a copy reloc. We can't check reliably at this 2211 stage whether the section is read-only, as input 2212 sections have not yet been mapped to output sections. 2213 Tentatively set the flag for now, and correct in 2214 adjust_dynamic_symbol. */ 2215 h->non_got_ref = 1; 2216 2217 /* We may need a .plt entry if the symbol is a function 2218 defined in a shared lib or is a function referenced 2219 from the code or read-only section. */ 2220 if (!h->def_regular 2221 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0) 2222 h->plt.refcount = 1; 2223 } 2224 } 2225 2226 size_reloc = FALSE; 2227 do_size: 2228 if (NEED_DYNAMIC_RELOCATION_P (info, TRUE, h, sec, r_type, 2229 htab->pointer_r_type)) 2230 { 2231 struct elf_dyn_relocs *p; 2232 struct elf_dyn_relocs **head; 2233 2234 /* We must copy these reloc types into the output file. 2235 Create a reloc section in dynobj and make room for 2236 this reloc. */ 2237 if (sreloc == NULL) 2238 { 2239 sreloc = _bfd_elf_make_dynamic_reloc_section 2240 (sec, htab->elf.dynobj, ABI_64_P (abfd) ? 3 : 2, 2241 abfd, /*rela?*/ TRUE); 2242 2243 if (sreloc == NULL) 2244 goto error_return; 2245 } 2246 2247 /* If this is a global symbol, we count the number of 2248 relocations we need for this symbol. */ 2249 if (h != NULL) 2250 head = &eh->dyn_relocs; 2251 else 2252 { 2253 /* Track dynamic relocs needed for local syms too. 2254 We really need local syms available to do this 2255 easily. Oh well. */ 2256 asection *s; 2257 void **vpp; 2258 2259 isym = bfd_sym_from_r_symndx (&htab->sym_cache, 2260 abfd, r_symndx); 2261 if (isym == NULL) 2262 goto error_return; 2263 2264 s = bfd_section_from_elf_index (abfd, isym->st_shndx); 2265 if (s == NULL) 2266 s = sec; 2267 2268 /* Beware of type punned pointers vs strict aliasing 2269 rules. */ 2270 vpp = &(elf_section_data (s)->local_dynrel); 2271 head = (struct elf_dyn_relocs **)vpp; 2272 } 2273 2274 p = *head; 2275 if (p == NULL || p->sec != sec) 2276 { 2277 bfd_size_type amt = sizeof *p; 2278 2279 p = ((struct elf_dyn_relocs *) 2280 bfd_alloc (htab->elf.dynobj, amt)); 2281 if (p == NULL) 2282 goto error_return; 2283 p->next = *head; 2284 *head = p; 2285 p->sec = sec; 2286 p->count = 0; 2287 p->pc_count = 0; 2288 } 2289 2290 p->count += 1; 2291 /* Count size relocation as PC-relative relocation. */ 2292 if (X86_PCREL_TYPE_P (r_type) || size_reloc) 2293 p->pc_count += 1; 2294 } 2295 break; 2296 2297 /* This relocation describes the C++ object vtable hierarchy. 2298 Reconstruct it for later use during GC. 
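These relocations come from the assembler's .vtable_inherit and .vtable_entry directives: VTINHERIT links a child vtable to its parent, while VTENTRY, whose addend gives the slot offset, marks a vtable slot as actually used, so that garbage collection can discard virtual functions never referenced through any vtable.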
*/ 2299 case R_X86_64_GNU_VTINHERIT: 2300 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) 2301 goto error_return; 2302 break; 2303 2304 /* This relocation describes which C++ vtable entries are actually 2305 used. Record for later use during GC. */ 2306 case R_X86_64_GNU_VTENTRY: 2307 BFD_ASSERT (h != NULL); 2308 if (h != NULL 2309 && !bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) 2310 goto error_return; 2311 break; 2312 2313 default: 2314 break; 2315 } 2316 } 2317 2318 if (elf_section_data (sec)->this_hdr.contents != contents) 2319 { 2320 if (!converted && !info->keep_memory) 2321 free (contents); 2322 else 2323 { 2324 /* Cache the section contents for elf_link_input_bfd if any 2325 load is converted or --no-keep-memory isn't used. */ 2326 elf_section_data (sec)->this_hdr.contents = contents; 2327 } 2328 } 2329 2330 /* Cache relocations if any load is converted. */ 2331 if (elf_section_data (sec)->relocs != relocs && converted) 2332 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs; 2333 2334 return TRUE; 2335 2336 error_return: 2337 if (elf_section_data (sec)->this_hdr.contents != contents) 2338 free (contents); 2339 sec->check_relocs_failed = 1; 2340 return FALSE; 2341 } 2342 2343 /* Return the relocation value for @tpoff relocation 2344 if STT_TLS virtual address is ADDRESS. */ 2345 2346 static bfd_vma 2347 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address) 2348 { 2349 struct elf_link_hash_table *htab = elf_hash_table (info); 2350 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd); 2351 bfd_vma static_tls_size; 2352 2353 /* If tls_segment is NULL, we should have signalled an error already. */ 2354 if (htab->tls_sec == NULL) 2355 return 0; 2356 2357 /* Consider special static TLS alignment requirements. */ 2358 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment); 2359 return address - static_tls_size - htab->tls_sec->vma; 2360 } 2361 2362 /* Relocate an x86_64 ELF section. */ 2363 2364 static bfd_boolean 2365 elf_x86_64_relocate_section (bfd *output_bfd, 2366 struct bfd_link_info *info, 2367 bfd *input_bfd, 2368 asection *input_section, 2369 bfd_byte *contents, 2370 Elf_Internal_Rela *relocs, 2371 Elf_Internal_Sym *local_syms, 2372 asection **local_sections) 2373 { 2374 struct elf_x86_link_hash_table *htab; 2375 Elf_Internal_Shdr *symtab_hdr; 2376 struct elf_link_hash_entry **sym_hashes; 2377 bfd_vma *local_got_offsets; 2378 bfd_vma *local_tlsdesc_gotents; 2379 Elf_Internal_Rela *rel; 2380 Elf_Internal_Rela *wrel; 2381 Elf_Internal_Rela *relend; 2382 unsigned int plt_entry_size; 2383 2384 /* Skip if check_relocs failed. 
*/ 2385 if (input_section->check_relocs_failed) 2386 return FALSE; 2387 2388 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 2389 if (htab == NULL) 2390 return FALSE; 2391 2392 BFD_ASSERT (is_x86_elf (input_bfd, htab)); 2393 2394 plt_entry_size = htab->plt.plt_entry_size; 2395 symtab_hdr = &elf_symtab_hdr (input_bfd); 2396 sym_hashes = elf_sym_hashes (input_bfd); 2397 local_got_offsets = elf_local_got_offsets (input_bfd); 2398 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd); 2399 2400 _bfd_x86_elf_set_tls_module_base (info); 2401 2402 rel = wrel = relocs; 2403 relend = relocs + input_section->reloc_count; 2404 for (; rel < relend; wrel++, rel++) 2405 { 2406 unsigned int r_type, r_type_tls; 2407 reloc_howto_type *howto; 2408 unsigned long r_symndx; 2409 struct elf_link_hash_entry *h; 2410 struct elf_x86_link_hash_entry *eh; 2411 Elf_Internal_Sym *sym; 2412 asection *sec; 2413 bfd_vma off, offplt, plt_offset; 2414 bfd_vma relocation; 2415 bfd_boolean unresolved_reloc; 2416 bfd_reloc_status_type r; 2417 int tls_type; 2418 asection *base_got, *resolved_plt; 2419 bfd_vma st_size; 2420 bfd_boolean resolved_to_zero; 2421 bfd_boolean relative_reloc; 2422 bfd_boolean converted_reloc; 2423 bfd_boolean need_copy_reloc_in_pie; 2424 2425 r_type = ELF32_R_TYPE (rel->r_info); 2426 if (r_type == (int) R_X86_64_GNU_VTINHERIT 2427 || r_type == (int) R_X86_64_GNU_VTENTRY) 2428 { 2429 if (wrel != rel) 2430 *wrel = *rel; 2431 continue; 2432 } 2433 2434 r_symndx = htab->r_sym (rel->r_info); 2435 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0; 2436 if (converted_reloc) 2437 { 2438 r_type &= ~R_X86_64_converted_reloc_bit; 2439 rel->r_info = htab->r_info (r_symndx, r_type); 2440 } 2441 2442 if (r_type >= (int) R_X86_64_standard) 2443 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type); 2444 2445 if (r_type != (int) R_X86_64_32 2446 || ABI_64_P (output_bfd)) 2447 howto = x86_64_elf_howto_table + r_type; 2448 else 2449 howto = (x86_64_elf_howto_table 2450 + ARRAY_SIZE (x86_64_elf_howto_table) - 1); 2451 h = NULL; 2452 sym = NULL; 2453 sec = NULL; 2454 unresolved_reloc = FALSE; 2455 if (r_symndx < symtab_hdr->sh_info) 2456 { 2457 sym = local_syms + r_symndx; 2458 sec = local_sections[r_symndx]; 2459 2460 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, 2461 &sec, rel); 2462 st_size = sym->st_size; 2463 2464 /* Relocate against local STT_GNU_IFUNC symbol. */ 2465 if (!bfd_link_relocatable (info) 2466 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC) 2467 { 2468 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd, 2469 rel, FALSE); 2470 if (h == NULL) 2471 abort (); 2472 2473 /* Set STT_GNU_IFUNC symbol value. */ 2474 h->root.u.def.value = sym->st_value; 2475 h->root.u.def.section = sec; 2476 } 2477 } 2478 else 2479 { 2480 bfd_boolean warned ATTRIBUTE_UNUSED; 2481 bfd_boolean ignored ATTRIBUTE_UNUSED; 2482 2483 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, 2484 r_symndx, symtab_hdr, sym_hashes, 2485 h, sec, relocation, 2486 unresolved_reloc, warned, ignored); 2487 st_size = h->size; 2488 } 2489 2490 if (sec != NULL && discarded_section (sec)) 2491 { 2492 _bfd_clear_contents (howto, input_bfd, input_section, 2493 contents + rel->r_offset); 2494 wrel->r_offset = rel->r_offset; 2495 wrel->r_info = 0; 2496 wrel->r_addend = 0; 2497 2498 /* For ld -r, remove relocations in debug sections against 2499 sections defined in discarded sections. Not done for 2500 eh_frame editing code expects to be present. 
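(In other words, relocations in .eh_frame are deliberately kept, because the .eh_frame editing code still expects to find them.)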
*/ 2501 if (bfd_link_relocatable (info) 2502 && (input_section->flags & SEC_DEBUGGING)) 2503 wrel--; 2504 2505 continue; 2506 } 2507 2508 if (bfd_link_relocatable (info)) 2509 { 2510 if (wrel != rel) 2511 *wrel = *rel; 2512 continue; 2513 } 2514 2515 if (rel->r_addend == 0 && !ABI_64_P (output_bfd)) 2516 { 2517 if (r_type == R_X86_64_64) 2518 { 2519 /* For x32, treat R_X86_64_64 like R_X86_64_32 and 2520 zero-extend it to 64bit if addend is zero. */ 2521 r_type = R_X86_64_32; 2522 memset (contents + rel->r_offset + 4, 0, 4); 2523 } 2524 else if (r_type == R_X86_64_SIZE64) 2525 { 2526 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and 2527 zero-extend it to 64bit if addend is zero. */ 2528 r_type = R_X86_64_SIZE32; 2529 memset (contents + rel->r_offset + 4, 0, 4); 2530 } 2531 } 2532 2533 eh = (struct elf_x86_link_hash_entry *) h; 2534 2535 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle 2536 it here if it is defined in a non-shared object. */ 2537 if (h != NULL 2538 && h->type == STT_GNU_IFUNC 2539 && h->def_regular) 2540 { 2541 bfd_vma plt_index; 2542 const char *name; 2543 2544 if ((input_section->flags & SEC_ALLOC) == 0) 2545 { 2546 /* If this is a SHT_NOTE section without SHF_ALLOC, treat 2547 STT_GNU_IFUNC symbol as STT_FUNC. */ 2548 if (elf_section_type (input_section) == SHT_NOTE) 2549 goto skip_ifunc; 2550 /* Dynamic relocs are not propagated for SEC_DEBUGGING 2551 sections because such sections are not SEC_ALLOC and 2552 thus ld.so will not process them. */ 2553 if ((input_section->flags & SEC_DEBUGGING) != 0) 2554 continue; 2555 abort (); 2556 } 2557 2558 switch (r_type) 2559 { 2560 default: 2561 break; 2562 2563 case R_X86_64_GOTPCREL: 2564 case R_X86_64_GOTPCRELX: 2565 case R_X86_64_REX_GOTPCRELX: 2566 case R_X86_64_GOTPCREL64: 2567 base_got = htab->elf.sgot; 2568 off = h->got.offset; 2569 2570 if (base_got == NULL) 2571 abort (); 2572 2573 if (off == (bfd_vma) -1) 2574 { 2575 /* We can't use h->got.offset here to save state, or 2576 even just remember the offset, as finish_dynamic_symbol 2577 would use that as offset into .got. */ 2578 2579 if (h->plt.offset == (bfd_vma) -1) 2580 abort (); 2581 2582 if (htab->elf.splt != NULL) 2583 { 2584 plt_index = (h->plt.offset / plt_entry_size 2585 - htab->plt.has_plt0); 2586 off = (plt_index + 3) * GOT_ENTRY_SIZE; 2587 base_got = htab->elf.sgotplt; 2588 } 2589 else 2590 { 2591 plt_index = h->plt.offset / plt_entry_size; 2592 off = plt_index * GOT_ENTRY_SIZE; 2593 base_got = htab->elf.igotplt; 2594 } 2595 2596 if (h->dynindx == -1 2597 || h->forced_local 2598 || info->symbolic) 2599 { 2600 /* This references the local definition. We must 2601 initialize this entry in the global offset table. 2602 Since the offset must always be a multiple of 8, 2603 we use the least significant bit to record 2604 whether we have initialized it already. 2605 2606 When doing a dynamic link, we create a .rela.got 2607 relocation entry to initialize the value. This 2608 is done in the finish_dynamic_symbol routine. */ 2609 if ((off & 1) != 0) 2610 off &= ~1; 2611 else 2612 { 2613 bfd_put_64 (output_bfd, relocation, 2614 base_got->contents + off); 2615 /* Note that this is harmless for the GOTPLT64 2616 case, as -1 | 1 still is -1. */ 2617 h->got.offset |= 1; 2618 } 2619 } 2620 } 2621 2622 relocation = (base_got->output_section->vma 2623 + base_got->output_offset + off); 2624 2625 goto do_relocation; 2626 } 2627 2628 if (h->plt.offset == (bfd_vma) -1) 2629 { 2630 /* Handle static pointers of STT_GNU_IFUNC symbols.
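A sketch of how this case arises (hypothetical source, not from any testcase):
   extern void impl (void) __attribute__ ((ifunc ("impl_resolver")));
   void (*fptr) (void) = impl;
The initialized pointer in .data carries htab->pointer_r_type (R_X86_64_64 for LP64) against the IFUNC symbol. If no PLT entry was allocated for the symbol (h->plt.offset stayed -1), the reference is routed through do_ifunc_pointer below and becomes a dynamic relocation that the loader resolves by running the resolver.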
*/ 2631 if (r_type == htab->pointer_r_type 2632 && (input_section->flags & SEC_CODE) == 0) 2633 goto do_ifunc_pointer; 2634 goto bad_ifunc_reloc; 2635 } 2636 2637 /* STT_GNU_IFUNC symbol must go through PLT. */ 2638 if (htab->elf.splt != NULL) 2639 { 2640 if (htab->plt_second != NULL) 2641 { 2642 resolved_plt = htab->plt_second; 2643 plt_offset = eh->plt_second.offset; 2644 } 2645 else 2646 { 2647 resolved_plt = htab->elf.splt; 2648 plt_offset = h->plt.offset; 2649 } 2650 } 2651 else 2652 { 2653 resolved_plt = htab->elf.iplt; 2654 plt_offset = h->plt.offset; 2655 } 2656 2657 relocation = (resolved_plt->output_section->vma 2658 + resolved_plt->output_offset + plt_offset); 2659 2660 switch (r_type) 2661 { 2662 default: 2663 bad_ifunc_reloc: 2664 if (h->root.root.string) 2665 name = h->root.root.string; 2666 else 2667 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, 2668 NULL); 2669 _bfd_error_handler 2670 /* xgettext:c-format */ 2671 (_("%pB: relocation %s against STT_GNU_IFUNC " 2672 "symbol `%s' isn't supported"), input_bfd, 2673 howto->name, name); 2674 bfd_set_error (bfd_error_bad_value); 2675 return FALSE; 2676 2677 case R_X86_64_32S: 2678 if (bfd_link_pic (info)) 2679 abort (); 2680 goto do_relocation; 2681 2682 case R_X86_64_32: 2683 if (ABI_64_P (output_bfd)) 2684 goto do_relocation; 2685 /* FALLTHROUGH */ 2686 case R_X86_64_64: 2687 do_ifunc_pointer: 2688 if (rel->r_addend != 0) 2689 { 2690 if (h->root.root.string) 2691 name = h->root.root.string; 2692 else 2693 name = bfd_elf_sym_name (input_bfd, symtab_hdr, 2694 sym, NULL); 2695 _bfd_error_handler 2696 /* xgettext:c-format */ 2697 (_("%pB: relocation %s against STT_GNU_IFUNC " 2698 "symbol `%s' has non-zero addend: %" PRId64), 2699 input_bfd, howto->name, name, (int64_t) rel->r_addend); 2700 bfd_set_error (bfd_error_bad_value); 2701 return FALSE; 2702 } 2703 2704 /* Generate dynamic relocation only when there is a 2705 non-GOT reference in a shared object or there is no 2706 PLT. */ 2707 if ((bfd_link_pic (info) && h->non_got_ref) 2708 || h->plt.offset == (bfd_vma) -1) 2709 { 2710 Elf_Internal_Rela outrel; 2711 asection *sreloc; 2712 2713 /* Need a dynamic relocation to get the real function 2714 address. */ 2715 outrel.r_offset = _bfd_elf_section_offset (output_bfd, 2716 info, 2717 input_section, 2718 rel->r_offset); 2719 if (outrel.r_offset == (bfd_vma) -1 2720 || outrel.r_offset == (bfd_vma) -2) 2721 abort (); 2722 2723 outrel.r_offset += (input_section->output_section->vma 2724 + input_section->output_offset); 2725 2726 if (POINTER_LOCAL_IFUNC_P (info, h)) 2727 { 2728 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), 2729 h->root.root.string, 2730 h->root.u.def.section->owner); 2731 2732 /* This symbol is resolved locally. */ 2733 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE); 2734 outrel.r_addend = (h->root.u.def.value 2735 + h->root.u.def.section->output_section->vma 2736 + h->root.u.def.section->output_offset); 2737 } 2738 else 2739 { 2740 outrel.r_info = htab->r_info (h->dynindx, r_type); 2741 outrel.r_addend = 0; 2742 } 2743 2744 /* Dynamic relocations are stored in 2745 1. .rela.ifunc section in PIC object. 2746 2. .rela.got section in dynamic executable. 2747 3. .rela.iplt section in static executable.
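When the IFUNC resolves locally, the entry written here is R_X86_64_IRELATIVE, which tells the dynamic linker (or, for a static executable, the startup code that walks .rela.iplt) to call the resolver found at r_addend and store the function address it returns at r_offset.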
*/ 2748 if (bfd_link_pic (info)) 2749 sreloc = htab->elf.irelifunc; 2750 else if (htab->elf.splt != NULL) 2751 sreloc = htab->elf.srelgot; 2752 else 2753 sreloc = htab->elf.irelplt; 2754 elf_append_rela (output_bfd, sreloc, &outrel); 2755 2756 /* If this reloc is against an external symbol, we 2757 do not want to fiddle with the addend. Otherwise, 2758 we need to include the symbol value so that it 2759 becomes an addend for the dynamic reloc. For an 2760 internal symbol, we have updated addend. */ 2761 continue; 2762 } 2763 /* FALLTHROUGH */ 2764 case R_X86_64_PC32: 2765 case R_X86_64_PC32_BND: 2766 case R_X86_64_PC64: 2767 case R_X86_64_PLT32: 2768 case R_X86_64_PLT32_BND: 2769 goto do_relocation; 2770 } 2771 } 2772 2773 skip_ifunc: 2774 resolved_to_zero = (eh != NULL 2775 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh)); 2776 2777 /* When generating a shared object, the relocations handled here are 2778 copied into the output file to be resolved at run time. */ 2779 switch (r_type) 2780 { 2781 case R_X86_64_GOT32: 2782 case R_X86_64_GOT64: 2783 /* Relocation is to the entry for this symbol in the global 2784 offset table. */ 2785 case R_X86_64_GOTPCREL: 2786 case R_X86_64_GOTPCRELX: 2787 case R_X86_64_REX_GOTPCRELX: 2788 case R_X86_64_GOTPCREL64: 2789 /* Use global offset table entry as symbol value. */ 2790 case R_X86_64_GOTPLT64: 2791 /* This is obsolete and treated the same as GOT64. */ 2792 base_got = htab->elf.sgot; 2793 2794 if (htab->elf.sgot == NULL) 2795 abort (); 2796 2797 relative_reloc = FALSE; 2798 if (h != NULL) 2799 { 2800 off = h->got.offset; 2801 if (h->needs_plt 2802 && h->plt.offset != (bfd_vma)-1 2803 && off == (bfd_vma)-1) 2804 { 2805 /* We can't use h->got.offset here to save 2806 state, or even just remember the offset, as 2807 finish_dynamic_symbol would use that as offset into 2808 .got. */ 2809 bfd_vma plt_index = (h->plt.offset / plt_entry_size 2810 - htab->plt.has_plt0); 2811 off = (plt_index + 3) * GOT_ENTRY_SIZE; 2812 base_got = htab->elf.sgotplt; 2813 } 2814 2815 if (RESOLVED_LOCALLY_P (info, h, htab)) 2816 { 2817 /* We must initialize this entry in the global offset 2818 table. Since the offset must always be a multiple 2819 of 8, we use the least significant bit to record 2820 whether we have initialized it already. 2821 2822 When doing a dynamic link, we create a .rela.got 2823 relocation entry to initialize the value. This is 2824 done in the finish_dynamic_symbol routine. */ 2825 if ((off & 1) != 0) 2826 off &= ~1; 2827 else 2828 { 2829 bfd_put_64 (output_bfd, relocation, 2830 base_got->contents + off); 2831 /* Note that this is harmless for the GOTPLT64 case, 2832 as -1 | 1 still is -1. */ 2833 h->got.offset |= 1; 2834 2835 if (GENERATE_RELATIVE_RELOC_P (info, h)) 2836 { 2837 /* If this symbol isn't dynamic in PIC, 2838 generate R_X86_64_RELATIVE here. */ 2839 eh->no_finish_dynamic_symbol = 1; 2840 relative_reloc = TRUE; 2841 } 2842 } 2843 } 2844 else 2845 unresolved_reloc = FALSE; 2846 } 2847 else 2848 { 2849 if (local_got_offsets == NULL) 2850 abort (); 2851 2852 off = local_got_offsets[r_symndx]; 2853 2854 /* The offset must always be a multiple of 8. We use 2855 the least significant bit to record whether we have 2856 already generated the necessary reloc. 
*/ 2857 if ((off & 1) != 0) 2858 off &= ~1; 2859 else 2860 { 2861 bfd_put_64 (output_bfd, relocation, 2862 base_got->contents + off); 2863 local_got_offsets[r_symndx] |= 1; 2864 2865 if (bfd_link_pic (info)) 2866 relative_reloc = TRUE; 2867 } 2868 } 2869 2870 if (relative_reloc) 2871 { 2872 asection *s; 2873 Elf_Internal_Rela outrel; 2874 2875 /* We need to generate a R_X86_64_RELATIVE reloc 2876 for the dynamic linker. */ 2877 s = htab->elf.srelgot; 2878 if (s == NULL) 2879 abort (); 2880 2881 outrel.r_offset = (base_got->output_section->vma 2882 + base_got->output_offset 2883 + off); 2884 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); 2885 outrel.r_addend = relocation; 2886 elf_append_rela (output_bfd, s, &outrel); 2887 } 2888 2889 if (off >= (bfd_vma) -2) 2890 abort (); 2891 2892 relocation = base_got->output_section->vma 2893 + base_got->output_offset + off; 2894 if (r_type != R_X86_64_GOTPCREL 2895 && r_type != R_X86_64_GOTPCRELX 2896 && r_type != R_X86_64_REX_GOTPCRELX 2897 && r_type != R_X86_64_GOTPCREL64) 2898 relocation -= htab->elf.sgotplt->output_section->vma 2899 - htab->elf.sgotplt->output_offset; 2900 2901 break; 2902 2903 case R_X86_64_GOTOFF64: 2904 /* Relocation is relative to the start of the global offset 2905 table. */ 2906 2907 /* Check to make sure it isn't a protected function or data 2908 symbol for shared library since it may not be local when 2909 used as function address or with copy relocation. We also 2910 need to make sure that a symbol is referenced locally. */ 2911 if (bfd_link_pic (info) && h) 2912 { 2913 if (!h->def_regular) 2914 { 2915 const char *v; 2916 2917 switch (ELF_ST_VISIBILITY (h->other)) 2918 { 2919 case STV_HIDDEN: 2920 v = _("hidden symbol"); 2921 break; 2922 case STV_INTERNAL: 2923 v = _("internal symbol"); 2924 break; 2925 case STV_PROTECTED: 2926 v = _("protected symbol"); 2927 break; 2928 default: 2929 v = _("symbol"); 2930 break; 2931 } 2932 2933 _bfd_error_handler 2934 /* xgettext:c-format */ 2935 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s" 2936 " `%s' can not be used when making a shared object"), 2937 input_bfd, v, h->root.root.string); 2938 bfd_set_error (bfd_error_bad_value); 2939 return FALSE; 2940 } 2941 else if (!bfd_link_executable (info) 2942 && !SYMBOL_REFERENCES_LOCAL_P (info, h) 2943 && (h->type == STT_FUNC 2944 || h->type == STT_OBJECT) 2945 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED) 2946 { 2947 _bfd_error_handler 2948 /* xgettext:c-format */ 2949 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s" 2950 " `%s' can not be used when making a shared object"), 2951 input_bfd, 2952 h->type == STT_FUNC ? "function" : "data", 2953 h->root.root.string); 2954 bfd_set_error (bfd_error_bad_value); 2955 return FALSE; 2956 } 2957 } 2958 2959 /* Note that sgot is not involved in this 2960 calculation. We always want the start of .got.plt. If we 2961 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is 2962 permitted by the ABI, we might have to change this 2963 calculation. */ 2964 relocation -= htab->elf.sgotplt->output_section->vma 2965 + htab->elf.sgotplt->output_offset; 2966 break; 2967 2968 case R_X86_64_GOTPC32: 2969 case R_X86_64_GOTPC64: 2970 /* Use global offset table as symbol value. */ 2971 relocation = htab->elf.sgotplt->output_section->vma 2972 + htab->elf.sgotplt->output_offset; 2973 unresolved_reloc = FALSE; 2974 break; 2975 2976 case R_X86_64_PLTOFF64: 2977 /* Relocation is PLT entry relative to GOT. For local 2978 symbols it's the symbol itself relative to GOT. 
*/ 2979 if (h != NULL 2980 /* See PLT32 handling. */ 2981 && (h->plt.offset != (bfd_vma) -1 2982 || eh->plt_got.offset != (bfd_vma) -1) 2983 && htab->elf.splt != NULL) 2984 { 2985 if (eh->plt_got.offset != (bfd_vma) -1) 2986 { 2987 /* Use the GOT PLT. */ 2988 resolved_plt = htab->plt_got; 2989 plt_offset = eh->plt_got.offset; 2990 } 2991 else if (htab->plt_second != NULL) 2992 { 2993 resolved_plt = htab->plt_second; 2994 plt_offset = eh->plt_second.offset; 2995 } 2996 else 2997 { 2998 resolved_plt = htab->elf.splt; 2999 plt_offset = h->plt.offset; 3000 } 3001 3002 relocation = (resolved_plt->output_section->vma 3003 + resolved_plt->output_offset 3004 + plt_offset); 3005 unresolved_reloc = FALSE; 3006 } 3007 3008 relocation -= htab->elf.sgotplt->output_section->vma 3009 + htab->elf.sgotplt->output_offset; 3010 break; 3011 3012 case R_X86_64_PLT32: 3013 case R_X86_64_PLT32_BND: 3014 /* Relocation is to the entry for this symbol in the 3015 procedure linkage table. */ 3016 3017 /* Resolve a PLT32 reloc against a local symbol directly, 3018 without using the procedure linkage table. */ 3019 if (h == NULL) 3020 break; 3021 3022 if ((h->plt.offset == (bfd_vma) -1 3023 && eh->plt_got.offset == (bfd_vma) -1) 3024 || htab->elf.splt == NULL) 3025 { 3026 /* We didn't make a PLT entry for this symbol. This 3027 happens when statically linking PIC code, or when 3028 using -Bsymbolic. */ 3029 break; 3030 } 3031 3032 use_plt: 3033 if (h->plt.offset != (bfd_vma) -1) 3034 { 3035 if (htab->plt_second != NULL) 3036 { 3037 resolved_plt = htab->plt_second; 3038 plt_offset = eh->plt_second.offset; 3039 } 3040 else 3041 { 3042 resolved_plt = htab->elf.splt; 3043 plt_offset = h->plt.offset; 3044 } 3045 } 3046 else 3047 { 3048 /* Use the GOT PLT. */ 3049 resolved_plt = htab->plt_got; 3050 plt_offset = eh->plt_got.offset; 3051 } 3052 3053 relocation = (resolved_plt->output_section->vma 3054 + resolved_plt->output_offset 3055 + plt_offset); 3056 unresolved_reloc = FALSE; 3057 break; 3058 3059 case R_X86_64_SIZE32: 3060 case R_X86_64_SIZE64: 3061 /* Set to symbol size. */ 3062 relocation = st_size; 3063 goto direct; 3064 3065 case R_X86_64_PC8: 3066 case R_X86_64_PC16: 3067 case R_X86_64_PC32: 3068 case R_X86_64_PC32_BND: 3069 /* Don't complain about -fPIC if the symbol is undefined when 3070 building executable unless it is unresolved weak symbol, 3071 references a dynamic definition in PIE or -z nocopyreloc 3072 is used. */ 3073 if ((input_section->flags & SEC_ALLOC) != 0 3074 && (input_section->flags & SEC_READONLY) != 0 3075 && h != NULL 3076 && ((bfd_link_executable (info) 3077 && ((h->root.type == bfd_link_hash_undefweak 3078 && !resolved_to_zero) 3079 || (bfd_link_pie (info) 3080 && !h->def_regular 3081 && h->def_dynamic) 3082 || ((info->nocopyreloc 3083 || (eh->def_protected 3084 && elf_has_no_copy_on_protected (h->root.u.def.section->owner))) 3085 && h->def_dynamic 3086 && !(h->root.u.def.section->flags & SEC_CODE)))) 3087 || bfd_link_dll (info))) 3088 { 3089 bfd_boolean fail = FALSE; 3090 if (SYMBOL_REFERENCES_LOCAL_P (info, h)) 3091 { 3092 /* Symbol is referenced locally. Make sure it is 3093 defined locally. */ 3094 fail = !(h->def_regular || ELF_COMMON_DEF_P (h)); 3095 } 3096 else if (!(bfd_link_pie (info) 3097 && (h->needs_copy || eh->needs_copy))) 3098 { 3099 /* Symbol doesn't need copy reloc and isn't referenced 3100 locally. Address of protected function may not be 3101 reachable at run-time. 
*/ 3102 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 3103 || (ELF_ST_VISIBILITY (h->other) == STV_PROTECTED 3104 && h->type == STT_FUNC)); 3105 } 3106 3107 if (fail) 3108 return elf_x86_64_need_pic (info, input_bfd, input_section, 3109 h, NULL, NULL, howto); 3110 } 3111 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE 3112 as function address. */ 3113 else if (h != NULL 3114 && (input_section->flags & SEC_CODE) == 0 3115 && bfd_link_pie (info) 3116 && h->type == STT_FUNC 3117 && !h->def_regular 3118 && h->def_dynamic) 3119 goto use_plt; 3120 /* Fall through. */ 3121 3122 case R_X86_64_8: 3123 case R_X86_64_16: 3124 case R_X86_64_32: 3125 case R_X86_64_PC64: 3126 case R_X86_64_64: 3127 /* FIXME: The ABI says the linker should make sure the value is 3128 the same when it's zeroextended to 64 bit. */ 3129 3130 direct: 3131 if ((input_section->flags & SEC_ALLOC) == 0) 3132 break; 3133 3134 need_copy_reloc_in_pie = (bfd_link_pie (info) 3135 && h != NULL 3136 && (h->needs_copy 3137 || eh->needs_copy 3138 || (h->root.type 3139 == bfd_link_hash_undefined)) 3140 && (X86_PCREL_TYPE_P (r_type) 3141 || X86_SIZE_TYPE_P (r_type))); 3142 3143 if (GENERATE_DYNAMIC_RELOCATION_P (info, eh, r_type, 3144 need_copy_reloc_in_pie, 3145 resolved_to_zero, FALSE)) 3146 { 3147 Elf_Internal_Rela outrel; 3148 bfd_boolean skip, relocate; 3149 asection *sreloc; 3150 3151 /* When generating a shared object, these relocations 3152 are copied into the output file to be resolved at run 3153 time. */ 3154 skip = FALSE; 3155 relocate = FALSE; 3156 3157 outrel.r_offset = 3158 _bfd_elf_section_offset (output_bfd, info, input_section, 3159 rel->r_offset); 3160 if (outrel.r_offset == (bfd_vma) -1) 3161 skip = TRUE; 3162 else if (outrel.r_offset == (bfd_vma) -2) 3163 skip = TRUE, relocate = TRUE; 3164 3165 outrel.r_offset += (input_section->output_section->vma 3166 + input_section->output_offset); 3167 3168 if (skip) 3169 memset (&outrel, 0, sizeof outrel); 3170 3171 else if (COPY_INPUT_RELOC_P (info, h, r_type)) 3172 { 3173 outrel.r_info = htab->r_info (h->dynindx, r_type); 3174 outrel.r_addend = rel->r_addend; 3175 } 3176 else 3177 { 3178 /* This symbol is local, or marked to become local. 3179 When relocation overflow check is disabled, we 3180 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */ 3181 if (r_type == htab->pointer_r_type 3182 || (r_type == R_X86_64_32 3183 && info->no_reloc_overflow_check)) 3184 { 3185 relocate = TRUE; 3186 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); 3187 outrel.r_addend = relocation + rel->r_addend; 3188 } 3189 else if (r_type == R_X86_64_64 3190 && !ABI_64_P (output_bfd)) 3191 { 3192 relocate = TRUE; 3193 outrel.r_info = htab->r_info (0, 3194 R_X86_64_RELATIVE64); 3195 outrel.r_addend = relocation + rel->r_addend; 3196 /* Check addend overflow. */ 3197 if ((outrel.r_addend & 0x80000000) 3198 != (rel->r_addend & 0x80000000)) 3199 { 3200 const char *name; 3201 int addend = rel->r_addend; 3202 if (h && h->root.root.string) 3203 name = h->root.root.string; 3204 else 3205 name = bfd_elf_sym_name (input_bfd, symtab_hdr, 3206 sym, NULL); 3207 _bfd_error_handler 3208 /* xgettext:c-format */ 3209 (_("%pB: addend %s%#x in relocation %s against " 3210 "symbol `%s' at %#" PRIx64 3211 " in section `%pA' is out of range"), 3212 input_bfd, addend < 0 ? 
"-" : "", addend, 3213 howto->name, name, (uint64_t) rel->r_offset, 3214 input_section); 3215 bfd_set_error (bfd_error_bad_value); 3216 return FALSE; 3217 } 3218 } 3219 else 3220 { 3221 long sindx; 3222 3223 if (bfd_is_abs_section (sec)) 3224 sindx = 0; 3225 else if (sec == NULL || sec->owner == NULL) 3226 { 3227 bfd_set_error (bfd_error_bad_value); 3228 return FALSE; 3229 } 3230 else 3231 { 3232 asection *osec; 3233 3234 /* We are turning this relocation into one 3235 against a section symbol. It would be 3236 proper to subtract the symbol's value, 3237 osec->vma, from the emitted reloc addend, 3238 but ld.so expects buggy relocs. */ 3239 osec = sec->output_section; 3240 sindx = elf_section_data (osec)->dynindx; 3241 if (sindx == 0) 3242 { 3243 asection *oi = htab->elf.text_index_section; 3244 sindx = elf_section_data (oi)->dynindx; 3245 } 3246 BFD_ASSERT (sindx != 0); 3247 } 3248 3249 outrel.r_info = htab->r_info (sindx, r_type); 3250 outrel.r_addend = relocation + rel->r_addend; 3251 } 3252 } 3253 3254 sreloc = elf_section_data (input_section)->sreloc; 3255 3256 if (sreloc == NULL || sreloc->contents == NULL) 3257 { 3258 r = bfd_reloc_notsupported; 3259 goto check_relocation_error; 3260 } 3261 3262 elf_append_rela (output_bfd, sreloc, &outrel); 3263 3264 /* If this reloc is against an external symbol, we do 3265 not want to fiddle with the addend. Otherwise, we 3266 need to include the symbol value so that it becomes 3267 an addend for the dynamic reloc. */ 3268 if (! relocate) 3269 continue; 3270 } 3271 3272 break; 3273 3274 case R_X86_64_TLSGD: 3275 case R_X86_64_GOTPC32_TLSDESC: 3276 case R_X86_64_TLSDESC_CALL: 3277 case R_X86_64_GOTTPOFF: 3278 tls_type = GOT_UNKNOWN; 3279 if (h == NULL && local_got_offsets) 3280 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx]; 3281 else if (h != NULL) 3282 tls_type = elf_x86_hash_entry (h)->tls_type; 3283 3284 r_type_tls = r_type; 3285 if (! elf_x86_64_tls_transition (info, input_bfd, 3286 input_section, contents, 3287 symtab_hdr, sym_hashes, 3288 &r_type_tls, tls_type, rel, 3289 relend, h, r_symndx, TRUE)) 3290 return FALSE; 3291 3292 if (r_type_tls == R_X86_64_TPOFF32) 3293 { 3294 bfd_vma roff = rel->r_offset; 3295 3296 BFD_ASSERT (! unresolved_reloc); 3297 3298 if (r_type == R_X86_64_TLSGD) 3299 { 3300 /* GD->LE transition. 
For 64bit, change 3301 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 3302 .word 0x6666; rex64; call __tls_get_addr@PLT 3303 or 3304 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 3305 .byte 0x66; rex64 3306 call *__tls_get_addr@GOTPCREL(%rip) 3307 which may be converted to 3308 addr32 call __tls_get_addr 3309 into: 3310 movq %fs:0, %rax 3311 leaq foo@tpoff(%rax), %rax 3312 For 32bit, change 3313 leaq foo@tlsgd(%rip), %rdi 3314 .word 0x6666; rex64; call __tls_get_addr@PLT 3315 or 3316 leaq foo@tlsgd(%rip), %rdi 3317 .byte 0x66; rex64 3318 call *__tls_get_addr@GOTPCREL(%rip) 3319 which may be converted to 3320 addr32 call __tls_get_addr 3321 into: 3322 movl %fs:0, %eax 3323 leaq foo@tpoff(%rax), %rax 3324 For largepic, change: 3325 leaq foo@tlsgd(%rip), %rdi 3326 movabsq $__tls_get_addr@pltoff, %rax 3327 addq %r15, %rax 3328 call *%rax 3329 into: 3330 movq %fs:0, %rax 3331 leaq foo@tpoff(%rax), %rax 3332 nopw 0x0(%rax,%rax,1) */ 3333 int largepic = 0; 3334 if (ABI_64_P (output_bfd)) 3335 { 3336 if (contents[roff + 5] == 0xb8) 3337 { 3338 memcpy (contents + roff - 3, 3339 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" 3340 "\0\0\0\0\x66\x0f\x1f\x44\0", 22); 3341 largepic = 1; 3342 } 3343 else 3344 memcpy (contents + roff - 4, 3345 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", 3346 16); 3347 } 3348 else 3349 memcpy (contents + roff - 3, 3350 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", 3351 15); 3352 bfd_put_32 (output_bfd, 3353 elf_x86_64_tpoff (info, relocation), 3354 contents + roff + 8 + largepic); 3355 /* Skip R_X86_64_PC32, R_X86_64_PLT32, 3356 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */ 3357 rel++; 3358 wrel++; 3359 continue; 3360 } 3361 else if (r_type == R_X86_64_GOTPC32_TLSDESC) 3362 { 3363 /* GDesc -> LE transition. 3364 It's originally something like: 3365 leaq x@tlsdesc(%rip), %rax 3366 3367 Change it to: 3368 movl $x@tpoff, %rax. */ 3369 3370 unsigned int val, type; 3371 3372 type = bfd_get_8 (input_bfd, contents + roff - 3); 3373 val = bfd_get_8 (input_bfd, contents + roff - 1); 3374 bfd_put_8 (output_bfd, 0x48 | ((type >> 2) & 1), 3375 contents + roff - 3); 3376 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2); 3377 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7), 3378 contents + roff - 1); 3379 bfd_put_32 (output_bfd, 3380 elf_x86_64_tpoff (info, relocation), 3381 contents + roff); 3382 continue; 3383 } 3384 else if (r_type == R_X86_64_TLSDESC_CALL) 3385 { 3386 /* GDesc -> LE transition. 3387 It's originally: 3388 call *(%rax) 3389 Turn it into: 3390 xchg %ax,%ax. */ 3391 bfd_put_8 (output_bfd, 0x66, contents + roff); 3392 bfd_put_8 (output_bfd, 0x90, contents + roff + 1); 3393 continue; 3394 } 3395 else if (r_type == R_X86_64_GOTTPOFF) 3396 { 3397 /* IE->LE transition: 3398 For 64bit, originally it can be one of: 3399 movq foo@gottpoff(%rip), %reg 3400 addq foo@gottpoff(%rip), %reg 3401 We change it into: 3402 movq $foo, %reg 3403 leaq foo(%reg), %reg 3404 addq $foo, %reg. 3405 For 32bit, originally it can be one of: 3406 movq foo@gottpoff(%rip), %reg 3407 addl foo@gottpoff(%rip), %reg 3408 We change it into: 3409 movq $foo, %reg 3410 leal foo(%reg), %reg 3411 addl $foo, %reg. 
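For example, with %rax as the destination the common IE sequence
   48 8b 05 00 00 00 00    movq foo@gottpoff(%rip), %rax
is rewritten in place to
   48 c7 c0 XX XX XX XX    movq $foo@tpoff, %rax
keeping the length at 7 bytes; the 32-bit immediate filled in below via elf_x86_64_tpoff is the (negative) offset of foo from the thread pointer in %fs.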
*/ 3412 3413 unsigned int val, type, reg; 3414 3415 if (roff >= 3) 3416 val = bfd_get_8 (input_bfd, contents + roff - 3); 3417 else 3418 val = 0; 3419 type = bfd_get_8 (input_bfd, contents + roff - 2); 3420 reg = bfd_get_8 (input_bfd, contents + roff - 1); 3421 reg >>= 3; 3422 if (type == 0x8b) 3423 { 3424 /* movq */ 3425 if (val == 0x4c) 3426 bfd_put_8 (output_bfd, 0x49, 3427 contents + roff - 3); 3428 else if (!ABI_64_P (output_bfd) && val == 0x44) 3429 bfd_put_8 (output_bfd, 0x41, 3430 contents + roff - 3); 3431 bfd_put_8 (output_bfd, 0xc7, 3432 contents + roff - 2); 3433 bfd_put_8 (output_bfd, 0xc0 | reg, 3434 contents + roff - 1); 3435 } 3436 else if (reg == 4) 3437 { 3438 /* addq/addl -> addq/addl - addressing with %rsp/%r12 3439 is special */ 3440 if (val == 0x4c) 3441 bfd_put_8 (output_bfd, 0x49, 3442 contents + roff - 3); 3443 else if (!ABI_64_P (output_bfd) && val == 0x44) 3444 bfd_put_8 (output_bfd, 0x41, 3445 contents + roff - 3); 3446 bfd_put_8 (output_bfd, 0x81, 3447 contents + roff - 2); 3448 bfd_put_8 (output_bfd, 0xc0 | reg, 3449 contents + roff - 1); 3450 } 3451 else 3452 { 3453 /* addq/addl -> leaq/leal */ 3454 if (val == 0x4c) 3455 bfd_put_8 (output_bfd, 0x4d, 3456 contents + roff - 3); 3457 else if (!ABI_64_P (output_bfd) && val == 0x44) 3458 bfd_put_8 (output_bfd, 0x45, 3459 contents + roff - 3); 3460 bfd_put_8 (output_bfd, 0x8d, 3461 contents + roff - 2); 3462 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3), 3463 contents + roff - 1); 3464 } 3465 bfd_put_32 (output_bfd, 3466 elf_x86_64_tpoff (info, relocation), 3467 contents + roff); 3468 continue; 3469 } 3470 else 3471 BFD_ASSERT (FALSE); 3472 } 3473 3474 if (htab->elf.sgot == NULL) 3475 abort (); 3476 3477 if (h != NULL) 3478 { 3479 off = h->got.offset; 3480 offplt = elf_x86_hash_entry (h)->tlsdesc_got; 3481 } 3482 else 3483 { 3484 if (local_got_offsets == NULL) 3485 abort (); 3486 3487 off = local_got_offsets[r_symndx]; 3488 offplt = local_tlsdesc_gotents[r_symndx]; 3489 } 3490 3491 if ((off & 1) != 0) 3492 off &= ~1; 3493 else 3494 { 3495 Elf_Internal_Rela outrel; 3496 int dr_type, indx; 3497 asection *sreloc; 3498 3499 if (htab->elf.srelgot == NULL) 3500 abort (); 3501 3502 indx = h && h->dynindx != -1 ? 
h->dynindx : 0; 3503 3504 if (GOT_TLS_GDESC_P (tls_type)) 3505 { 3506 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC); 3507 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt 3508 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size); 3509 outrel.r_offset = (htab->elf.sgotplt->output_section->vma 3510 + htab->elf.sgotplt->output_offset 3511 + offplt 3512 + htab->sgotplt_jump_table_size); 3513 sreloc = htab->elf.srelplt; 3514 if (indx == 0) 3515 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info); 3516 else 3517 outrel.r_addend = 0; 3518 elf_append_rela (output_bfd, sreloc, &outrel); 3519 } 3520 3521 sreloc = htab->elf.srelgot; 3522 3523 outrel.r_offset = (htab->elf.sgot->output_section->vma 3524 + htab->elf.sgot->output_offset + off); 3525 3526 if (GOT_TLS_GD_P (tls_type)) 3527 dr_type = R_X86_64_DTPMOD64; 3528 else if (GOT_TLS_GDESC_P (tls_type)) 3529 goto dr_done; 3530 else 3531 dr_type = R_X86_64_TPOFF64; 3532 3533 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off); 3534 outrel.r_addend = 0; 3535 if ((dr_type == R_X86_64_TPOFF64 3536 || dr_type == R_X86_64_TLSDESC) && indx == 0) 3537 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info); 3538 outrel.r_info = htab->r_info (indx, dr_type); 3539 3540 elf_append_rela (output_bfd, sreloc, &outrel); 3541 3542 if (GOT_TLS_GD_P (tls_type)) 3543 { 3544 if (indx == 0) 3545 { 3546 BFD_ASSERT (! unresolved_reloc); 3547 bfd_put_64 (output_bfd, 3548 relocation - _bfd_x86_elf_dtpoff_base (info), 3549 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 3550 } 3551 else 3552 { 3553 bfd_put_64 (output_bfd, 0, 3554 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 3555 outrel.r_info = htab->r_info (indx, 3556 R_X86_64_DTPOFF64); 3557 outrel.r_offset += GOT_ENTRY_SIZE; 3558 elf_append_rela (output_bfd, sreloc, 3559 &outrel); 3560 } 3561 } 3562 3563 dr_done: 3564 if (h != NULL) 3565 h->got.offset |= 1; 3566 else 3567 local_got_offsets[r_symndx] |= 1; 3568 } 3569 3570 if (off >= (bfd_vma) -2 3571 && ! GOT_TLS_GDESC_P (tls_type)) 3572 abort (); 3573 if (r_type_tls == r_type) 3574 { 3575 if (r_type == R_X86_64_GOTPC32_TLSDESC 3576 || r_type == R_X86_64_TLSDESC_CALL) 3577 relocation = htab->elf.sgotplt->output_section->vma 3578 + htab->elf.sgotplt->output_offset 3579 + offplt + htab->sgotplt_jump_table_size; 3580 else 3581 relocation = htab->elf.sgot->output_section->vma 3582 + htab->elf.sgot->output_offset + off; 3583 unresolved_reloc = FALSE; 3584 } 3585 else 3586 { 3587 bfd_vma roff = rel->r_offset; 3588 3589 if (r_type == R_X86_64_TLSGD) 3590 { 3591 /* GD->IE transition. 
For 64bit, change 3592 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 3593 .word 0x6666; rex64; call __tls_get_addr@PLT 3594 or 3595 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 3596 .byte 0x66; rex64 3597 call *__tls_get_addr@GOTPCREL(%rip 3598 which may be converted to 3599 addr32 call __tls_get_addr 3600 into: 3601 movq %fs:0, %rax 3602 addq foo@gottpoff(%rip), %rax 3603 For 32bit, change 3604 leaq foo@tlsgd(%rip), %rdi 3605 .word 0x6666; rex64; call __tls_get_addr@PLT 3606 or 3607 leaq foo@tlsgd(%rip), %rdi 3608 .byte 0x66; rex64; 3609 call *__tls_get_addr@GOTPCREL(%rip) 3610 which may be converted to 3611 addr32 call __tls_get_addr 3612 into: 3613 movl %fs:0, %eax 3614 addq foo@gottpoff(%rip), %rax 3615 For largepic, change: 3616 leaq foo@tlsgd(%rip), %rdi 3617 movabsq $__tls_get_addr@pltoff, %rax 3618 addq %r15, %rax 3619 call *%rax 3620 into: 3621 movq %fs:0, %rax 3622 addq foo@gottpoff(%rax), %rax 3623 nopw 0x0(%rax,%rax,1) */ 3624 int largepic = 0; 3625 if (ABI_64_P (output_bfd)) 3626 { 3627 if (contents[roff + 5] == 0xb8) 3628 { 3629 memcpy (contents + roff - 3, 3630 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" 3631 "\0\0\0\0\x66\x0f\x1f\x44\0", 22); 3632 largepic = 1; 3633 } 3634 else 3635 memcpy (contents + roff - 4, 3636 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", 3637 16); 3638 } 3639 else 3640 memcpy (contents + roff - 3, 3641 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", 3642 15); 3643 3644 relocation = (htab->elf.sgot->output_section->vma 3645 + htab->elf.sgot->output_offset + off 3646 - roff 3647 - largepic 3648 - input_section->output_section->vma 3649 - input_section->output_offset 3650 - 12); 3651 bfd_put_32 (output_bfd, relocation, 3652 contents + roff + 8 + largepic); 3653 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */ 3654 rel++; 3655 wrel++; 3656 continue; 3657 } 3658 else if (r_type == R_X86_64_GOTPC32_TLSDESC) 3659 { 3660 /* GDesc -> IE transition. 3661 It's originally something like: 3662 leaq x@tlsdesc(%rip), %rax 3663 3664 Change it to: 3665 movq x@gottpoff(%rip), %rax # before xchg %ax,%ax. */ 3666 3667 /* Now modify the instruction as appropriate. To 3668 turn a leaq into a movq in the form we use it, it 3669 suffices to change the second byte from 0x8d to 3670 0x8b. */ 3671 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2); 3672 3673 bfd_put_32 (output_bfd, 3674 htab->elf.sgot->output_section->vma 3675 + htab->elf.sgot->output_offset + off 3676 - rel->r_offset 3677 - input_section->output_section->vma 3678 - input_section->output_offset 3679 - 4, 3680 contents + roff); 3681 continue; 3682 } 3683 else if (r_type == R_X86_64_TLSDESC_CALL) 3684 { 3685 /* GDesc -> IE transition. 3686 It's originally: 3687 call *(%rax) 3688 3689 Change it to: 3690 xchg %ax, %ax. */ 3691 3692 bfd_put_8 (output_bfd, 0x66, contents + roff); 3693 bfd_put_8 (output_bfd, 0x90, contents + roff + 1); 3694 continue; 3695 } 3696 else 3697 BFD_ASSERT (FALSE); 3698 } 3699 break; 3700 3701 case R_X86_64_TLSLD: 3702 if (! 
elf_x86_64_tls_transition (info, input_bfd, 3703 input_section, contents, 3704 symtab_hdr, sym_hashes, 3705 &r_type, GOT_UNKNOWN, rel, 3706 relend, h, r_symndx, TRUE)) 3707 return FALSE; 3708 3709 if (r_type != R_X86_64_TLSLD) 3710 { 3711 /* LD->LE transition: 3712 leaq foo@tlsld(%rip), %rdi 3713 call __tls_get_addr@PLT 3714 For 64bit, we change it into: 3715 .word 0x6666; .byte 0x66; movq %fs:0, %rax 3716 For 32bit, we change it into: 3717 nopl 0x0(%rax); movl %fs:0, %eax 3718 Or 3719 leaq foo@tlsld(%rip), %rdi; 3720 call *__tls_get_addr@GOTPCREL(%rip) 3721 which may be converted to 3722 addr32 call __tls_get_addr 3723 For 64bit, we change it into: 3724 .word 0x6666; .word 0x6666; movq %fs:0, %rax 3725 For 32bit, we change it into: 3726 nopw 0x0(%rax); movl %fs:0, %eax 3727 For largepic, change: 3728 leaq foo@tlsgd(%rip), %rdi 3729 movabsq $__tls_get_addr@pltoff, %rax 3730 addq %rbx, %rax 3731 call *%rax 3732 into 3733 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1) 3734 movq %fs:0, %eax */ 3735 3736 BFD_ASSERT (r_type == R_X86_64_TPOFF32); 3737 if (ABI_64_P (output_bfd)) 3738 { 3739 if (contents[rel->r_offset + 5] == 0xb8) 3740 memcpy (contents + rel->r_offset - 3, 3741 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" 3742 "\x64\x48\x8b\x04\x25\0\0\0", 22); 3743 else if (contents[rel->r_offset + 4] == 0xff 3744 || contents[rel->r_offset + 4] == 0x67) 3745 memcpy (contents + rel->r_offset - 3, 3746 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 3747 13); 3748 else 3749 memcpy (contents + rel->r_offset - 3, 3750 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); 3751 } 3752 else 3753 { 3754 if (contents[rel->r_offset + 4] == 0xff) 3755 memcpy (contents + rel->r_offset - 3, 3756 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 3757 13); 3758 else 3759 memcpy (contents + rel->r_offset - 3, 3760 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); 3761 } 3762 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX 3763 and R_X86_64_PLTOFF64. 
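Those relocations sit on the call to __tls_get_addr that has just been overwritten, so they must not be applied; bumping rel here (and wrel, to keep the two cursors in step) makes the loop step over that extra relocation without processing it.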
*/ 3764 rel++; 3765 wrel++; 3766 continue; 3767 } 3768 3769 if (htab->elf.sgot == NULL) 3770 abort (); 3771 3772 off = htab->tls_ld_or_ldm_got.offset; 3773 if (off & 1) 3774 off &= ~1; 3775 else 3776 { 3777 Elf_Internal_Rela outrel; 3778 3779 if (htab->elf.srelgot == NULL) 3780 abort (); 3781 3782 outrel.r_offset = (htab->elf.sgot->output_section->vma 3783 + htab->elf.sgot->output_offset + off); 3784 3785 bfd_put_64 (output_bfd, 0, 3786 htab->elf.sgot->contents + off); 3787 bfd_put_64 (output_bfd, 0, 3788 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 3789 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64); 3790 outrel.r_addend = 0; 3791 elf_append_rela (output_bfd, htab->elf.srelgot, 3792 &outrel); 3793 htab->tls_ld_or_ldm_got.offset |= 1; 3794 } 3795 relocation = htab->elf.sgot->output_section->vma 3796 + htab->elf.sgot->output_offset + off; 3797 unresolved_reloc = FALSE; 3798 break; 3799 3800 case R_X86_64_DTPOFF32: 3801 if (!bfd_link_executable (info) 3802 || (input_section->flags & SEC_CODE) == 0) 3803 relocation -= _bfd_x86_elf_dtpoff_base (info); 3804 else 3805 relocation = elf_x86_64_tpoff (info, relocation); 3806 break; 3807 3808 case R_X86_64_TPOFF32: 3809 case R_X86_64_TPOFF64: 3810 BFD_ASSERT (bfd_link_executable (info)); 3811 relocation = elf_x86_64_tpoff (info, relocation); 3812 break; 3813 3814 case R_X86_64_DTPOFF64: 3815 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0); 3816 relocation -= _bfd_x86_elf_dtpoff_base (info); 3817 break; 3818 3819 default: 3820 break; 3821 } 3822 3823 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections 3824 because such sections are not SEC_ALLOC and thus ld.so will 3825 not process them. */ 3826 if (unresolved_reloc 3827 && !((input_section->flags & SEC_DEBUGGING) != 0 3828 && h->def_dynamic) 3829 && _bfd_elf_section_offset (output_bfd, info, input_section, 3830 rel->r_offset) != (bfd_vma) -1) 3831 { 3832 switch (r_type) 3833 { 3834 case R_X86_64_32S: 3835 sec = h->root.u.def.section; 3836 if ((info->nocopyreloc 3837 || (eh->def_protected 3838 && elf_has_no_copy_on_protected (h->root.u.def.section->owner))) 3839 && !(h->root.u.def.section->flags & SEC_CODE)) 3840 return elf_x86_64_need_pic (info, input_bfd, input_section, 3841 h, NULL, NULL, howto); 3842 /* Fall through. */ 3843 3844 default: 3845 _bfd_error_handler 3846 /* xgettext:c-format */ 3847 (_("%pB(%pA+%#" PRIx64 "): " 3848 "unresolvable %s relocation against symbol `%s'"), 3849 input_bfd, 3850 input_section, 3851 (uint64_t) rel->r_offset, 3852 howto->name, 3853 h->root.root.string); 3854 return FALSE; 3855 } 3856 } 3857 3858 do_relocation: 3859 r = _bfd_final_link_relocate (howto, input_bfd, input_section, 3860 contents, rel->r_offset, 3861 relocation, rel->r_addend); 3862 3863 check_relocation_error: 3864 if (r != bfd_reloc_ok) 3865 { 3866 const char *name; 3867 3868 if (h != NULL) 3869 name = h->root.root.string; 3870 else 3871 { 3872 name = bfd_elf_string_from_elf_section (input_bfd, 3873 symtab_hdr->sh_link, 3874 sym->st_name); 3875 if (name == NULL) 3876 return FALSE; 3877 if (*name == '\0') 3878 name = bfd_section_name (input_bfd, sec); 3879 } 3880 3881 if (r == bfd_reloc_overflow) 3882 { 3883 if (converted_reloc) 3884 { 3885 info->callbacks->einfo 3886 (_("%F%P: failed to convert GOTPCREL relocation; relink with --no-relax\n")); 3887 return FALSE; 3888 } 3889 (*info->callbacks->reloc_overflow) 3890 (info, (h ? 
&h->root : NULL), name, howto->name, 3891 (bfd_vma) 0, input_bfd, input_section, rel->r_offset); 3892 } 3893 else 3894 { 3895 _bfd_error_handler 3896 /* xgettext:c-format */ 3897 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"), 3898 input_bfd, input_section, 3899 (uint64_t) rel->r_offset, name, (int) r); 3900 return FALSE; 3901 } 3902 } 3903 3904 if (wrel != rel) 3905 *wrel = *rel; 3906 } 3907 3908 if (wrel != rel) 3909 { 3910 Elf_Internal_Shdr *rel_hdr; 3911 size_t deleted = rel - wrel; 3912 3913 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section); 3914 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted; 3915 if (rel_hdr->sh_size == 0) 3916 { 3917 /* It is too late to remove an empty reloc section. Leave 3918 one NONE reloc. 3919 ??? What is wrong with an empty section??? */ 3920 rel_hdr->sh_size = rel_hdr->sh_entsize; 3921 deleted -= 1; 3922 } 3923 rel_hdr = _bfd_elf_single_rel_hdr (input_section); 3924 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted; 3925 input_section->reloc_count -= deleted; 3926 } 3927 3928 return TRUE; 3929 } 3930 3931 /* Finish up dynamic symbol handling. We set the contents of various 3932 dynamic sections here. */ 3933 3934 static bfd_boolean 3935 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, 3936 struct bfd_link_info *info, 3937 struct elf_link_hash_entry *h, 3938 Elf_Internal_Sym *sym) 3939 { 3940 struct elf_x86_link_hash_table *htab; 3941 bfd_boolean use_plt_second; 3942 struct elf_x86_link_hash_entry *eh; 3943 bfd_boolean local_undefweak; 3944 3945 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 3946 if (htab == NULL) 3947 return FALSE; 3948 3949 /* Use the second PLT section only if there is .plt section. */ 3950 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL; 3951 3952 eh = (struct elf_x86_link_hash_entry *) h; 3953 if (eh->no_finish_dynamic_symbol) 3954 abort (); 3955 3956 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for 3957 resolved undefined weak symbols in executable so that their 3958 references have value 0 at run-time. */ 3959 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh); 3960 3961 if (h->plt.offset != (bfd_vma) -1) 3962 { 3963 bfd_vma plt_index; 3964 bfd_vma got_offset, plt_offset; 3965 Elf_Internal_Rela rela; 3966 bfd_byte *loc; 3967 asection *plt, *gotplt, *relplt, *resolved_plt; 3968 const struct elf_backend_data *bed; 3969 bfd_vma plt_got_pcrel_offset; 3970 3971 /* When building a static executable, use .iplt, .igot.plt and 3972 .rela.iplt sections for STT_GNU_IFUNC symbols. */ 3973 if (htab->elf.splt != NULL) 3974 { 3975 plt = htab->elf.splt; 3976 gotplt = htab->elf.sgotplt; 3977 relplt = htab->elf.srelplt; 3978 } 3979 else 3980 { 3981 plt = htab->elf.iplt; 3982 gotplt = htab->elf.igotplt; 3983 relplt = htab->elf.irelplt; 3984 } 3985 3986 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak) 3987 3988 /* Get the index in the procedure linkage table which 3989 corresponds to this symbol. This is the index of this symbol 3990 in all the symbols for which we are making plt entries. The 3991 first entry in the procedure linkage table is reserved. 3992 3993 Get the offset into the .got table of the entry that 3994 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE 3995 bytes. The first three are reserved for the dynamic linker. 3996 3997 For static executables, we don't reserve anything. 
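As an illustration only, assuming the usual 16-byte lazy PLT entries, 8-byte GOT entries and a reserved PLT0: the first real PLT entry has h->plt.offset == 16 and maps to .got.plt offset (16 / 16 - 1 + 3) * 8 == 24, the slot immediately after the three reserved entries.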
*/ 3998 3999 if (plt == htab->elf.splt) 4000 { 4001 got_offset = (h->plt.offset / htab->plt.plt_entry_size 4002 - htab->plt.has_plt0); 4003 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE; 4004 } 4005 else 4006 { 4007 got_offset = h->plt.offset / htab->plt.plt_entry_size; 4008 got_offset = got_offset * GOT_ENTRY_SIZE; 4009 } 4010 4011 /* Fill in the entry in the procedure linkage table. */ 4012 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry, 4013 htab->plt.plt_entry_size); 4014 if (use_plt_second) 4015 { 4016 memcpy (htab->plt_second->contents + eh->plt_second.offset, 4017 htab->non_lazy_plt->plt_entry, 4018 htab->non_lazy_plt->plt_entry_size); 4019 4020 resolved_plt = htab->plt_second; 4021 plt_offset = eh->plt_second.offset; 4022 } 4023 else 4024 { 4025 resolved_plt = plt; 4026 plt_offset = h->plt.offset; 4027 } 4028 4029 /* Insert the relocation positions of the plt section. */ 4030 4031 /* Put offset the PC-relative instruction referring to the GOT entry, 4032 subtracting the size of that instruction. */ 4033 plt_got_pcrel_offset = (gotplt->output_section->vma 4034 + gotplt->output_offset 4035 + got_offset 4036 - resolved_plt->output_section->vma 4037 - resolved_plt->output_offset 4038 - plt_offset 4039 - htab->plt.plt_got_insn_size); 4040 4041 /* Check PC-relative offset overflow in PLT entry. */ 4042 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff) 4043 /* xgettext:c-format */ 4044 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"), 4045 output_bfd, h->root.root.string); 4046 4047 bfd_put_32 (output_bfd, plt_got_pcrel_offset, 4048 (resolved_plt->contents + plt_offset 4049 + htab->plt.plt_got_offset)); 4050 4051 /* Fill in the entry in the global offset table, initially this 4052 points to the second part of the PLT entry. Leave the entry 4053 as zero for undefined weak symbol in PIE. No PLT relocation 4054 against undefined weak symbol in PIE. */ 4055 if (!local_undefweak) 4056 { 4057 if (htab->plt.has_plt0) 4058 bfd_put_64 (output_bfd, (plt->output_section->vma 4059 + plt->output_offset 4060 + h->plt.offset 4061 + htab->lazy_plt->plt_lazy_offset), 4062 gotplt->contents + got_offset); 4063 4064 /* Fill in the entry in the .rela.plt section. */ 4065 rela.r_offset = (gotplt->output_section->vma 4066 + gotplt->output_offset 4067 + got_offset); 4068 if (PLT_LOCAL_IFUNC_P (info, h)) 4069 { 4070 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), 4071 h->root.root.string, 4072 h->root.u.def.section->owner); 4073 4074 /* If an STT_GNU_IFUNC symbol is locally defined, generate 4075 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */ 4076 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE); 4077 rela.r_addend = (h->root.u.def.value 4078 + h->root.u.def.section->output_section->vma 4079 + h->root.u.def.section->output_offset); 4080 /* R_X86_64_IRELATIVE comes last. */ 4081 plt_index = htab->next_irelative_index--; 4082 } 4083 else 4084 { 4085 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT); 4086 rela.r_addend = 0; 4087 plt_index = htab->next_jump_slot_index++; 4088 } 4089 4090 /* Don't fill the second and third slots in PLT entry for 4091 static executables nor without PLT0. */ 4092 if (plt == htab->elf.splt && htab->plt.has_plt0) 4093 { 4094 bfd_vma plt0_offset 4095 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end; 4096 4097 /* Put relocation index. 
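The index is the immediate operand of the pushq in the lazy PLT entry; the dynamic linker uses it to find the matching R_X86_64_JUMP_SLOT entry in .rela.plt the first time the stub is taken.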
*/ 4098 bfd_put_32 (output_bfd, plt_index, 4099 (plt->contents + h->plt.offset 4100 + htab->lazy_plt->plt_reloc_offset)); 4101 4102 /* Put offset for jmp .PLT0 and check for overflow. We don't 4103 check relocation index for overflow since branch displacement 4104 will overflow first. */ 4105 if (plt0_offset > 0x80000000) 4106 /* xgettext:c-format */ 4107 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"), 4108 output_bfd, h->root.root.string); 4109 bfd_put_32 (output_bfd, - plt0_offset, 4110 (plt->contents + h->plt.offset 4111 + htab->lazy_plt->plt_plt_offset)); 4112 } 4113 4114 bed = get_elf_backend_data (output_bfd); 4115 loc = relplt->contents + plt_index * bed->s->sizeof_rela; 4116 bed->s->swap_reloca_out (output_bfd, &rela, loc); 4117 } 4118 } 4119 else if (eh->plt_got.offset != (bfd_vma) -1) 4120 { 4121 bfd_vma got_offset, plt_offset; 4122 asection *plt, *got; 4123 bfd_boolean got_after_plt; 4124 int32_t got_pcrel_offset; 4125 4126 /* Set the entry in the GOT procedure linkage table. */ 4127 plt = htab->plt_got; 4128 got = htab->elf.sgot; 4129 got_offset = h->got.offset; 4130 4131 if (got_offset == (bfd_vma) -1 4132 || (h->type == STT_GNU_IFUNC && h->def_regular) 4133 || plt == NULL 4134 || got == NULL) 4135 abort (); 4136 4137 /* Use the non-lazy PLT entry template for the GOT PLT since they 4138 are the identical. */ 4139 /* Fill in the entry in the GOT procedure linkage table. */ 4140 plt_offset = eh->plt_got.offset; 4141 memcpy (plt->contents + plt_offset, 4142 htab->non_lazy_plt->plt_entry, 4143 htab->non_lazy_plt->plt_entry_size); 4144 4145 /* Put offset the PC-relative instruction referring to the GOT 4146 entry, subtracting the size of that instruction. */ 4147 got_pcrel_offset = (got->output_section->vma 4148 + got->output_offset 4149 + got_offset 4150 - plt->output_section->vma 4151 - plt->output_offset 4152 - plt_offset 4153 - htab->non_lazy_plt->plt_got_insn_size); 4154 4155 /* Check PC-relative offset overflow in GOT PLT entry. */ 4156 got_after_plt = got->output_section->vma > plt->output_section->vma; 4157 if ((got_after_plt && got_pcrel_offset < 0) 4158 || (!got_after_plt && got_pcrel_offset > 0)) 4159 /* xgettext:c-format */ 4160 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"), 4161 output_bfd, h->root.root.string); 4162 4163 bfd_put_32 (output_bfd, got_pcrel_offset, 4164 (plt->contents + plt_offset 4165 + htab->non_lazy_plt->plt_got_offset)); 4166 } 4167 4168 if (!local_undefweak 4169 && !h->def_regular 4170 && (h->plt.offset != (bfd_vma) -1 4171 || eh->plt_got.offset != (bfd_vma) -1)) 4172 { 4173 /* Mark the symbol as undefined, rather than as defined in 4174 the .plt section. Leave the value if there were any 4175 relocations where pointer equality matters (this is a clue 4176 for the dynamic linker, to make function pointer 4177 comparisons work between an application and shared 4178 library), otherwise set it to zero. If a function is only 4179 called from a binary, there is no need to slow down 4180 shared libraries because of that. */ 4181 sym->st_shndx = SHN_UNDEF; 4182 if (!h->pointer_equality_needed) 4183 sym->st_value = 0; 4184 } 4185 4186 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym); 4187 4188 /* Don't generate dynamic GOT relocation against undefined weak 4189 symbol in executable. */ 4190 if (h->got.offset != (bfd_vma) -1 4191 && ! 
GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type) 4192 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE 4193 && !local_undefweak) 4194 { 4195 Elf_Internal_Rela rela; 4196 asection *relgot = htab->elf.srelgot; 4197 4198 /* This symbol has an entry in the global offset table. Set it 4199 up. */ 4200 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL) 4201 abort (); 4202 4203 rela.r_offset = (htab->elf.sgot->output_section->vma 4204 + htab->elf.sgot->output_offset 4205 + (h->got.offset &~ (bfd_vma) 1)); 4206 4207 /* If this is a static link, or it is a -Bsymbolic link and the 4208 symbol is defined locally or was forced to be local because 4209 of a version file, we just want to emit a RELATIVE reloc. 4210 The entry in the global offset table will already have been 4211 initialized in the relocate_section function. */ 4212 if (h->def_regular 4213 && h->type == STT_GNU_IFUNC) 4214 { 4215 if (h->plt.offset == (bfd_vma) -1) 4216 { 4217 /* STT_GNU_IFUNC is referenced without PLT. */ 4218 if (htab->elf.splt == NULL) 4219 { 4220 /* use .rel[a].iplt section to store .got relocations 4221 in static executable. */ 4222 relgot = htab->elf.irelplt; 4223 } 4224 if (SYMBOL_REFERENCES_LOCAL_P (info, h)) 4225 { 4226 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), 4227 h->root.root.string, 4228 h->root.u.def.section->owner); 4229 4230 rela.r_info = htab->r_info (0, 4231 R_X86_64_IRELATIVE); 4232 rela.r_addend = (h->root.u.def.value 4233 + h->root.u.def.section->output_section->vma 4234 + h->root.u.def.section->output_offset); 4235 } 4236 else 4237 goto do_glob_dat; 4238 } 4239 else if (bfd_link_pic (info)) 4240 { 4241 /* Generate R_X86_64_GLOB_DAT. */ 4242 goto do_glob_dat; 4243 } 4244 else 4245 { 4246 asection *plt; 4247 bfd_vma plt_offset; 4248 4249 if (!h->pointer_equality_needed) 4250 abort (); 4251 4252 /* For non-shared object, we can't use .got.plt, which 4253 contains the real function addres if we need pointer 4254 equality. We load the GOT entry with the PLT entry. */ 4255 if (htab->plt_second != NULL) 4256 { 4257 plt = htab->plt_second; 4258 plt_offset = eh->plt_second.offset; 4259 } 4260 else 4261 { 4262 plt = htab->elf.splt ? htab->elf.splt : htab->elf.iplt; 4263 plt_offset = h->plt.offset; 4264 } 4265 bfd_put_64 (output_bfd, (plt->output_section->vma 4266 + plt->output_offset 4267 + plt_offset), 4268 htab->elf.sgot->contents + h->got.offset); 4269 return TRUE; 4270 } 4271 } 4272 else if (bfd_link_pic (info) 4273 && SYMBOL_REFERENCES_LOCAL_P (info, h)) 4274 { 4275 if (!(h->def_regular || ELF_COMMON_DEF_P (h))) 4276 return FALSE; 4277 BFD_ASSERT((h->got.offset & 1) != 0); 4278 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE); 4279 rela.r_addend = (h->root.u.def.value 4280 + h->root.u.def.section->output_section->vma 4281 + h->root.u.def.section->output_offset); 4282 } 4283 else 4284 { 4285 BFD_ASSERT((h->got.offset & 1) == 0); 4286 do_glob_dat: 4287 bfd_put_64 (output_bfd, (bfd_vma) 0, 4288 htab->elf.sgot->contents + h->got.offset); 4289 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT); 4290 rela.r_addend = 0; 4291 } 4292 4293 elf_append_rela (output_bfd, relgot, &rela); 4294 } 4295 4296 if (h->needs_copy) 4297 { 4298 Elf_Internal_Rela rela; 4299 asection *s; 4300 4301 /* This symbol needs a copy reloc. Set it up. 
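At run time ld.so copies the symbol's initial contents from the shared object into the space reserved in the executable; the relocation goes into .rela.bss, or into .rela.data.rel.ro when the copy was placed in the protected .data.rel.ro area (htab->elf.sdynrelro).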
*/ 4302 VERIFY_COPY_RELOC (h, htab) 4303 4304 rela.r_offset = (h->root.u.def.value 4305 + h->root.u.def.section->output_section->vma 4306 + h->root.u.def.section->output_offset); 4307 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY); 4308 rela.r_addend = 0; 4309 if (h->root.u.def.section == htab->elf.sdynrelro) 4310 s = htab->elf.sreldynrelro; 4311 else 4312 s = htab->elf.srelbss; 4313 elf_append_rela (output_bfd, s, &rela); 4314 } 4315 4316 return TRUE; 4317 } 4318 4319 /* Finish up local dynamic symbol handling. We set the contents of 4320 various dynamic sections here. */ 4321 4322 static bfd_boolean 4323 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf) 4324 { 4325 struct elf_link_hash_entry *h 4326 = (struct elf_link_hash_entry *) *slot; 4327 struct bfd_link_info *info 4328 = (struct bfd_link_info *) inf; 4329 4330 return elf_x86_64_finish_dynamic_symbol (info->output_bfd, 4331 info, h, NULL); 4332 } 4333 4334 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry 4335 here since undefined weak symbol may not be dynamic and may not be 4336 called for elf_x86_64_finish_dynamic_symbol. */ 4337 4338 static bfd_boolean 4339 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh, 4340 void *inf) 4341 { 4342 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh; 4343 struct bfd_link_info *info = (struct bfd_link_info *) inf; 4344 4345 if (h->root.type != bfd_link_hash_undefweak 4346 || h->dynindx != -1) 4347 return TRUE; 4348 4349 return elf_x86_64_finish_dynamic_symbol (info->output_bfd, 4350 info, h, NULL); 4351 } 4352 4353 /* Used to decide how to sort relocs in an optimal manner for the 4354 dynamic linker, before writing them out. */ 4355 4356 static enum elf_reloc_type_class 4357 elf_x86_64_reloc_type_class (const struct bfd_link_info *info, 4358 const asection *rel_sec ATTRIBUTE_UNUSED, 4359 const Elf_Internal_Rela *rela) 4360 { 4361 bfd *abfd = info->output_bfd; 4362 const struct elf_backend_data *bed = get_elf_backend_data (abfd); 4363 struct elf_x86_link_hash_table *htab 4364 = elf_x86_hash_table (info, X86_64_ELF_DATA); 4365 4366 if (htab->elf.dynsym != NULL 4367 && htab->elf.dynsym->contents != NULL) 4368 { 4369 /* Check relocation against STT_GNU_IFUNC symbol if there are 4370 dynamic symbols. */ 4371 unsigned long r_symndx = htab->r_sym (rela->r_info); 4372 if (r_symndx != STN_UNDEF) 4373 { 4374 Elf_Internal_Sym sym; 4375 if (!bed->s->swap_symbol_in (abfd, 4376 (htab->elf.dynsym->contents 4377 + r_symndx * bed->s->sizeof_sym), 4378 0, &sym)) 4379 abort (); 4380 4381 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC) 4382 return reloc_class_ifunc; 4383 } 4384 } 4385 4386 switch ((int) ELF32_R_TYPE (rela->r_info)) 4387 { 4388 case R_X86_64_IRELATIVE: 4389 return reloc_class_ifunc; 4390 case R_X86_64_RELATIVE: 4391 case R_X86_64_RELATIVE64: 4392 return reloc_class_relative; 4393 case R_X86_64_JUMP_SLOT: 4394 return reloc_class_plt; 4395 case R_X86_64_COPY: 4396 return reloc_class_copy; 4397 default: 4398 return reloc_class_normal; 4399 } 4400 } 4401 4402 /* Finish up the dynamic sections. */ 4403 4404 static bfd_boolean 4405 elf_x86_64_finish_dynamic_sections (bfd *output_bfd, 4406 struct bfd_link_info *info) 4407 { 4408 struct elf_x86_link_hash_table *htab; 4409 4410 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info); 4411 if (htab == NULL) 4412 return FALSE; 4413 4414 if (! 
htab->elf.dynamic_sections_created) 4415 return TRUE; 4416 4417 if (htab->elf.splt && htab->elf.splt->size > 0) 4418 { 4419 elf_section_data (htab->elf.splt->output_section) 4420 ->this_hdr.sh_entsize = htab->plt.plt_entry_size; 4421 4422 if (htab->plt.has_plt0) 4423 { 4424 /* Fill in the special first entry in the procedure linkage 4425 table. */ 4426 memcpy (htab->elf.splt->contents, 4427 htab->lazy_plt->plt0_entry, 4428 htab->lazy_plt->plt0_entry_size); 4429 /* Add offset for pushq GOT+8(%rip), since the instruction 4430 uses 6 bytes subtract this value. */ 4431 bfd_put_32 (output_bfd, 4432 (htab->elf.sgotplt->output_section->vma 4433 + htab->elf.sgotplt->output_offset 4434 + 8 4435 - htab->elf.splt->output_section->vma 4436 - htab->elf.splt->output_offset 4437 - 6), 4438 (htab->elf.splt->contents 4439 + htab->lazy_plt->plt0_got1_offset)); 4440 /* Add offset for the PC-relative instruction accessing 4441 GOT+16, subtracting the offset to the end of that 4442 instruction. */ 4443 bfd_put_32 (output_bfd, 4444 (htab->elf.sgotplt->output_section->vma 4445 + htab->elf.sgotplt->output_offset 4446 + 16 4447 - htab->elf.splt->output_section->vma 4448 - htab->elf.splt->output_offset 4449 - htab->lazy_plt->plt0_got2_insn_end), 4450 (htab->elf.splt->contents 4451 + htab->lazy_plt->plt0_got2_offset)); 4452 } 4453 4454 if (htab->tlsdesc_plt) 4455 { 4456 bfd_put_64 (output_bfd, (bfd_vma) 0, 4457 htab->elf.sgot->contents + htab->tlsdesc_got); 4458 4459 memcpy (htab->elf.splt->contents + htab->tlsdesc_plt, 4460 htab->lazy_plt->plt_tlsdesc_entry, 4461 htab->lazy_plt->plt_tlsdesc_entry_size); 4462 4463 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4 4464 bytes and the instruction uses 6 bytes, subtract these 4465 values. */ 4466 bfd_put_32 (output_bfd, 4467 (htab->elf.sgotplt->output_section->vma 4468 + htab->elf.sgotplt->output_offset 4469 + 8 4470 - htab->elf.splt->output_section->vma 4471 - htab->elf.splt->output_offset 4472 - htab->tlsdesc_plt 4473 - htab->lazy_plt->plt_tlsdesc_got1_insn_end), 4474 (htab->elf.splt->contents 4475 + htab->tlsdesc_plt 4476 + htab->lazy_plt->plt_tlsdesc_got1_offset)); 4477 /* Add offset for indirect branch via GOT+TDG, where TDG 4478 stands for htab->tlsdesc_got, subtracting the offset 4479 to the end of that instruction. */ 4480 bfd_put_32 (output_bfd, 4481 (htab->elf.sgot->output_section->vma 4482 + htab->elf.sgot->output_offset 4483 + htab->tlsdesc_got 4484 - htab->elf.splt->output_section->vma 4485 - htab->elf.splt->output_offset 4486 - htab->tlsdesc_plt 4487 - htab->lazy_plt->plt_tlsdesc_got2_insn_end), 4488 (htab->elf.splt->contents 4489 + htab->tlsdesc_plt 4490 + htab->lazy_plt->plt_tlsdesc_got2_offset)); 4491 } 4492 } 4493 4494 /* Fill PLT entries for undefined weak symbols in PIE. */ 4495 if (bfd_link_pie (info)) 4496 bfd_hash_traverse (&info->hash->table, 4497 elf_x86_64_pie_finish_undefweak_symbol, 4498 info); 4499 4500 return TRUE; 4501 } 4502 4503 /* Fill PLT/GOT entries and allocate dynamic relocations for local 4504 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table. 4505 It has to be done before elf_link_sort_relocs is called so that 4506 dynamic relocations are properly sorted. 
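These symbols live in loc_hash_table, so they are finished here by walking that table with elf_x86_64_finish_local_dynamic_symbol.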
*/ 4507 4508 static bfd_boolean 4509 elf_x86_64_output_arch_local_syms 4510 (bfd *output_bfd ATTRIBUTE_UNUSED, 4511 struct bfd_link_info *info, 4512 void *flaginfo ATTRIBUTE_UNUSED, 4513 int (*func) (void *, const char *, 4514 Elf_Internal_Sym *, 4515 asection *, 4516 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED) 4517 { 4518 struct elf_x86_link_hash_table *htab 4519 = elf_x86_hash_table (info, X86_64_ELF_DATA); 4520 if (htab == NULL) 4521 return FALSE; 4522 4523 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */ 4524 htab_traverse (htab->loc_hash_table, 4525 elf_x86_64_finish_local_dynamic_symbol, 4526 info); 4527 4528 return TRUE; 4529 } 4530 4531 /* Forward declaration. */ 4532 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt; 4533 4534 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all 4535 dynamic relocations. */ 4536 4537 static long 4538 elf_x86_64_get_synthetic_symtab (bfd *abfd, 4539 long symcount ATTRIBUTE_UNUSED, 4540 asymbol **syms ATTRIBUTE_UNUSED, 4541 long dynsymcount, 4542 asymbol **dynsyms, 4543 asymbol **ret) 4544 { 4545 long count, i, n; 4546 int j; 4547 bfd_byte *plt_contents; 4548 long relsize; 4549 const struct elf_x86_lazy_plt_layout *lazy_plt; 4550 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt; 4551 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt; 4552 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt; 4553 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt; 4554 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt; 4555 asection *plt; 4556 enum elf_x86_plt_type plt_type; 4557 struct elf_x86_plt plts[] = 4558 { 4559 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 }, 4560 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }, 4561 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 }, 4562 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 }, 4563 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 } 4564 }; 4565 4566 *ret = NULL; 4567 4568 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0) 4569 return 0; 4570 4571 if (dynsymcount <= 0) 4572 return 0; 4573 4574 relsize = bfd_get_dynamic_reloc_upper_bound (abfd); 4575 if (relsize <= 0) 4576 return -1; 4577 4578 if (get_elf_x86_backend_data (abfd)->target_os != is_nacl) 4579 { 4580 lazy_plt = &elf_x86_64_lazy_plt; 4581 non_lazy_plt = &elf_x86_64_non_lazy_plt; 4582 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt; 4583 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt; 4584 if (ABI_64_P (abfd)) 4585 { 4586 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt; 4587 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt; 4588 } 4589 else 4590 { 4591 lazy_ibt_plt = &elf_x32_lazy_ibt_plt; 4592 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; 4593 } 4594 } 4595 else 4596 { 4597 lazy_plt = &elf_x86_64_nacl_plt; 4598 non_lazy_plt = NULL; 4599 lazy_bnd_plt = NULL; 4600 non_lazy_bnd_plt = NULL; 4601 lazy_ibt_plt = NULL; 4602 non_lazy_ibt_plt = NULL; 4603 } 4604 4605 count = 0; 4606 for (j = 0; plts[j].name != NULL; j++) 4607 { 4608 plt = bfd_get_section_by_name (abfd, plts[j].name); 4609 if (plt == NULL || plt->size == 0) 4610 continue; 4611 4612 /* Get the PLT section contents. */ 4613 plt_contents = (bfd_byte *) bfd_malloc (plt->size); 4614 if (plt_contents == NULL) 4615 break; 4616 if (!bfd_get_section_contents (abfd, (asection *) plt, 4617 plt_contents, 0, plt->size)) 4618 { 4619 free (plt_contents); 4620 break; 4621 } 4622 4623 /* Check what kind of PLT it is. 
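A lazy PLT is recognized by comparing PLT0 against the known templates (and, for the BND/IBT second-PLT variants, also the first real entry); non-lazy and second PLTs are matched on a single entry template. Sections that match none of the templates are skipped.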
*/ 4624 plt_type = plt_unknown; 4625 if (plts[j].type == plt_unknown 4626 && (plt->size >= (lazy_plt->plt_entry_size 4627 + lazy_plt->plt_entry_size))) 4628 { 4629 /* Match lazy PLT first. Need to check the first two 4630 instructions. */ 4631 if ((memcmp (plt_contents, lazy_plt->plt0_entry, 4632 lazy_plt->plt0_got1_offset) == 0) 4633 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6, 4634 2) == 0)) 4635 plt_type = plt_lazy; 4636 else if (lazy_bnd_plt != NULL 4637 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry, 4638 lazy_bnd_plt->plt0_got1_offset) == 0) 4639 && (memcmp (plt_contents + 6, 4640 lazy_bnd_plt->plt0_entry + 6, 3) == 0)) 4641 { 4642 plt_type = plt_lazy | plt_second; 4643 /* The fist entry in the lazy IBT PLT is the same as the 4644 lazy BND PLT. */ 4645 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size, 4646 lazy_ibt_plt->plt_entry, 4647 lazy_ibt_plt->plt_got_offset) == 0)) 4648 lazy_plt = lazy_ibt_plt; 4649 else 4650 lazy_plt = lazy_bnd_plt; 4651 } 4652 } 4653 4654 if (non_lazy_plt != NULL 4655 && (plt_type == plt_unknown || plt_type == plt_non_lazy) 4656 && plt->size >= non_lazy_plt->plt_entry_size) 4657 { 4658 /* Match non-lazy PLT. */ 4659 if (memcmp (plt_contents, non_lazy_plt->plt_entry, 4660 non_lazy_plt->plt_got_offset) == 0) 4661 plt_type = plt_non_lazy; 4662 } 4663 4664 if (plt_type == plt_unknown || plt_type == plt_second) 4665 { 4666 if (non_lazy_bnd_plt != NULL 4667 && plt->size >= non_lazy_bnd_plt->plt_entry_size 4668 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry, 4669 non_lazy_bnd_plt->plt_got_offset) == 0)) 4670 { 4671 /* Match BND PLT. */ 4672 plt_type = plt_second; 4673 non_lazy_plt = non_lazy_bnd_plt; 4674 } 4675 else if (non_lazy_ibt_plt != NULL 4676 && plt->size >= non_lazy_ibt_plt->plt_entry_size 4677 && (memcmp (plt_contents, 4678 non_lazy_ibt_plt->plt_entry, 4679 non_lazy_ibt_plt->plt_got_offset) == 0)) 4680 { 4681 /* Match IBT PLT. */ 4682 plt_type = plt_second; 4683 non_lazy_plt = non_lazy_ibt_plt; 4684 } 4685 } 4686 4687 if (plt_type == plt_unknown) 4688 { 4689 free (plt_contents); 4690 continue; 4691 } 4692 4693 plts[j].sec = plt; 4694 plts[j].type = plt_type; 4695 4696 if ((plt_type & plt_lazy)) 4697 { 4698 plts[j].plt_got_offset = lazy_plt->plt_got_offset; 4699 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size; 4700 plts[j].plt_entry_size = lazy_plt->plt_entry_size; 4701 /* Skip PLT0 in lazy PLT. */ 4702 i = 1; 4703 } 4704 else 4705 { 4706 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset; 4707 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size; 4708 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size; 4709 i = 0; 4710 } 4711 4712 /* Skip lazy PLT when the second PLT is used. */ 4713 if (plt_type == (plt_lazy | plt_second)) 4714 plts[j].count = 0; 4715 else 4716 { 4717 n = plt->size / plts[j].plt_entry_size; 4718 plts[j].count = n; 4719 count += n - i; 4720 } 4721 4722 plts[j].contents = plt_contents; 4723 } 4724 4725 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize, 4726 (bfd_vma) 0, plts, dynsyms, 4727 ret); 4728 } 4729 4730 /* Handle an x86-64 specific section when reading an object file. This 4731 is called when elfcode.h finds a section with an unknown type. */ 4732 4733 static bfd_boolean 4734 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr, 4735 const char *name, int shindex) 4736 { 4737 if (hdr->sh_type != SHT_X86_64_UNWIND) 4738 return FALSE; 4739 4740 if (! 
_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) 4741 return FALSE; 4742 4743 return TRUE; 4744 } 4745 4746 /* Hook called by the linker routine which adds symbols from an object 4747 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead 4748 of .bss. */ 4749 4750 static bfd_boolean 4751 elf_x86_64_add_symbol_hook (bfd *abfd, 4752 struct bfd_link_info *info ATTRIBUTE_UNUSED, 4753 Elf_Internal_Sym *sym, 4754 const char **namep ATTRIBUTE_UNUSED, 4755 flagword *flagsp ATTRIBUTE_UNUSED, 4756 asection **secp, 4757 bfd_vma *valp) 4758 { 4759 asection *lcomm; 4760 4761 switch (sym->st_shndx) 4762 { 4763 case SHN_X86_64_LCOMMON: 4764 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON"); 4765 if (lcomm == NULL) 4766 { 4767 lcomm = bfd_make_section_with_flags (abfd, 4768 "LARGE_COMMON", 4769 (SEC_ALLOC 4770 | SEC_IS_COMMON 4771 | SEC_LINKER_CREATED)); 4772 if (lcomm == NULL) 4773 return FALSE; 4774 elf_section_flags (lcomm) |= SHF_X86_64_LARGE; 4775 } 4776 *secp = lcomm; 4777 *valp = sym->st_size; 4778 return TRUE; 4779 } 4780 4781 return TRUE; 4782 } 4783 4784 4785 /* Given a BFD section, try to locate the corresponding ELF section 4786 index. */ 4787 4788 static bfd_boolean 4789 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED, 4790 asection *sec, int *index_return) 4791 { 4792 if (sec == &_bfd_elf_large_com_section) 4793 { 4794 *index_return = SHN_X86_64_LCOMMON; 4795 return TRUE; 4796 } 4797 return FALSE; 4798 } 4799 4800 /* Process a symbol. */ 4801 4802 static void 4803 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, 4804 asymbol *asym) 4805 { 4806 elf_symbol_type *elfsym = (elf_symbol_type *) asym; 4807 4808 switch (elfsym->internal_elf_sym.st_shndx) 4809 { 4810 case SHN_X86_64_LCOMMON: 4811 asym->section = &_bfd_elf_large_com_section; 4812 asym->value = elfsym->internal_elf_sym.st_size; 4813 /* Common symbol doesn't set BSF_GLOBAL. */ 4814 asym->flags &= ~BSF_GLOBAL; 4815 break; 4816 } 4817 } 4818 4819 static bfd_boolean 4820 elf_x86_64_common_definition (Elf_Internal_Sym *sym) 4821 { 4822 return (sym->st_shndx == SHN_COMMON 4823 || sym->st_shndx == SHN_X86_64_LCOMMON); 4824 } 4825 4826 static unsigned int 4827 elf_x86_64_common_section_index (asection *sec) 4828 { 4829 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0) 4830 return SHN_COMMON; 4831 else 4832 return SHN_X86_64_LCOMMON; 4833 } 4834 4835 static asection * 4836 elf_x86_64_common_section (asection *sec) 4837 { 4838 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0) 4839 return bfd_com_section_ptr; 4840 else 4841 return &_bfd_elf_large_com_section; 4842 } 4843 4844 static bfd_boolean 4845 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h, 4846 const Elf_Internal_Sym *sym, 4847 asection **psec, 4848 bfd_boolean newdef, 4849 bfd_boolean olddef, 4850 bfd *oldbfd, 4851 const asection *oldsec) 4852 { 4853 /* A normal common symbol and a large common symbol result in a 4854 normal common symbol. We turn the large common symbol into a 4855 normal one. 
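When the old symbol is the large one, its common section is replaced by a plain COMMON section without SHF_X86_64_LARGE; when the new symbol is the large one, it is simply redirected to the normal common section.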
*/ 4856 if (!olddef 4857 && h->root.type == bfd_link_hash_common 4858 && !newdef 4859 && bfd_is_com_section (*psec) 4860 && oldsec != *psec) 4861 { 4862 if (sym->st_shndx == SHN_COMMON 4863 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0) 4864 { 4865 h->root.u.c.p->section 4866 = bfd_make_section_old_way (oldbfd, "COMMON"); 4867 h->root.u.c.p->section->flags = SEC_ALLOC; 4868 } 4869 else if (sym->st_shndx == SHN_X86_64_LCOMMON 4870 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0) 4871 *psec = bfd_com_section_ptr; 4872 } 4873 4874 return TRUE; 4875 } 4876 4877 static int 4878 elf_x86_64_additional_program_headers (bfd *abfd, 4879 struct bfd_link_info *info ATTRIBUTE_UNUSED) 4880 { 4881 asection *s; 4882 int count = 0; 4883 4884 /* Check to see if we need a large readonly segment. */ 4885 s = bfd_get_section_by_name (abfd, ".lrodata"); 4886 if (s && (s->flags & SEC_LOAD)) 4887 count++; 4888 4889 /* Check to see if we need a large data segment. Since .lbss sections 4890 is placed right after the .bss section, there should be no need for 4891 a large data segment just because of .lbss. */ 4892 s = bfd_get_section_by_name (abfd, ".ldata"); 4893 if (s && (s->flags & SEC_LOAD)) 4894 count++; 4895 4896 return count; 4897 } 4898 4899 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */ 4900 4901 static bfd_boolean 4902 elf_x86_64_relocs_compatible (const bfd_target *input, 4903 const bfd_target *output) 4904 { 4905 return ((xvec_get_elf_backend_data (input)->s->elfclass 4906 == xvec_get_elf_backend_data (output)->s->elfclass) 4907 && _bfd_elf_relocs_compatible (input, output)); 4908 } 4909 4910 /* Set up x86-64 GNU properties. Return the first relocatable ELF input 4911 with GNU properties if found. Otherwise, return NULL. */ 4912 4913 static bfd * 4914 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info) 4915 { 4916 struct elf_x86_init_table init_table; 4917 4918 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit 4919 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit 4920 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit) 4921 != (int) R_X86_64_GNU_VTINHERIT) 4922 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit) 4923 != (int) R_X86_64_GNU_VTENTRY)) 4924 abort (); 4925 4926 /* This is unused for x86-64. 
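A nop (0x90) is stored anyway so that the field does not stay uninitialized; only the i386 backend, whose PLT0 is shorter than a regular PLT entry, actually pads with this byte.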
*/ 4927 init_table.plt0_pad_byte = 0x90; 4928 4929 if (get_elf_x86_backend_data (info->output_bfd)->target_os != is_nacl) 4930 { 4931 if (info->bndplt) 4932 { 4933 init_table.lazy_plt = &elf_x86_64_lazy_bnd_plt; 4934 init_table.non_lazy_plt = &elf_x86_64_non_lazy_bnd_plt; 4935 } 4936 else 4937 { 4938 init_table.lazy_plt = &elf_x86_64_lazy_plt; 4939 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt; 4940 } 4941 4942 if (ABI_64_P (info->output_bfd)) 4943 { 4944 init_table.lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt; 4945 init_table.non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt; 4946 } 4947 else 4948 { 4949 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt; 4950 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; 4951 } 4952 } 4953 else 4954 { 4955 init_table.lazy_plt = &elf_x86_64_nacl_plt; 4956 init_table.non_lazy_plt = NULL; 4957 init_table.lazy_ibt_plt = NULL; 4958 init_table.non_lazy_ibt_plt = NULL; 4959 } 4960 4961 if (ABI_64_P (info->output_bfd)) 4962 { 4963 init_table.r_info = elf64_r_info; 4964 init_table.r_sym = elf64_r_sym; 4965 } 4966 else 4967 { 4968 init_table.r_info = elf32_r_info; 4969 init_table.r_sym = elf32_r_sym; 4970 } 4971 4972 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table); 4973 } 4974 4975 static const struct bfd_elf_special_section 4976 elf_x86_64_special_sections[]= 4977 { 4978 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 4979 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, 4980 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE}, 4981 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 4982 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 4983 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, 4984 { NULL, 0, 0, 0, 0 } 4985 }; 4986 4987 #define TARGET_LITTLE_SYM x86_64_elf64_vec 4988 #define TARGET_LITTLE_NAME "elf64-x86-64" 4989 #define ELF_ARCH bfd_arch_i386 4990 #define ELF_TARGET_ID X86_64_ELF_DATA 4991 #define ELF_MACHINE_CODE EM_X86_64 4992 #if DEFAULT_LD_Z_SEPARATE_CODE 4993 # define ELF_MAXPAGESIZE 0x1000 4994 #else 4995 # define ELF_MAXPAGESIZE 0x200000 4996 #endif 4997 #define ELF_MINPAGESIZE 0x1000 4998 #define ELF_COMMONPAGESIZE 0x1000 4999 5000 #define elf_backend_can_gc_sections 1 5001 #define elf_backend_can_refcount 1 5002 #define elf_backend_want_got_plt 1 5003 #define elf_backend_plt_readonly 1 5004 #define elf_backend_want_plt_sym 0 5005 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3) 5006 #define elf_backend_rela_normal 1 5007 #define elf_backend_plt_alignment 4 5008 #define elf_backend_extern_protected_data 1 5009 #define elf_backend_caches_rawsize 1 5010 #define elf_backend_dtrel_excludes_plt 1 5011 #define elf_backend_want_dynrelro 1 5012 5013 #define elf_info_to_howto elf_x86_64_info_to_howto 5014 5015 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup 5016 #define bfd_elf64_bfd_reloc_name_lookup \ 5017 elf_x86_64_reloc_name_lookup 5018 5019 #define elf_backend_relocs_compatible elf_x86_64_relocs_compatible 5020 #define elf_backend_check_relocs elf_x86_64_check_relocs 5021 #define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections 5022 #define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections 5023 #define elf_backend_finish_dynamic_symbol elf_x86_64_finish_dynamic_symbol 5024 #define 
elf_backend_output_arch_local_syms elf_x86_64_output_arch_local_syms 5025 #define elf_backend_grok_prstatus elf_x86_64_grok_prstatus 5026 #define elf_backend_grok_psinfo elf_x86_64_grok_psinfo 5027 #ifdef CORE_HEADER 5028 #define elf_backend_write_core_note elf_x86_64_write_core_note 5029 #endif 5030 #define elf_backend_reloc_type_class elf_x86_64_reloc_type_class 5031 #define elf_backend_relocate_section elf_x86_64_relocate_section 5032 #define elf_backend_init_index_section _bfd_elf_init_1_index_section 5033 #define elf_backend_object_p elf64_x86_64_elf_object_p 5034 #define bfd_elf64_get_synthetic_symtab elf_x86_64_get_synthetic_symtab 5035 5036 #define elf_backend_section_from_shdr \ 5037 elf_x86_64_section_from_shdr 5038 5039 #define elf_backend_section_from_bfd_section \ 5040 elf_x86_64_elf_section_from_bfd_section 5041 #define elf_backend_add_symbol_hook \ 5042 elf_x86_64_add_symbol_hook 5043 #define elf_backend_symbol_processing \ 5044 elf_x86_64_symbol_processing 5045 #define elf_backend_common_section_index \ 5046 elf_x86_64_common_section_index 5047 #define elf_backend_common_section \ 5048 elf_x86_64_common_section 5049 #define elf_backend_common_definition \ 5050 elf_x86_64_common_definition 5051 #define elf_backend_merge_symbol \ 5052 elf_x86_64_merge_symbol 5053 #define elf_backend_special_sections \ 5054 elf_x86_64_special_sections 5055 #define elf_backend_additional_program_headers \ 5056 elf_x86_64_additional_program_headers 5057 #define elf_backend_setup_gnu_properties \ 5058 elf_x86_64_link_setup_gnu_properties 5059 #define elf_backend_hide_symbol \ 5060 _bfd_x86_elf_hide_symbol 5061 5062 #undef elf64_bed 5063 #define elf64_bed elf64_x86_64_bed 5064 5065 #include "elf64-target.h" 5066 5067 /* CloudABI support. */ 5068 5069 #undef TARGET_LITTLE_SYM 5070 #define TARGET_LITTLE_SYM x86_64_elf64_cloudabi_vec 5071 #undef TARGET_LITTLE_NAME 5072 #define TARGET_LITTLE_NAME "elf64-x86-64-cloudabi" 5073 5074 #undef ELF_OSABI 5075 #define ELF_OSABI ELFOSABI_CLOUDABI 5076 5077 #undef elf64_bed 5078 #define elf64_bed elf64_x86_64_cloudabi_bed 5079 5080 #include "elf64-target.h" 5081 5082 /* FreeBSD support. */ 5083 5084 #undef TARGET_LITTLE_SYM 5085 #define TARGET_LITTLE_SYM x86_64_elf64_fbsd_vec 5086 #undef TARGET_LITTLE_NAME 5087 #define TARGET_LITTLE_NAME "elf64-x86-64-freebsd" 5088 5089 #undef ELF_OSABI 5090 #define ELF_OSABI ELFOSABI_FREEBSD 5091 5092 #undef elf64_bed 5093 #define elf64_bed elf64_x86_64_fbsd_bed 5094 5095 #include "elf64-target.h" 5096 5097 /* Solaris 2 support. */ 5098 5099 #undef TARGET_LITTLE_SYM 5100 #define TARGET_LITTLE_SYM x86_64_elf64_sol2_vec 5101 #undef TARGET_LITTLE_NAME 5102 #define TARGET_LITTLE_NAME "elf64-x86-64-sol2" 5103 5104 static const struct elf_x86_backend_data elf_x86_64_solaris_arch_bed = 5105 { 5106 is_solaris /* os */ 5107 }; 5108 5109 #undef elf_backend_arch_data 5110 #define elf_backend_arch_data &elf_x86_64_solaris_arch_bed 5111 5112 /* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE 5113 objects won't be recognized. */ 5114 #undef ELF_OSABI 5115 5116 #undef elf64_bed 5117 #define elf64_bed elf64_x86_64_sol2_bed 5118 5119 /* The 64-bit static TLS arena size is rounded to the nearest 16-byte 5120 boundary. */ 5121 #undef elf_backend_static_tls_alignment 5122 #define elf_backend_static_tls_alignment 16 5123 5124 /* The Solaris 2 ABI requires a plt symbol on all platforms. 5125 5126 Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output 5127 File, p.63. 
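elf_backend_want_plt_sym is therefore set to 1 for the Solaris targets and reset to 0 again before the non-Solaris targets that follow.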
*/ 5128 #undef elf_backend_want_plt_sym 5129 #define elf_backend_want_plt_sym 1 5130 5131 #undef elf_backend_strtab_flags 5132 #define elf_backend_strtab_flags SHF_STRINGS 5133 5134 static bfd_boolean 5135 elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED, 5136 bfd *obfd ATTRIBUTE_UNUSED, 5137 const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED, 5138 Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED) 5139 { 5140 /* PR 19938: FIXME: Need to add code for setting the sh_info 5141 and sh_link fields of Solaris specific section types. */ 5142 return FALSE; 5143 } 5144 5145 #undef elf_backend_copy_special_section_fields 5146 #define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields 5147 5148 #include "elf64-target.h" 5149 5150 /* Native Client support. */ 5151 5152 static bfd_boolean 5153 elf64_x86_64_nacl_elf_object_p (bfd *abfd) 5154 { 5155 /* Set the right machine number for a NaCl x86-64 ELF64 file. */ 5156 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64_nacl); 5157 return TRUE; 5158 } 5159 5160 #undef TARGET_LITTLE_SYM 5161 #define TARGET_LITTLE_SYM x86_64_elf64_nacl_vec 5162 #undef TARGET_LITTLE_NAME 5163 #define TARGET_LITTLE_NAME "elf64-x86-64-nacl" 5164 #undef elf64_bed 5165 #define elf64_bed elf64_x86_64_nacl_bed 5166 5167 #undef ELF_MAXPAGESIZE 5168 #undef ELF_MINPAGESIZE 5169 #undef ELF_COMMONPAGESIZE 5170 #define ELF_MAXPAGESIZE 0x10000 5171 #define ELF_MINPAGESIZE 0x10000 5172 #define ELF_COMMONPAGESIZE 0x10000 5173 5174 /* Restore defaults. */ 5175 #undef ELF_OSABI 5176 #undef elf_backend_static_tls_alignment 5177 #undef elf_backend_want_plt_sym 5178 #define elf_backend_want_plt_sym 0 5179 #undef elf_backend_strtab_flags 5180 #undef elf_backend_copy_special_section_fields 5181 5182 /* NaCl uses substantially different PLT entries for the same effects. */ 5183 5184 #undef elf_backend_plt_alignment 5185 #define elf_backend_plt_alignment 5 5186 #define NACL_PLT_ENTRY_SIZE 64 5187 #define NACLMASK 0xe0 /* 32-byte alignment mask. */ 5188 5189 static const bfd_byte elf_x86_64_nacl_plt0_entry[NACL_PLT_ENTRY_SIZE] = 5190 { 5191 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 5192 0x4c, 0x8b, 0x1d, 16, 0, 0, 0, /* mov GOT+16(%rip), %r11 */ 5193 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */ 5194 0x4d, 0x01, 0xfb, /* add %r15, %r11 */ 5195 0x41, 0xff, 0xe3, /* jmpq *%r11 */ 5196 5197 /* 9-byte nop sequence to pad out to the next 32-byte boundary. */ 5198 0x66, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw 0x0(%rax,%rax,1) */ 5199 5200 /* 32 bytes of nop to pad out to the standard size. */ 5201 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 5202 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 5203 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 5204 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 5205 0x66, /* excess data16 prefix */ 5206 0x90 /* nop */ 5207 }; 5208 5209 static const bfd_byte elf_x86_64_nacl_plt_entry[NACL_PLT_ENTRY_SIZE] = 5210 { 5211 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, /* mov name@GOTPCREL(%rip),%r11 */ 5212 0x41, 0x83, 0xe3, NACLMASK, /* and $-32, %r11d */ 5213 0x4d, 0x01, 0xfb, /* add %r15, %r11 */ 5214 0x41, 0xff, 0xe3, /* jmpq *%r11 */ 5215 5216 /* 15-byte nop sequence to pad out to the next 32-byte boundary. */ 5217 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 5218 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 5219 5220 /* Lazy GOT entries point here (32-byte aligned). 
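Until a symbol is resolved, its .got.plt slot holds the address of the pushq below (plt_lazy_offset == 32), so the first call pushes the relocation index and jumps back to PLT0 for lazy resolution.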
*/ 5221 0x68, /* pushq immediate */ 5222 0, 0, 0, 0, /* replaced with index into relocation table. */ 5223 0xe9, /* jmp relative */ 5224 0, 0, 0, 0, /* replaced with offset to start of .plt0. */ 5225 5226 /* 22 bytes of nop to pad out to the standard size. */ 5227 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, /* excess data16 prefixes */ 5228 0x2e, 0x0f, 0x1f, 0x84, 0, 0, 0, 0, 0, /* nopw %cs:0x0(%rax,%rax,1) */ 5229 0x0f, 0x1f, 0x80, 0, 0, 0, 0, /* nopl 0x0(%rax) */ 5230 }; 5231 5232 /* .eh_frame covering the .plt section. */ 5233 5234 static const bfd_byte elf_x86_64_nacl_eh_frame_plt[] = 5235 { 5236 #if (PLT_CIE_LENGTH != 20 \ 5237 || PLT_FDE_LENGTH != 36 \ 5238 || PLT_FDE_START_OFFSET != 4 + PLT_CIE_LENGTH + 8 \ 5239 || PLT_FDE_LEN_OFFSET != 4 + PLT_CIE_LENGTH + 12) 5240 # error "Need elf_x86_backend_data parameters for eh_frame_plt offsets!" 5241 #endif 5242 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 5243 0, 0, 0, 0, /* CIE ID */ 5244 1, /* CIE version */ 5245 'z', 'R', 0, /* Augmentation string */ 5246 1, /* Code alignment factor */ 5247 0x78, /* Data alignment factor */ 5248 16, /* Return address column */ 5249 1, /* Augmentation size */ 5250 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 5251 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 5252 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 5253 DW_CFA_nop, DW_CFA_nop, 5254 5255 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 5256 PLT_CIE_LENGTH + 8, 0, 0, 0,/* CIE pointer */ 5257 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 5258 0, 0, 0, 0, /* .plt size goes here */ 5259 0, /* Augmentation size */ 5260 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 5261 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 5262 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 5263 DW_CFA_advance_loc + 58, /* DW_CFA_advance_loc: 58 to __PLT__+64 */ 5264 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 5265 13, /* Block length */ 5266 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 5267 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 5268 DW_OP_const1u, 63, DW_OP_and, DW_OP_const1u, 37, DW_OP_ge, 5269 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 5270 DW_CFA_nop, DW_CFA_nop 5271 }; 5272 5273 static const struct elf_x86_lazy_plt_layout elf_x86_64_nacl_plt = 5274 { 5275 elf_x86_64_nacl_plt0_entry, /* plt0_entry */ 5276 NACL_PLT_ENTRY_SIZE, /* plt0_entry_size */ 5277 elf_x86_64_nacl_plt_entry, /* plt_entry */ 5278 NACL_PLT_ENTRY_SIZE, /* plt_entry_size */ 5279 elf_x86_64_nacl_plt0_entry, /* plt_tlsdesc_entry */ 5280 NACL_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 5281 2, /* plt_tlsdesc_got1_offset */ 5282 9, /* plt_tlsdesc_got2_offset */ 5283 6, /* plt_tlsdesc_got1_insn_end */ 5284 13, /* plt_tlsdesc_got2_insn_end */ 5285 2, /* plt0_got1_offset */ 5286 9, /* plt0_got2_offset */ 5287 13, /* plt0_got2_insn_end */ 5288 3, /* plt_got_offset */ 5289 33, /* plt_reloc_offset */ 5290 38, /* plt_plt_offset */ 5291 7, /* plt_got_insn_size */ 5292 42, /* plt_plt_insn_end */ 5293 32, /* plt_lazy_offset */ 5294 elf_x86_64_nacl_plt0_entry, /* pic_plt0_entry */ 5295 elf_x86_64_nacl_plt_entry, /* pic_plt_entry */ 5296 elf_x86_64_nacl_eh_frame_plt, /* eh_frame_plt */ 5297 sizeof (elf_x86_64_nacl_eh_frame_plt) /* eh_frame_plt_size */ 5298 }; 5299 5300 static const struct elf_x86_backend_data elf_x86_64_nacl_arch_bed = 5301 { 5302 is_nacl /* os */ 5303 }; 5304 5305 #undef elf_backend_arch_data 5306 #define elf_backend_arch_data &elf_x86_64_nacl_arch_bed 5307 5308 #undef elf_backend_object_p 5309 #define elf_backend_object_p 
elf64_x86_64_nacl_elf_object_p 5310 #undef elf_backend_modify_segment_map 5311 #define elf_backend_modify_segment_map nacl_modify_segment_map 5312 #undef elf_backend_modify_program_headers 5313 #define elf_backend_modify_program_headers nacl_modify_program_headers 5314 #undef elf_backend_final_write_processing 5315 #define elf_backend_final_write_processing nacl_final_write_processing 5316 5317 #include "elf64-target.h" 5318 5319 /* Native Client x32 support. */ 5320 5321 static bfd_boolean 5322 elf32_x86_64_nacl_elf_object_p (bfd *abfd) 5323 { 5324 /* Set the right machine number for a NaCl x86-64 ELF32 file. */ 5325 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32_nacl); 5326 return TRUE; 5327 } 5328 5329 #undef TARGET_LITTLE_SYM 5330 #define TARGET_LITTLE_SYM x86_64_elf32_nacl_vec 5331 #undef TARGET_LITTLE_NAME 5332 #define TARGET_LITTLE_NAME "elf32-x86-64-nacl" 5333 #undef elf32_bed 5334 #define elf32_bed elf32_x86_64_nacl_bed 5335 5336 #define bfd_elf32_bfd_reloc_type_lookup \ 5337 elf_x86_64_reloc_type_lookup 5338 #define bfd_elf32_bfd_reloc_name_lookup \ 5339 elf_x86_64_reloc_name_lookup 5340 #define bfd_elf32_get_synthetic_symtab \ 5341 elf_x86_64_get_synthetic_symtab 5342 5343 #undef elf_backend_object_p 5344 #define elf_backend_object_p \ 5345 elf32_x86_64_nacl_elf_object_p 5346 5347 #undef elf_backend_bfd_from_remote_memory 5348 #define elf_backend_bfd_from_remote_memory \ 5349 _bfd_elf32_bfd_from_remote_memory 5350 5351 #undef elf_backend_size_info 5352 #define elf_backend_size_info \ 5353 _bfd_elf32_size_info 5354 5355 #undef elf32_bed 5356 #define elf32_bed elf32_x86_64_bed 5357 5358 #include "elf32-target.h" 5359 5360 /* Restore defaults. */ 5361 #undef elf_backend_object_p 5362 #define elf_backend_object_p elf64_x86_64_elf_object_p 5363 #undef elf_backend_bfd_from_remote_memory 5364 #undef elf_backend_size_info 5365 #undef elf_backend_modify_segment_map 5366 #undef elf_backend_modify_program_headers 5367 #undef elf_backend_final_write_processing 5368 5369 /* Intel L1OM support. */ 5370 5371 static bfd_boolean 5372 elf64_l1om_elf_object_p (bfd *abfd) 5373 { 5374 /* Set the right machine number for an L1OM elf64 file. */ 5375 bfd_default_set_arch_mach (abfd, bfd_arch_l1om, bfd_mach_l1om); 5376 return TRUE; 5377 } 5378 5379 #undef TARGET_LITTLE_SYM 5380 #define TARGET_LITTLE_SYM l1om_elf64_vec 5381 #undef TARGET_LITTLE_NAME 5382 #define TARGET_LITTLE_NAME "elf64-l1om" 5383 #undef ELF_ARCH 5384 #define ELF_ARCH bfd_arch_l1om 5385 5386 #undef ELF_MACHINE_CODE 5387 #define ELF_MACHINE_CODE EM_L1OM 5388 5389 #undef ELF_OSABI 5390 5391 #undef elf64_bed 5392 #define elf64_bed elf64_l1om_bed 5393 5394 #undef elf_backend_object_p 5395 #define elf_backend_object_p elf64_l1om_elf_object_p 5396 5397 /* Restore defaults. */ 5398 #undef ELF_MAXPAGESIZE 5399 #undef ELF_MINPAGESIZE 5400 #undef ELF_COMMONPAGESIZE 5401 #if DEFAULT_LD_Z_SEPARATE_CODE 5402 # define ELF_MAXPAGESIZE 0x1000 5403 #else 5404 # define ELF_MAXPAGESIZE 0x200000 5405 #endif 5406 #define ELF_MINPAGESIZE 0x1000 5407 #define ELF_COMMONPAGESIZE 0x1000 5408 #undef elf_backend_plt_alignment 5409 #define elf_backend_plt_alignment 4 5410 #undef elf_backend_arch_data 5411 #define elf_backend_arch_data &elf_x86_64_arch_bed 5412 5413 #include "elf64-target.h" 5414 5415 /* FreeBSD L1OM support. 
*/ 5416 5417 #undef TARGET_LITTLE_SYM 5418 #define TARGET_LITTLE_SYM l1om_elf64_fbsd_vec 5419 #undef TARGET_LITTLE_NAME 5420 #define TARGET_LITTLE_NAME "elf64-l1om-freebsd" 5421 5422 #undef ELF_OSABI 5423 #define ELF_OSABI ELFOSABI_FREEBSD 5424 5425 #undef elf64_bed 5426 #define elf64_bed elf64_l1om_fbsd_bed 5427 5428 #include "elf64-target.h" 5429 5430 /* Intel K1OM support. */ 5431 5432 static bfd_boolean 5433 elf64_k1om_elf_object_p (bfd *abfd) 5434 { 5435 /* Set the right machine number for an K1OM elf64 file. */ 5436 bfd_default_set_arch_mach (abfd, bfd_arch_k1om, bfd_mach_k1om); 5437 return TRUE; 5438 } 5439 5440 #undef TARGET_LITTLE_SYM 5441 #define TARGET_LITTLE_SYM k1om_elf64_vec 5442 #undef TARGET_LITTLE_NAME 5443 #define TARGET_LITTLE_NAME "elf64-k1om" 5444 #undef ELF_ARCH 5445 #define ELF_ARCH bfd_arch_k1om 5446 5447 #undef ELF_MACHINE_CODE 5448 #define ELF_MACHINE_CODE EM_K1OM 5449 5450 #undef ELF_OSABI 5451 5452 #undef elf64_bed 5453 #define elf64_bed elf64_k1om_bed 5454 5455 #undef elf_backend_object_p 5456 #define elf_backend_object_p elf64_k1om_elf_object_p 5457 5458 #undef elf_backend_static_tls_alignment 5459 5460 #undef elf_backend_want_plt_sym 5461 #define elf_backend_want_plt_sym 0 5462 5463 #include "elf64-target.h" 5464 5465 /* FreeBSD K1OM support. */ 5466 5467 #undef TARGET_LITTLE_SYM 5468 #define TARGET_LITTLE_SYM k1om_elf64_fbsd_vec 5469 #undef TARGET_LITTLE_NAME 5470 #define TARGET_LITTLE_NAME "elf64-k1om-freebsd" 5471 5472 #undef ELF_OSABI 5473 #define ELF_OSABI ELFOSABI_FREEBSD 5474 5475 #undef elf64_bed 5476 #define elf64_bed elf64_k1om_fbsd_bed 5477 5478 #include "elf64-target.h" 5479 5480 /* 32bit x86-64 support. */ 5481 5482 #undef TARGET_LITTLE_SYM 5483 #define TARGET_LITTLE_SYM x86_64_elf32_vec 5484 #undef TARGET_LITTLE_NAME 5485 #define TARGET_LITTLE_NAME "elf32-x86-64" 5486 #undef elf32_bed 5487 5488 #undef ELF_ARCH 5489 #define ELF_ARCH bfd_arch_i386 5490 5491 #undef ELF_MACHINE_CODE 5492 #define ELF_MACHINE_CODE EM_X86_64 5493 5494 #undef ELF_OSABI 5495 5496 #undef elf_backend_object_p 5497 #define elf_backend_object_p \ 5498 elf32_x86_64_elf_object_p 5499 5500 #undef elf_backend_bfd_from_remote_memory 5501 #define elf_backend_bfd_from_remote_memory \ 5502 _bfd_elf32_bfd_from_remote_memory 5503 5504 #undef elf_backend_size_info 5505 #define elf_backend_size_info \ 5506 _bfd_elf32_size_info 5507 5508 #include "elf32-target.h" 5509