1 /* X86-64 specific support for ELF 2 Copyright (C) 2000-2024 Free Software Foundation, Inc. 3 Contributed by Jan Hubicka <jh@suse.cz>. 4 5 This file is part of BFD, the Binary File Descriptor library. 6 7 This program is free software; you can redistribute it and/or modify 8 it under the terms of the GNU General Public License as published by 9 the Free Software Foundation; either version 3 of the License, or 10 (at your option) any later version. 11 12 This program is distributed in the hope that it will be useful, 13 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 GNU General Public License for more details. 16 17 You should have received a copy of the GNU General Public License 18 along with this program; if not, write to the Free Software 19 Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, 20 MA 02110-1301, USA. */ 21 22 #include "elfxx-x86.h" 23 #include "dwarf2.h" 24 #include "libiberty.h" 25 #include "sframe.h" 26 27 #include "opcode/i386.h" 28 29 #ifdef CORE_HEADER 30 #include <stdarg.h> 31 #include CORE_HEADER 32 #endif 33 34 /* In case we're on a 32-bit machine, construct a 64-bit "-1" value. */ 35 #define MINUS_ONE (~ (bfd_vma) 0) 36 37 /* Since both 32-bit and 64-bit x86-64 encode relocation type in the 38 identical manner, we use ELF32_R_TYPE instead of ELF64_R_TYPE to get 39 relocation type. We also use ELF_ST_TYPE instead of ELF64_ST_TYPE 40 since they are the same. */ 41 42 /* The relocation "howto" table. Order of fields: 43 type, rightshift, size, bitsize, pc_relative, bitpos, complain_on_overflow, 44 special_function, name, partial_inplace, src_mask, dst_mask, pcrel_offset. */ 45 static reloc_howto_type x86_64_elf_howto_table[] = 46 { 47 HOWTO(R_X86_64_NONE, 0, 0, 0, false, 0, complain_overflow_dont, 48 bfd_elf_generic_reloc, "R_X86_64_NONE", false, 0, 0x00000000, 49 false), 50 HOWTO(R_X86_64_64, 0, 8, 64, false, 0, complain_overflow_dont, 51 bfd_elf_generic_reloc, "R_X86_64_64", false, 0, MINUS_ONE, 52 false), 53 HOWTO(R_X86_64_PC32, 0, 4, 32, true, 0, complain_overflow_signed, 54 bfd_elf_generic_reloc, "R_X86_64_PC32", false, 0, 0xffffffff, 55 true), 56 HOWTO(R_X86_64_GOT32, 0, 4, 32, false, 0, complain_overflow_signed, 57 bfd_elf_generic_reloc, "R_X86_64_GOT32", false, 0, 0xffffffff, 58 false), 59 HOWTO(R_X86_64_PLT32, 0, 4, 32, true, 0, complain_overflow_signed, 60 bfd_elf_generic_reloc, "R_X86_64_PLT32", false, 0, 0xffffffff, 61 true), 62 HOWTO(R_X86_64_COPY, 0, 4, 32, false, 0, complain_overflow_bitfield, 63 bfd_elf_generic_reloc, "R_X86_64_COPY", false, 0, 0xffffffff, 64 false), 65 HOWTO(R_X86_64_GLOB_DAT, 0, 8, 64, false, 0, complain_overflow_dont, 66 bfd_elf_generic_reloc, "R_X86_64_GLOB_DAT", false, 0, MINUS_ONE, 67 false), 68 HOWTO(R_X86_64_JUMP_SLOT, 0, 8, 64, false, 0, complain_overflow_dont, 69 bfd_elf_generic_reloc, "R_X86_64_JUMP_SLOT", false, 0, MINUS_ONE, 70 false), 71 HOWTO(R_X86_64_RELATIVE, 0, 8, 64, false, 0, complain_overflow_dont, 72 bfd_elf_generic_reloc, "R_X86_64_RELATIVE", false, 0, MINUS_ONE, 73 false), 74 HOWTO(R_X86_64_GOTPCREL, 0, 4, 32, true, 0, complain_overflow_signed, 75 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL", false, 0, 0xffffffff, 76 true), 77 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_unsigned, 78 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff, 79 false), 80 HOWTO(R_X86_64_32S, 0, 4, 32, false, 0, complain_overflow_signed, 81 bfd_elf_generic_reloc, "R_X86_64_32S", false, 0, 0xffffffff, 82 false), 83 
HOWTO(R_X86_64_16, 0, 2, 16, false, 0, complain_overflow_bitfield, 84 bfd_elf_generic_reloc, "R_X86_64_16", false, 0, 0xffff, false), 85 HOWTO(R_X86_64_PC16, 0, 2, 16, true, 0, complain_overflow_bitfield, 86 bfd_elf_generic_reloc, "R_X86_64_PC16", false, 0, 0xffff, true), 87 HOWTO(R_X86_64_8, 0, 1, 8, false, 0, complain_overflow_bitfield, 88 bfd_elf_generic_reloc, "R_X86_64_8", false, 0, 0xff, false), 89 HOWTO(R_X86_64_PC8, 0, 1, 8, true, 0, complain_overflow_signed, 90 bfd_elf_generic_reloc, "R_X86_64_PC8", false, 0, 0xff, true), 91 HOWTO(R_X86_64_DTPMOD64, 0, 8, 64, false, 0, complain_overflow_dont, 92 bfd_elf_generic_reloc, "R_X86_64_DTPMOD64", false, 0, MINUS_ONE, 93 false), 94 HOWTO(R_X86_64_DTPOFF64, 0, 8, 64, false, 0, complain_overflow_dont, 95 bfd_elf_generic_reloc, "R_X86_64_DTPOFF64", false, 0, MINUS_ONE, 96 false), 97 HOWTO(R_X86_64_TPOFF64, 0, 8, 64, false, 0, complain_overflow_dont, 98 bfd_elf_generic_reloc, "R_X86_64_TPOFF64", false, 0, MINUS_ONE, 99 false), 100 HOWTO(R_X86_64_TLSGD, 0, 4, 32, true, 0, complain_overflow_signed, 101 bfd_elf_generic_reloc, "R_X86_64_TLSGD", false, 0, 0xffffffff, 102 true), 103 HOWTO(R_X86_64_TLSLD, 0, 4, 32, true, 0, complain_overflow_signed, 104 bfd_elf_generic_reloc, "R_X86_64_TLSLD", false, 0, 0xffffffff, 105 true), 106 HOWTO(R_X86_64_DTPOFF32, 0, 4, 32, false, 0, complain_overflow_signed, 107 bfd_elf_generic_reloc, "R_X86_64_DTPOFF32", false, 0, 0xffffffff, 108 false), 109 HOWTO(R_X86_64_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed, 110 bfd_elf_generic_reloc, "R_X86_64_GOTTPOFF", false, 0, 0xffffffff, 111 true), 112 HOWTO(R_X86_64_TPOFF32, 0, 4, 32, false, 0, complain_overflow_signed, 113 bfd_elf_generic_reloc, "R_X86_64_TPOFF32", false, 0, 0xffffffff, 114 false), 115 HOWTO(R_X86_64_PC64, 0, 8, 64, true, 0, complain_overflow_dont, 116 bfd_elf_generic_reloc, "R_X86_64_PC64", false, 0, MINUS_ONE, 117 true), 118 HOWTO(R_X86_64_GOTOFF64, 0, 8, 64, false, 0, complain_overflow_dont, 119 bfd_elf_generic_reloc, "R_X86_64_GOTOFF64", false, 0, MINUS_ONE, 120 false), 121 HOWTO(R_X86_64_GOTPC32, 0, 4, 32, true, 0, complain_overflow_signed, 122 bfd_elf_generic_reloc, "R_X86_64_GOTPC32", false, 0, 0xffffffff, 123 true), 124 HOWTO(R_X86_64_GOT64, 0, 8, 64, false, 0, complain_overflow_signed, 125 bfd_elf_generic_reloc, "R_X86_64_GOT64", false, 0, MINUS_ONE, 126 false), 127 HOWTO(R_X86_64_GOTPCREL64, 0, 8, 64, true, 0, complain_overflow_signed, 128 bfd_elf_generic_reloc, "R_X86_64_GOTPCREL64", false, 0, MINUS_ONE, 129 true), 130 HOWTO(R_X86_64_GOTPC64, 0, 8, 64, true, 0, complain_overflow_signed, 131 bfd_elf_generic_reloc, "R_X86_64_GOTPC64", false, 0, MINUS_ONE, 132 true), 133 HOWTO(R_X86_64_GOTPLT64, 0, 8, 64, false, 0, complain_overflow_signed, 134 bfd_elf_generic_reloc, "R_X86_64_GOTPLT64", false, 0, MINUS_ONE, 135 false), 136 HOWTO(R_X86_64_PLTOFF64, 0, 8, 64, false, 0, complain_overflow_signed, 137 bfd_elf_generic_reloc, "R_X86_64_PLTOFF64", false, 0, MINUS_ONE, 138 false), 139 HOWTO(R_X86_64_SIZE32, 0, 4, 32, false, 0, complain_overflow_unsigned, 140 bfd_elf_generic_reloc, "R_X86_64_SIZE32", false, 0, 0xffffffff, 141 false), 142 HOWTO(R_X86_64_SIZE64, 0, 8, 64, false, 0, complain_overflow_dont, 143 bfd_elf_generic_reloc, "R_X86_64_SIZE64", false, 0, MINUS_ONE, 144 false), 145 HOWTO(R_X86_64_GOTPC32_TLSDESC, 0, 4, 32, true, 0, 146 complain_overflow_bitfield, bfd_elf_generic_reloc, 147 "R_X86_64_GOTPC32_TLSDESC", false, 0, 0xffffffff, true), 148 HOWTO(R_X86_64_TLSDESC_CALL, 0, 0, 0, false, 0, 149 complain_overflow_dont, bfd_elf_generic_reloc, 
150 "R_X86_64_TLSDESC_CALL", 151 false, 0, 0, false), 152 HOWTO(R_X86_64_TLSDESC, 0, 8, 64, false, 0, 153 complain_overflow_dont, bfd_elf_generic_reloc, 154 "R_X86_64_TLSDESC", false, 0, MINUS_ONE, false), 155 HOWTO(R_X86_64_IRELATIVE, 0, 8, 64, false, 0, complain_overflow_dont, 156 bfd_elf_generic_reloc, "R_X86_64_IRELATIVE", false, 0, MINUS_ONE, 157 false), 158 HOWTO(R_X86_64_RELATIVE64, 0, 8, 64, false, 0, complain_overflow_dont, 159 bfd_elf_generic_reloc, "R_X86_64_RELATIVE64", false, 0, MINUS_ONE, 160 false), 161 HOWTO(R_X86_64_PC32_BND, 0, 4, 32, true, 0, complain_overflow_signed, 162 bfd_elf_generic_reloc, "R_X86_64_PC32_BND", false, 0, 0xffffffff, 163 true), 164 HOWTO(R_X86_64_PLT32_BND, 0, 4, 32, true, 0, complain_overflow_signed, 165 bfd_elf_generic_reloc, "R_X86_64_PLT32_BND", false, 0, 0xffffffff, 166 true), 167 HOWTO(R_X86_64_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed, 168 bfd_elf_generic_reloc, "R_X86_64_GOTPCRELX", false, 0, 0xffffffff, 169 true), 170 HOWTO(R_X86_64_REX_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed, 171 bfd_elf_generic_reloc, "R_X86_64_REX_GOTPCRELX", false, 0, 0xffffffff, 172 true), 173 HOWTO(R_X86_64_CODE_4_GOTPCRELX, 0, 4, 32, true, 0, complain_overflow_signed, 174 bfd_elf_generic_reloc, "R_X86_64_CODE_4_GOTPCRELX", false, 0, 0xffffffff, 175 true), 176 HOWTO(R_X86_64_CODE_4_GOTTPOFF, 0, 4, 32, true, 0, complain_overflow_signed, 177 bfd_elf_generic_reloc, "R_X86_64_CODE_4_GOTTPOFF", false, 0, 0xffffffff, 178 true), 179 HOWTO(R_X86_64_CODE_4_GOTPC32_TLSDESC, 0, 4, 32, true, 0, 180 complain_overflow_bitfield, bfd_elf_generic_reloc, 181 "R_X86_64_CODE_4_GOTPC32_TLSDESC", false, 0, 0xffffffff, true), 182 HOWTO(R_X86_64_CODE_5_GOTPCRELX, 0, 4, 32, true, 0, 183 complain_overflow_signed, bfd_elf_generic_reloc, 184 "R_X86_64_CODE_5_GOTPCRELX", false, 0, 0xffffffff, true), 185 HOWTO(R_X86_64_CODE_5_GOTTPOFF, 0, 4, 32, true, 0, 186 complain_overflow_signed, bfd_elf_generic_reloc, 187 "R_X86_64_CODE_5_GOTTPOFF", false, 0, 0xffffffff, true), 188 HOWTO(R_X86_64_CODE_5_GOTPC32_TLSDESC, 0, 4, 32, true, 0, 189 complain_overflow_bitfield, bfd_elf_generic_reloc, 190 "R_X86_64_CODE_5_GOTPC32_TLSDESC", false, 0, 0xffffffff, true), 191 HOWTO(R_X86_64_CODE_6_GOTPCRELX, 0, 4, 32, true, 0, 192 complain_overflow_signed, bfd_elf_generic_reloc, 193 "R_X86_64_CODE_6_GOTPCRELX", false, 0, 0xffffffff, true), 194 HOWTO(R_X86_64_CODE_6_GOTTPOFF, 0, 4, 32, true, 0, 195 complain_overflow_signed, bfd_elf_generic_reloc, 196 "R_X86_64_CODE_6_GOTTPOFF", false, 0, 0xffffffff, true), 197 HOWTO(R_X86_64_CODE_6_GOTPC32_TLSDESC, 0, 4, 32, true, 0, 198 complain_overflow_bitfield, bfd_elf_generic_reloc, 199 "R_X86_64_CODE_6_GOTPC32_TLSDESC", false, 0, 0xffffffff, true), 200 201 /* We have a gap in the reloc numbers here. 202 R_X86_64_standard counts the number up to this point, and 203 R_X86_64_vt_offset is the value to subtract from a reloc type of 204 R_X86_64_GNU_VT* to form an index into this table. */ 205 #define R_X86_64_standard (R_X86_64_CODE_6_GOTPC32_TLSDESC + 1) 206 #define R_X86_64_vt_offset (R_X86_64_GNU_VTINHERIT - R_X86_64_standard) 207 208 /* GNU extension to record C++ vtable hierarchy. */ 209 HOWTO (R_X86_64_GNU_VTINHERIT, 0, 8, 0, false, 0, complain_overflow_dont, 210 NULL, "R_X86_64_GNU_VTINHERIT", false, 0, 0, false), 211 212 /* GNU extension to record C++ vtable member usage. 
*/ 213 HOWTO (R_X86_64_GNU_VTENTRY, 0, 8, 0, false, 0, complain_overflow_dont, 214 _bfd_elf_rel_vtable_reloc_fn, "R_X86_64_GNU_VTENTRY", false, 0, 0, 215 false), 216 217 /* Use complain_overflow_bitfield on R_X86_64_32 for x32. */ 218 HOWTO(R_X86_64_32, 0, 4, 32, false, 0, complain_overflow_bitfield, 219 bfd_elf_generic_reloc, "R_X86_64_32", false, 0, 0xffffffff, 220 false) 221 }; 222 223 /* Map BFD relocs to the x86_64 elf relocs. */ 224 struct elf_reloc_map 225 { 226 bfd_reloc_code_real_type bfd_reloc_val; 227 unsigned char elf_reloc_val; 228 }; 229 230 static const struct elf_reloc_map x86_64_reloc_map[] = 231 { 232 { BFD_RELOC_NONE, R_X86_64_NONE, }, 233 { BFD_RELOC_64, R_X86_64_64, }, 234 { BFD_RELOC_32_PCREL, R_X86_64_PC32, }, 235 { BFD_RELOC_X86_64_GOT32, R_X86_64_GOT32,}, 236 { BFD_RELOC_X86_64_PLT32, R_X86_64_PLT32,}, 237 { BFD_RELOC_X86_64_COPY, R_X86_64_COPY, }, 238 { BFD_RELOC_X86_64_GLOB_DAT, R_X86_64_GLOB_DAT, }, 239 { BFD_RELOC_X86_64_JUMP_SLOT, R_X86_64_JUMP_SLOT, }, 240 { BFD_RELOC_X86_64_RELATIVE, R_X86_64_RELATIVE, }, 241 { BFD_RELOC_X86_64_GOTPCREL, R_X86_64_GOTPCREL, }, 242 { BFD_RELOC_32, R_X86_64_32, }, 243 { BFD_RELOC_X86_64_32S, R_X86_64_32S, }, 244 { BFD_RELOC_16, R_X86_64_16, }, 245 { BFD_RELOC_16_PCREL, R_X86_64_PC16, }, 246 { BFD_RELOC_8, R_X86_64_8, }, 247 { BFD_RELOC_8_PCREL, R_X86_64_PC8, }, 248 { BFD_RELOC_X86_64_DTPMOD64, R_X86_64_DTPMOD64, }, 249 { BFD_RELOC_X86_64_DTPOFF64, R_X86_64_DTPOFF64, }, 250 { BFD_RELOC_X86_64_TPOFF64, R_X86_64_TPOFF64, }, 251 { BFD_RELOC_X86_64_TLSGD, R_X86_64_TLSGD, }, 252 { BFD_RELOC_X86_64_TLSLD, R_X86_64_TLSLD, }, 253 { BFD_RELOC_X86_64_DTPOFF32, R_X86_64_DTPOFF32, }, 254 { BFD_RELOC_X86_64_GOTTPOFF, R_X86_64_GOTTPOFF, }, 255 { BFD_RELOC_X86_64_TPOFF32, R_X86_64_TPOFF32, }, 256 { BFD_RELOC_64_PCREL, R_X86_64_PC64, }, 257 { BFD_RELOC_X86_64_GOTOFF64, R_X86_64_GOTOFF64, }, 258 { BFD_RELOC_X86_64_GOTPC32, R_X86_64_GOTPC32, }, 259 { BFD_RELOC_X86_64_GOT64, R_X86_64_GOT64, }, 260 { BFD_RELOC_X86_64_GOTPCREL64,R_X86_64_GOTPCREL64, }, 261 { BFD_RELOC_X86_64_GOTPC64, R_X86_64_GOTPC64, }, 262 { BFD_RELOC_X86_64_GOTPLT64, R_X86_64_GOTPLT64, }, 263 { BFD_RELOC_X86_64_PLTOFF64, R_X86_64_PLTOFF64, }, 264 { BFD_RELOC_SIZE32, R_X86_64_SIZE32, }, 265 { BFD_RELOC_SIZE64, R_X86_64_SIZE64, }, 266 { BFD_RELOC_X86_64_GOTPC32_TLSDESC, R_X86_64_GOTPC32_TLSDESC, }, 267 { BFD_RELOC_X86_64_TLSDESC_CALL, R_X86_64_TLSDESC_CALL, }, 268 { BFD_RELOC_X86_64_TLSDESC, R_X86_64_TLSDESC, }, 269 { BFD_RELOC_X86_64_IRELATIVE, R_X86_64_IRELATIVE, }, 270 { BFD_RELOC_X86_64_PC32_BND, R_X86_64_PC32_BND, }, 271 { BFD_RELOC_X86_64_PLT32_BND, R_X86_64_PLT32_BND, }, 272 { BFD_RELOC_X86_64_GOTPCRELX, R_X86_64_GOTPCRELX, }, 273 { BFD_RELOC_X86_64_REX_GOTPCRELX, R_X86_64_REX_GOTPCRELX, }, 274 { BFD_RELOC_X86_64_CODE_4_GOTPCRELX, R_X86_64_CODE_4_GOTPCRELX, }, 275 { BFD_RELOC_X86_64_CODE_4_GOTTPOFF, R_X86_64_CODE_4_GOTTPOFF, }, 276 { BFD_RELOC_X86_64_CODE_4_GOTPC32_TLSDESC, R_X86_64_CODE_4_GOTPC32_TLSDESC, }, 277 { BFD_RELOC_X86_64_CODE_5_GOTPCRELX, R_X86_64_CODE_5_GOTPCRELX, }, 278 { BFD_RELOC_X86_64_CODE_5_GOTTPOFF, R_X86_64_CODE_5_GOTTPOFF, }, 279 { BFD_RELOC_X86_64_CODE_5_GOTPC32_TLSDESC, R_X86_64_CODE_5_GOTPC32_TLSDESC, }, 280 { BFD_RELOC_X86_64_CODE_6_GOTPCRELX, R_X86_64_CODE_6_GOTPCRELX, }, 281 { BFD_RELOC_X86_64_CODE_6_GOTTPOFF, R_X86_64_CODE_6_GOTTPOFF, }, 282 { BFD_RELOC_X86_64_CODE_6_GOTPC32_TLSDESC, R_X86_64_CODE_6_GOTPC32_TLSDESC, }, 283 { BFD_RELOC_VTABLE_INHERIT, R_X86_64_GNU_VTINHERIT, }, 284 { BFD_RELOC_VTABLE_ENTRY, R_X86_64_GNU_VTENTRY, }, 285 }; 286 287 
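/* An illustrative sketch (not part of the table proper): consumers
   normally reach this mapping through the generic BFD entry point,
   which dispatches to elf_x86_64_reloc_type_lookup below and from
   there to elf_x86_64_rtype_to_howto, e.g.

     reloc_howto_type *howto
       = bfd_reloc_type_lookup (abfd, BFD_RELOC_32_PCREL);
     -- howto->name is "R_X86_64_PC32" and howto->pc_relative is true.

   The lookup is a linear scan of x86_64_reloc_map, so a new relocation
   needs both a HOWTO entry above and a map entry here.  */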
static reloc_howto_type *
elf_x86_64_rtype_to_howto (bfd *abfd, unsigned r_type)
{
  unsigned i;

  if (r_type == (unsigned int) R_X86_64_32)
    {
      if (ABI_64_P (abfd))
	i = r_type;
      else
	i = ARRAY_SIZE (x86_64_elf_howto_table) - 1;
    }
  else if (r_type < (unsigned int) R_X86_64_GNU_VTINHERIT
	   || r_type >= (unsigned int) R_X86_64_max)
    {
      if (r_type >= (unsigned int) R_X86_64_standard)
	{
	  /* xgettext:c-format */
	  _bfd_error_handler (_("%pB: unsupported relocation type %#x"),
			      abfd, r_type);
	  bfd_set_error (bfd_error_bad_value);
	  return NULL;
	}
      i = r_type;
    }
  else
    i = r_type - (unsigned int) R_X86_64_vt_offset;
  BFD_ASSERT (x86_64_elf_howto_table[i].type == r_type);
  return &x86_64_elf_howto_table[i];
}

/* Given a BFD reloc type, return a HOWTO structure.  */
static reloc_howto_type *
elf_x86_64_reloc_type_lookup (bfd *abfd,
			      bfd_reloc_code_real_type code)
{
  unsigned int i;

  for (i = 0; i < sizeof (x86_64_reloc_map) / sizeof (struct elf_reloc_map);
       i++)
    {
      if (x86_64_reloc_map[i].bfd_reloc_val == code)
	return elf_x86_64_rtype_to_howto (abfd,
					  x86_64_reloc_map[i].elf_reloc_val);
    }
  return NULL;
}

static reloc_howto_type *
elf_x86_64_reloc_name_lookup (bfd *abfd,
			      const char *r_name)
{
  unsigned int i;

  if (!ABI_64_P (abfd) && strcasecmp (r_name, "R_X86_64_32") == 0)
    {
      /* Get x32 R_X86_64_32.  */
      reloc_howto_type *reloc
	= &x86_64_elf_howto_table[ARRAY_SIZE (x86_64_elf_howto_table) - 1];
      BFD_ASSERT (reloc->type == (unsigned int) R_X86_64_32);
      return reloc;
    }

  for (i = 0; i < ARRAY_SIZE (x86_64_elf_howto_table); i++)
    if (x86_64_elf_howto_table[i].name != NULL
	&& strcasecmp (x86_64_elf_howto_table[i].name, r_name) == 0)
      return &x86_64_elf_howto_table[i];

  return NULL;
}

/* Given an x86_64 ELF reloc type, fill in an arelent structure.  */

static bool
elf_x86_64_info_to_howto (bfd *abfd, arelent *cache_ptr,
			  Elf_Internal_Rela *dst)
{
  unsigned r_type;

  r_type = ELF32_R_TYPE (dst->r_info);
  cache_ptr->howto = elf_x86_64_rtype_to_howto (abfd, r_type);
  if (cache_ptr->howto == NULL)
    return false;
  BFD_ASSERT (r_type == cache_ptr->howto->type || cache_ptr->howto->type == R_X86_64_NONE);
  return true;
}

/* Support for core dump NOTE sections.  */
static bool
elf_x86_64_grok_prstatus (bfd *abfd, Elf_Internal_Note *note)
{
  int offset;
  size_t size;

  switch (note->descsz)
    {
    default:
      return false;

    case 296:		/* sizeof(struct elf_prstatus) on Linux/x32 */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal = bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid = bfd_get_32 (abfd, note->descdata + 24);

      /* pr_reg */
      offset = 72;
      size = 216;

      break;

    case 336:		/* sizeof(struct elf_prstatus) on Linux/x86_64 */
      /* pr_cursig */
      elf_tdata (abfd)->core->signal
	= bfd_get_16 (abfd, note->descdata + 12);

      /* pr_pid */
      elf_tdata (abfd)->core->lwpid
	= bfd_get_32 (abfd, note->descdata + 32);

      /* pr_reg */
      offset = 112;
      size = 216;

      break;
    }

  /* Make a ".reg/999" section.
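     ("999" is a placeholder for the per-thread id appended by the
     generic helper.)  As a rough cross-check of the magic numbers
     above, assuming the Linux x86-64 note layout:

	size   = 216 = 27 general registers * 8 bytes
		       (struct user_regs_struct)
	offset = 112 = offset of pr_reg within struct elf_prstatus
		       (72 in the x32 layout)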
*/ 416 return _bfd_elfcore_make_pseudosection (abfd, ".reg", 417 size, note->descpos + offset); 418 } 419 420 static bool 421 elf_x86_64_grok_psinfo (bfd *abfd, Elf_Internal_Note *note) 422 { 423 switch (note->descsz) 424 { 425 default: 426 return false; 427 428 case 124: 429 /* sizeof (struct elf_external_linux_prpsinfo32_ugid16). */ 430 elf_tdata (abfd)->core->pid 431 = bfd_get_32 (abfd, note->descdata + 12); 432 elf_tdata (abfd)->core->program 433 = _bfd_elfcore_strndup (abfd, note->descdata + 28, 16); 434 elf_tdata (abfd)->core->command 435 = _bfd_elfcore_strndup (abfd, note->descdata + 44, 80); 436 break; 437 438 case 128: 439 /* sizeof (struct elf_external_linux_prpsinfo32_ugid32). */ 440 elf_tdata (abfd)->core->pid 441 = bfd_get_32 (abfd, note->descdata + 12); 442 elf_tdata (abfd)->core->program 443 = _bfd_elfcore_strndup (abfd, note->descdata + 32, 16); 444 elf_tdata (abfd)->core->command 445 = _bfd_elfcore_strndup (abfd, note->descdata + 48, 80); 446 break; 447 448 case 136: 449 /* sizeof (struct elf_prpsinfo) on Linux/x86_64. */ 450 elf_tdata (abfd)->core->pid 451 = bfd_get_32 (abfd, note->descdata + 24); 452 elf_tdata (abfd)->core->program 453 = _bfd_elfcore_strndup (abfd, note->descdata + 40, 16); 454 elf_tdata (abfd)->core->command 455 = _bfd_elfcore_strndup (abfd, note->descdata + 56, 80); 456 } 457 458 /* Note that for some reason, a spurious space is tacked 459 onto the end of the args in some (at least one anyway) 460 implementations, so strip it off if it exists. */ 461 462 { 463 char *command = elf_tdata (abfd)->core->command; 464 int n = strlen (command); 465 466 if (0 < n && command[n - 1] == ' ') 467 command[n - 1] = '\0'; 468 } 469 470 return true; 471 } 472 473 #ifdef CORE_HEADER 474 # if GCC_VERSION >= 8000 475 # pragma GCC diagnostic push 476 # pragma GCC diagnostic ignored "-Wstringop-truncation" 477 # endif 478 static char * 479 elf_x86_64_write_core_note (bfd *abfd, char *buf, int *bufsiz, 480 int note_type, ...) 
481 { 482 const struct elf_backend_data *bed = get_elf_backend_data (abfd); 483 va_list ap; 484 const char *fname, *psargs; 485 long pid; 486 int cursig; 487 const void *gregs; 488 489 switch (note_type) 490 { 491 default: 492 return NULL; 493 494 case NT_PRPSINFO: 495 va_start (ap, note_type); 496 fname = va_arg (ap, const char *); 497 psargs = va_arg (ap, const char *); 498 va_end (ap); 499 500 if (bed->s->elfclass == ELFCLASS32) 501 { 502 prpsinfo32_t data; 503 memset (&data, 0, sizeof (data)); 504 strncpy (data.pr_fname, fname, sizeof (data.pr_fname)); 505 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs)); 506 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 507 &data, sizeof (data)); 508 } 509 else 510 { 511 prpsinfo64_t data; 512 memset (&data, 0, sizeof (data)); 513 strncpy (data.pr_fname, fname, sizeof (data.pr_fname)); 514 strncpy (data.pr_psargs, psargs, sizeof (data.pr_psargs)); 515 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 516 &data, sizeof (data)); 517 } 518 /* NOTREACHED */ 519 520 case NT_PRSTATUS: 521 va_start (ap, note_type); 522 pid = va_arg (ap, long); 523 cursig = va_arg (ap, int); 524 gregs = va_arg (ap, const void *); 525 va_end (ap); 526 527 if (bed->s->elfclass == ELFCLASS32) 528 { 529 if (bed->elf_machine_code == EM_X86_64) 530 { 531 prstatusx32_t prstat; 532 memset (&prstat, 0, sizeof (prstat)); 533 prstat.pr_pid = pid; 534 prstat.pr_cursig = cursig; 535 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); 536 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 537 &prstat, sizeof (prstat)); 538 } 539 else 540 { 541 prstatus32_t prstat; 542 memset (&prstat, 0, sizeof (prstat)); 543 prstat.pr_pid = pid; 544 prstat.pr_cursig = cursig; 545 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); 546 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 547 &prstat, sizeof (prstat)); 548 } 549 } 550 else 551 { 552 prstatus64_t prstat; 553 memset (&prstat, 0, sizeof (prstat)); 554 prstat.pr_pid = pid; 555 prstat.pr_cursig = cursig; 556 memcpy (&prstat.pr_reg, gregs, sizeof (prstat.pr_reg)); 557 return elfcore_write_note (abfd, buf, bufsiz, "CORE", note_type, 558 &prstat, sizeof (prstat)); 559 } 560 } 561 /* NOTREACHED */ 562 } 563 # if GCC_VERSION >= 8000 564 # pragma GCC diagnostic pop 565 # endif 566 #endif 567 568 /* Functions for the x86-64 ELF linker. */ 569 570 /* The size in bytes of an entry in the global offset table. */ 571 572 #define GOT_ENTRY_SIZE 8 573 574 /* The size in bytes of an entry in the lazy procedure linkage table. */ 575 576 #define LAZY_PLT_ENTRY_SIZE 16 577 578 /* The size in bytes of an entry in the non-lazy procedure linkage 579 table. */ 580 581 #define NON_LAZY_PLT_ENTRY_SIZE 8 582 583 /* The first entry in a lazy procedure linkage table looks like this. 584 See the SVR4 ABI i386 supplement and the x86-64 ABI to see how this 585 works. */ 586 587 static const bfd_byte elf_x86_64_lazy_plt0_entry[LAZY_PLT_ENTRY_SIZE] = 588 { 589 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 590 0xff, 0x25, 16, 0, 0, 0, /* jmpq *GOT+16(%rip) */ 591 0x0f, 0x1f, 0x40, 0x00 /* nopl 0(%rax) */ 592 }; 593 594 /* Subsequent entries in a lazy procedure linkage table look like this. */ 595 596 static const bfd_byte elf_x86_64_lazy_plt_entry[LAZY_PLT_ENTRY_SIZE] = 597 { 598 0xff, 0x25, /* jmpq *name@GOTPC(%rip) */ 599 0, 0, 0, 0, /* replaced with offset to this symbol in .got. */ 600 0x68, /* pushq immediate */ 601 0, 0, 0, 0, /* replaced with index into relocation table. 
 */
  0xe9,			/* jmp relative */
  0, 0, 0, 0		/* replaced with offset to start of .plt0.  */
};

/* The first entry in a lazy procedure linkage table with BND prefix
   looks like this.  */

static const bfd_byte elf_x86_64_lazy_bnd_plt0_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x35, 8, 0, 0, 0,	  /* pushq GOT+8(%rip)  */
  0xf2, 0xff, 0x25, 16, 0, 0, 0,  /* bnd jmpq *GOT+16(%rip)  */
  0x0f, 0x1f, 0			  /* nopl (%rax)  */
};

/* Subsequent entries for branches with BND prefix in a lazy procedure
   linkage table look like this.  */

static const bfd_byte elf_x86_64_lazy_bnd_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0x68, 0, 0, 0, 0,		/* pushq immediate  */
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative  */
  0x0f, 0x1f, 0x44, 0, 0	/* nopl 0(%rax,%rax,1)  */
};

/* The first entry in the IBT-enabled lazy procedure linkage table is
   the same as the lazy PLT with BND prefix so that bound registers are
   preserved when control is passed to the dynamic linker.  Subsequent
   entries for an IBT-enabled lazy procedure linkage table look like
   this.  */

static const bfd_byte elf_x86_64_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64  */
  0x68, 0, 0, 0, 0,		/* pushq immediate  */
  0xf2, 0xe9, 0, 0, 0, 0,	/* bnd jmpq relative  */
  0x90				/* nop  */
};

/* The first entry in the x32 IBT-enabled lazy procedure linkage table
   is the same as the normal lazy PLT.  Subsequent entries for an
   x32 IBT-enabled lazy procedure linkage table look like this.  */

static const bfd_byte elf_x32_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64  */
  0x68, 0, 0, 0, 0,		/* pushq immediate  */
  0xe9, 0, 0, 0, 0,		/* jmpq relative  */
  0x66, 0x90			/* xchg %ax,%ax  */
};

/* Entries in the non-lazy procedure linkage table look like this.  */

static const bfd_byte elf_x86_64_non_lazy_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
  0xff, 0x25,		/* jmpq *name@GOTPC(%rip)  */
  0, 0, 0, 0,		/* replaced with offset to this symbol in .got.  */
  0x66, 0x90		/* xchg %ax,%ax  */
};

/* Entries for branches with BND prefix in the non-lazy procedure
   linkage table look like this.  */

static const bfd_byte elf_x86_64_non_lazy_bnd_plt_entry[NON_LAZY_PLT_ENTRY_SIZE] =
{
  0xf2, 0xff, 0x25,	/* bnd jmpq *name@GOTPC(%rip)  */
  0, 0, 0, 0,		/* replaced with offset to this symbol in .got.  */
  0x90			/* nop  */
};

/* Entries for branches with IBT enabled in the non-lazy procedure
   linkage table look like this.  They have the same size as the lazy
   PLT entry.  */

static const bfd_byte elf_x86_64_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64  */
  0xf2, 0xff, 0x25,		/* bnd jmpq *name@GOTPC(%rip)  */
  0, 0, 0, 0,			/* replaced with offset to this symbol in .got.  */
  0x0f, 0x1f, 0x44, 0x00, 0x00	/* nopl 0x0(%rax,%rax,1)  */
};

/* Entries for branches with IBT enabled in the x32 non-lazy procedure
   linkage table look like this.  They have the same size as the lazy
   PLT entry.  */

static const bfd_byte elf_x32_non_lazy_ibt_plt_entry[LAZY_PLT_ENTRY_SIZE] =
{
  0xf3, 0x0f, 0x1e, 0xfa,	     /* endbr64  */
  0xff, 0x25,			     /* jmpq *name@GOTPC(%rip)  */
  0, 0, 0, 0,			     /* replaced with offset to this symbol in .got.  */
  0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00 /* nopw 0x0(%rax,%rax,1)  */
};

/* The TLSDESC entry in a lazy procedure linkage table.
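   A sketch of how the 16-byte template below is laid out (the offsets
   match the plt_tlsdesc_* fields of elf_x86_64_lazy_plt further down):

     offset 0:   f3 0f 1e fa        endbr64
     offset 4:   ff 35 <disp32>     pushq GOT+8(%rip)    (disp32 at 6)
     offset 10:  ff 25 <disp32>     jmpq *GOT+TDG(%rip)  (disp32 at 12)

   The linker fills in the two disp32 fields (plt_tlsdesc_got1_offset
   == 6, plt_tlsdesc_got2_offset == 12) with PC-relative distances to
   the GOT, measured from the instruction ends recorded as
   plt_tlsdesc_got1_insn_end == 10 and plt_tlsdesc_got2_insn_end == 16.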
*/ 696 static const bfd_byte elf_x86_64_tlsdesc_plt_entry[LAZY_PLT_ENTRY_SIZE] = 697 { 698 0xf3, 0x0f, 0x1e, 0xfa, /* endbr64 */ 699 0xff, 0x35, 8, 0, 0, 0, /* pushq GOT+8(%rip) */ 700 0xff, 0x25, 16, 0, 0, 0 /* jmpq *GOT+TDG(%rip) */ 701 }; 702 703 /* .eh_frame covering the lazy .plt section. */ 704 705 static const bfd_byte elf_x86_64_eh_frame_lazy_plt[] = 706 { 707 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 708 0, 0, 0, 0, /* CIE ID */ 709 1, /* CIE version */ 710 'z', 'R', 0, /* Augmentation string */ 711 1, /* Code alignment factor */ 712 0x78, /* Data alignment factor */ 713 16, /* Return address column */ 714 1, /* Augmentation size */ 715 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 716 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 717 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 718 DW_CFA_nop, DW_CFA_nop, 719 720 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 721 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 722 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 723 0, 0, 0, 0, /* .plt size goes here */ 724 0, /* Augmentation size */ 725 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 726 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 727 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 728 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 729 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 730 11, /* Block length */ 731 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 732 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 733 DW_OP_lit15, DW_OP_and, DW_OP_lit11, DW_OP_ge, 734 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 735 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 736 }; 737 738 /* .eh_frame covering the lazy BND .plt section. */ 739 740 static const bfd_byte elf_x86_64_eh_frame_lazy_bnd_plt[] = 741 { 742 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 743 0, 0, 0, 0, /* CIE ID */ 744 1, /* CIE version */ 745 'z', 'R', 0, /* Augmentation string */ 746 1, /* Code alignment factor */ 747 0x78, /* Data alignment factor */ 748 16, /* Return address column */ 749 1, /* Augmentation size */ 750 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 751 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 752 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 753 DW_CFA_nop, DW_CFA_nop, 754 755 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 756 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 757 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 758 0, 0, 0, 0, /* .plt size goes here */ 759 0, /* Augmentation size */ 760 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 761 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 762 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 763 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 764 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 765 11, /* Block length */ 766 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 767 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 768 DW_OP_lit15, DW_OP_and, DW_OP_lit5, DW_OP_ge, 769 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 770 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 771 }; 772 773 /* .eh_frame covering the lazy .plt section with IBT-enabled. 
*/ 774 775 static const bfd_byte elf_x86_64_eh_frame_lazy_ibt_plt[] = 776 { 777 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 778 0, 0, 0, 0, /* CIE ID */ 779 1, /* CIE version */ 780 'z', 'R', 0, /* Augmentation string */ 781 1, /* Code alignment factor */ 782 0x78, /* Data alignment factor */ 783 16, /* Return address column */ 784 1, /* Augmentation size */ 785 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 786 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 787 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 788 DW_CFA_nop, DW_CFA_nop, 789 790 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 791 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 792 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 793 0, 0, 0, 0, /* .plt size goes here */ 794 0, /* Augmentation size */ 795 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 796 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 797 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 798 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 799 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 800 11, /* Block length */ 801 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 802 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 803 DW_OP_lit15, DW_OP_and, DW_OP_lit10, DW_OP_ge, 804 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 805 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 806 }; 807 808 /* .eh_frame covering the x32 lazy .plt section with IBT-enabled. */ 809 810 static const bfd_byte elf_x32_eh_frame_lazy_ibt_plt[] = 811 { 812 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 813 0, 0, 0, 0, /* CIE ID */ 814 1, /* CIE version */ 815 'z', 'R', 0, /* Augmentation string */ 816 1, /* Code alignment factor */ 817 0x78, /* Data alignment factor */ 818 16, /* Return address column */ 819 1, /* Augmentation size */ 820 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 821 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 822 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 823 DW_CFA_nop, DW_CFA_nop, 824 825 PLT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 826 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 827 0, 0, 0, 0, /* R_X86_64_PC32 .plt goes here */ 828 0, 0, 0, 0, /* .plt size goes here */ 829 0, /* Augmentation size */ 830 DW_CFA_def_cfa_offset, 16, /* DW_CFA_def_cfa_offset: 16 */ 831 DW_CFA_advance_loc + 6, /* DW_CFA_advance_loc: 6 to __PLT__+6 */ 832 DW_CFA_def_cfa_offset, 24, /* DW_CFA_def_cfa_offset: 24 */ 833 DW_CFA_advance_loc + 10, /* DW_CFA_advance_loc: 10 to __PLT__+16 */ 834 DW_CFA_def_cfa_expression, /* DW_CFA_def_cfa_expression */ 835 11, /* Block length */ 836 DW_OP_breg7, 8, /* DW_OP_breg7 (rsp): 8 */ 837 DW_OP_breg16, 0, /* DW_OP_breg16 (rip): 0 */ 838 DW_OP_lit15, DW_OP_and, DW_OP_lit9, DW_OP_ge, 839 DW_OP_lit3, DW_OP_shl, DW_OP_plus, 840 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 841 }; 842 843 /* .eh_frame covering the non-lazy .plt section. 
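   (A note on the lazy-PLT FDEs above: they all end with the same CFA
   expression, which decodes roughly as

      CFA = (%rsp + 8) + ((((%rip & 15) >= N) ? 1 : 0) << 3)

   where N is the DW_OP_lit constant that differs per variant (11, 5,
   10 or 9).  Within each 16-byte PLT entry the CFA is %rsp + 8 until
   the relocation-index push has executed and %rsp + 16 afterwards;
   N marks roughly where that boundary falls for the entry layout in
   use.  The non-lazy PLT below pushes nothing, so its FDE needs no
   such expression.)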
*/ 844 845 static const bfd_byte elf_x86_64_eh_frame_non_lazy_plt[] = 846 { 847 #define PLT_GOT_FDE_LENGTH 20 848 PLT_CIE_LENGTH, 0, 0, 0, /* CIE length */ 849 0, 0, 0, 0, /* CIE ID */ 850 1, /* CIE version */ 851 'z', 'R', 0, /* Augmentation string */ 852 1, /* Code alignment factor */ 853 0x78, /* Data alignment factor */ 854 16, /* Return address column */ 855 1, /* Augmentation size */ 856 DW_EH_PE_pcrel | DW_EH_PE_sdata4, /* FDE encoding */ 857 DW_CFA_def_cfa, 7, 8, /* DW_CFA_def_cfa: r7 (rsp) ofs 8 */ 858 DW_CFA_offset + 16, 1, /* DW_CFA_offset: r16 (rip) at cfa-8 */ 859 DW_CFA_nop, DW_CFA_nop, 860 861 PLT_GOT_FDE_LENGTH, 0, 0, 0, /* FDE length */ 862 PLT_CIE_LENGTH + 8, 0, 0, 0, /* CIE pointer */ 863 0, 0, 0, 0, /* the start of non-lazy .plt goes here */ 864 0, 0, 0, 0, /* non-lazy .plt size goes here */ 865 0, /* Augmentation size */ 866 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, DW_CFA_nop, 867 DW_CFA_nop, DW_CFA_nop, DW_CFA_nop 868 }; 869 870 static const sframe_frame_row_entry elf_x86_64_sframe_null_fre = 871 { 872 0, 873 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */ 874 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */ 875 }; 876 877 /* .sframe FRE covering the .plt section entry. */ 878 static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre1 = 879 { 880 0, /* SFrame FRE start address. */ 881 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */ 882 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */ 883 }; 884 885 /* .sframe FRE covering the .plt section entry. */ 886 static const sframe_frame_row_entry elf_x86_64_sframe_plt0_fre2 = 887 { 888 6, /* SFrame FRE start address. */ 889 {24, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */ 890 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */ 891 }; 892 893 /* .sframe FRE covering the .plt section entry. */ 894 static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre1 = 895 { 896 0, /* SFrame FRE start address. */ 897 {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */ 898 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */ 899 }; 900 901 /* .sframe FRE covering the .plt section entry. */ 902 static const sframe_frame_row_entry elf_x86_64_sframe_pltn_fre2 = 903 { 904 11, /* SFrame FRE start address. */ 905 {16, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */ 906 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */ 907 }; 908 909 /* .sframe FRE covering the second .plt section entry. */ 910 static const sframe_frame_row_entry elf_x86_64_sframe_sec_pltn_fre1 = 911 { 912 0, /* SFrame FRE start address. */ 913 {8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, /* 12 bytes. */ 914 SFRAME_V1_FRE_INFO (SFRAME_BASE_REG_SP, 1, SFRAME_FRE_OFFSET_1B) /* FRE info. */ 915 }; 916 917 /* SFrame helper object for non-lazy PLT. Also used for IBT enabled PLT. */ 918 static const struct elf_x86_sframe_plt elf_x86_64_sframe_non_lazy_plt = 919 { 920 LAZY_PLT_ENTRY_SIZE, 921 2, /* Number of FREs for PLT0. */ 922 /* Array of SFrame FREs for plt0. */ 923 { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 }, 924 LAZY_PLT_ENTRY_SIZE, 925 1, /* Number of FREs for PLTn. */ 926 /* Array of SFrame FREs for plt. */ 927 { &elf_x86_64_sframe_sec_pltn_fre1, &elf_x86_64_sframe_null_fre }, 928 0, 929 0, /* There is no second PLT necessary. */ 930 { &elf_x86_64_sframe_null_fre } 931 }; 932 933 /* SFrame helper object for lazy PLT. Also used for IBT enabled PLT. 
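   A rough decoding of the FREs used above and below (start addresses
   are relative to PLT0 or to an individual PLT entry; offsets are CFA
   offsets from %rsp):

     plt0_fre1:     ip >= 0    CFA = %rsp + 16
     plt0_fre2:     ip >= 6    CFA = %rsp + 24  (after pushq GOT+8(%rip))
     pltn_fre1:     ip >= 0    CFA = %rsp + 8
     pltn_fre2:     ip >= 11   CFA = %rsp + 16  (after the reloc-index push)
     sec_pltn_fre1: ip >= 0    CFA = %rsp + 8   (no pushes in .plt.sec/.plt.got)

   This mirrors the .eh_frame data above, just expressed as flat
   (start address, CFA offset) rows instead of DWARF opcodes.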
*/ 934 static const struct elf_x86_sframe_plt elf_x86_64_sframe_plt = 935 { 936 LAZY_PLT_ENTRY_SIZE, 937 2, /* Number of FREs for PLT0. */ 938 /* Array of SFrame FREs for plt0. */ 939 { &elf_x86_64_sframe_plt0_fre1, &elf_x86_64_sframe_plt0_fre2 }, 940 LAZY_PLT_ENTRY_SIZE, 941 2, /* Number of FREs for PLTn. */ 942 /* Array of SFrame FREs for plt. */ 943 { &elf_x86_64_sframe_pltn_fre1, &elf_x86_64_sframe_pltn_fre2 }, 944 NON_LAZY_PLT_ENTRY_SIZE, 945 1, /* Number of FREs for PLTn for second PLT. */ 946 /* FREs for second plt (stack trace info for .plt.got is 947 identical). Used when IBT or non-lazy PLT is in effect. */ 948 { &elf_x86_64_sframe_sec_pltn_fre1 } 949 }; 950 951 /* These are the standard parameters. */ 952 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_plt = 953 { 954 elf_x86_64_lazy_plt0_entry, /* plt0_entry */ 955 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ 956 elf_x86_64_lazy_plt_entry, /* plt_entry */ 957 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 958 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ 959 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 960 6, /* plt_tlsdesc_got1_offset */ 961 12, /* plt_tlsdesc_got2_offset */ 962 10, /* plt_tlsdesc_got1_insn_end */ 963 16, /* plt_tlsdesc_got2_insn_end */ 964 2, /* plt0_got1_offset */ 965 8, /* plt0_got2_offset */ 966 12, /* plt0_got2_insn_end */ 967 2, /* plt_got_offset */ 968 7, /* plt_reloc_offset */ 969 12, /* plt_plt_offset */ 970 6, /* plt_got_insn_size */ 971 LAZY_PLT_ENTRY_SIZE, /* plt_plt_insn_end */ 972 6, /* plt_lazy_offset */ 973 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */ 974 elf_x86_64_lazy_plt_entry, /* pic_plt_entry */ 975 elf_x86_64_eh_frame_lazy_plt, /* eh_frame_plt */ 976 sizeof (elf_x86_64_eh_frame_lazy_plt) /* eh_frame_plt_size */ 977 }; 978 979 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_plt = 980 { 981 elf_x86_64_non_lazy_plt_entry, /* plt_entry */ 982 elf_x86_64_non_lazy_plt_entry, /* pic_plt_entry */ 983 NON_LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 984 2, /* plt_got_offset */ 985 6, /* plt_got_insn_size */ 986 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ 987 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ 988 }; 989 990 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_bnd_plt = 991 { 992 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */ 993 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ 994 elf_x86_64_lazy_bnd_plt_entry, /* plt_entry */ 995 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 996 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ 997 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 998 6, /* plt_tlsdesc_got1_offset */ 999 12, /* plt_tlsdesc_got2_offset */ 1000 10, /* plt_tlsdesc_got1_insn_end */ 1001 16, /* plt_tlsdesc_got2_insn_end */ 1002 2, /* plt0_got1_offset */ 1003 1+8, /* plt0_got2_offset */ 1004 1+12, /* plt0_got2_insn_end */ 1005 1+2, /* plt_got_offset */ 1006 1, /* plt_reloc_offset */ 1007 7, /* plt_plt_offset */ 1008 1+6, /* plt_got_insn_size */ 1009 11, /* plt_plt_insn_end */ 1010 0, /* plt_lazy_offset */ 1011 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */ 1012 elf_x86_64_lazy_bnd_plt_entry, /* pic_plt_entry */ 1013 elf_x86_64_eh_frame_lazy_bnd_plt, /* eh_frame_plt */ 1014 sizeof (elf_x86_64_eh_frame_lazy_bnd_plt) /* eh_frame_plt_size */ 1015 }; 1016 1017 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_bnd_plt = 1018 { 1019 elf_x86_64_non_lazy_bnd_plt_entry, /* plt_entry */ 1020 elf_x86_64_non_lazy_bnd_plt_entry, /* pic_plt_entry */ 1021 NON_LAZY_PLT_ENTRY_SIZE, /* 
plt_entry_size */ 1022 1+2, /* plt_got_offset */ 1023 1+6, /* plt_got_insn_size */ 1024 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ 1025 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ 1026 }; 1027 1028 static const struct elf_x86_lazy_plt_layout elf_x86_64_lazy_ibt_plt = 1029 { 1030 elf_x86_64_lazy_bnd_plt0_entry, /* plt0_entry */ 1031 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ 1032 elf_x86_64_lazy_ibt_plt_entry, /* plt_entry */ 1033 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 1034 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ 1035 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 1036 6, /* plt_tlsdesc_got1_offset */ 1037 12, /* plt_tlsdesc_got2_offset */ 1038 10, /* plt_tlsdesc_got1_insn_end */ 1039 16, /* plt_tlsdesc_got2_insn_end */ 1040 2, /* plt0_got1_offset */ 1041 1+8, /* plt0_got2_offset */ 1042 1+12, /* plt0_got2_insn_end */ 1043 4+1+2, /* plt_got_offset */ 1044 4+1, /* plt_reloc_offset */ 1045 4+1+6, /* plt_plt_offset */ 1046 4+1+6, /* plt_got_insn_size */ 1047 4+1+5+5, /* plt_plt_insn_end */ 1048 0, /* plt_lazy_offset */ 1049 elf_x86_64_lazy_bnd_plt0_entry, /* pic_plt0_entry */ 1050 elf_x86_64_lazy_ibt_plt_entry, /* pic_plt_entry */ 1051 elf_x86_64_eh_frame_lazy_ibt_plt, /* eh_frame_plt */ 1052 sizeof (elf_x86_64_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */ 1053 }; 1054 1055 static const struct elf_x86_lazy_plt_layout elf_x32_lazy_ibt_plt = 1056 { 1057 elf_x86_64_lazy_plt0_entry, /* plt0_entry */ 1058 LAZY_PLT_ENTRY_SIZE, /* plt0_entry_size */ 1059 elf_x32_lazy_ibt_plt_entry, /* plt_entry */ 1060 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 1061 elf_x86_64_tlsdesc_plt_entry, /* plt_tlsdesc_entry */ 1062 LAZY_PLT_ENTRY_SIZE, /* plt_tlsdesc_entry_size */ 1063 6, /* plt_tlsdesc_got1_offset */ 1064 12, /* plt_tlsdesc_got2_offset */ 1065 10, /* plt_tlsdesc_got1_insn_end */ 1066 16, /* plt_tlsdesc_got2_insn_end */ 1067 2, /* plt0_got1_offset */ 1068 8, /* plt0_got2_offset */ 1069 12, /* plt0_got2_insn_end */ 1070 4+2, /* plt_got_offset */ 1071 4+1, /* plt_reloc_offset */ 1072 4+6, /* plt_plt_offset */ 1073 4+6, /* plt_got_insn_size */ 1074 4+5+5, /* plt_plt_insn_end */ 1075 0, /* plt_lazy_offset */ 1076 elf_x86_64_lazy_plt0_entry, /* pic_plt0_entry */ 1077 elf_x32_lazy_ibt_plt_entry, /* pic_plt_entry */ 1078 elf_x32_eh_frame_lazy_ibt_plt, /* eh_frame_plt */ 1079 sizeof (elf_x32_eh_frame_lazy_ibt_plt) /* eh_frame_plt_size */ 1080 }; 1081 1082 static const struct elf_x86_non_lazy_plt_layout elf_x86_64_non_lazy_ibt_plt = 1083 { 1084 elf_x86_64_non_lazy_ibt_plt_entry, /* plt_entry */ 1085 elf_x86_64_non_lazy_ibt_plt_entry, /* pic_plt_entry */ 1086 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 1087 4+1+2, /* plt_got_offset */ 1088 4+1+6, /* plt_got_insn_size */ 1089 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ 1090 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ 1091 }; 1092 1093 static const struct elf_x86_non_lazy_plt_layout elf_x32_non_lazy_ibt_plt = 1094 { 1095 elf_x32_non_lazy_ibt_plt_entry, /* plt_entry */ 1096 elf_x32_non_lazy_ibt_plt_entry, /* pic_plt_entry */ 1097 LAZY_PLT_ENTRY_SIZE, /* plt_entry_size */ 1098 4+2, /* plt_got_offset */ 1099 4+6, /* plt_got_insn_size */ 1100 elf_x86_64_eh_frame_non_lazy_plt, /* eh_frame_plt */ 1101 sizeof (elf_x86_64_eh_frame_non_lazy_plt) /* eh_frame_plt_size */ 1102 }; 1103 1104 static bool 1105 elf64_x86_64_elf_object_p (bfd *abfd) 1106 { 1107 /* Set the right machine number for an x86-64 elf64 file. 
*/ 1108 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x86_64); 1109 return true; 1110 } 1111 1112 static bool 1113 elf32_x86_64_elf_object_p (bfd *abfd) 1114 { 1115 /* Set the right machine number for an x86-64 elf32 file. */ 1116 bfd_default_set_arch_mach (abfd, bfd_arch_i386, bfd_mach_x64_32); 1117 return true; 1118 } 1119 1120 /* Return TRUE if the TLS access code sequence support transition 1121 from R_TYPE. */ 1122 1123 static bool 1124 elf_x86_64_check_tls_transition (bfd *abfd, 1125 struct bfd_link_info *info, 1126 asection *sec, 1127 bfd_byte *contents, 1128 Elf_Internal_Shdr *symtab_hdr, 1129 struct elf_link_hash_entry **sym_hashes, 1130 unsigned int r_type, 1131 const Elf_Internal_Rela *rel, 1132 const Elf_Internal_Rela *relend) 1133 { 1134 unsigned int val; 1135 unsigned long r_symndx; 1136 bool largepic = false; 1137 struct elf_link_hash_entry *h; 1138 bfd_vma offset; 1139 struct elf_x86_link_hash_table *htab; 1140 bfd_byte *call; 1141 bool indirect_call; 1142 1143 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 1144 offset = rel->r_offset; 1145 switch (r_type) 1146 { 1147 case R_X86_64_TLSGD: 1148 case R_X86_64_TLSLD: 1149 if ((rel + 1) >= relend) 1150 return false; 1151 1152 if (r_type == R_X86_64_TLSGD) 1153 { 1154 /* Check transition from GD access model. For 64bit, only 1155 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 1156 .word 0x6666; rex64; call __tls_get_addr@PLT 1157 or 1158 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 1159 .byte 0x66; rex64 1160 call *__tls_get_addr@GOTPCREL(%rip) 1161 which may be converted to 1162 addr32 call __tls_get_addr 1163 can transit to different access model. For 32bit, only 1164 leaq foo@tlsgd(%rip), %rdi 1165 .word 0x6666; rex64; call __tls_get_addr@PLT 1166 or 1167 leaq foo@tlsgd(%rip), %rdi 1168 .byte 0x66; rex64 1169 call *__tls_get_addr@GOTPCREL(%rip) 1170 which may be converted to 1171 addr32 call __tls_get_addr 1172 can transit to different access model. For largepic, 1173 we also support: 1174 leaq foo@tlsgd(%rip), %rdi 1175 movabsq $__tls_get_addr@pltoff, %rax 1176 addq $r15, %rax 1177 call *%rax 1178 or 1179 leaq foo@tlsgd(%rip), %rdi 1180 movabsq $__tls_get_addr@pltoff, %rax 1181 addq $rbx, %rax 1182 call *%rax */ 1183 1184 static const unsigned char leaq[] = { 0x66, 0x48, 0x8d, 0x3d }; 1185 1186 if ((offset + 12) > sec->size) 1187 return false; 1188 1189 call = contents + offset + 4; 1190 if (call[0] != 0x66 1191 || !((call[1] == 0x48 1192 && call[2] == 0xff 1193 && call[3] == 0x15) 1194 || (call[1] == 0x48 1195 && call[2] == 0x67 1196 && call[3] == 0xe8) 1197 || (call[1] == 0x66 1198 && call[2] == 0x48 1199 && call[3] == 0xe8))) 1200 { 1201 if (!ABI_64_P (abfd) 1202 || (offset + 19) > sec->size 1203 || offset < 3 1204 || memcmp (call - 7, leaq + 1, 3) != 0 1205 || memcmp (call, "\x48\xb8", 2) != 0 1206 || call[11] != 0x01 1207 || call[13] != 0xff 1208 || call[14] != 0xd0 1209 || !((call[10] == 0x48 && call[12] == 0xd8) 1210 || (call[10] == 0x4c && call[12] == 0xf8))) 1211 return false; 1212 largepic = true; 1213 } 1214 else if (ABI_64_P (abfd)) 1215 { 1216 if (offset < 4 1217 || memcmp (contents + offset - 4, leaq, 4) != 0) 1218 return false; 1219 } 1220 else 1221 { 1222 if (offset < 3 1223 || memcmp (contents + offset - 3, leaq + 1, 3) != 0) 1224 return false; 1225 } 1226 indirect_call = call[2] == 0xff; 1227 } 1228 else 1229 { 1230 /* Check transition from LD access model. 
Only 1231 leaq foo@tlsld(%rip), %rdi; 1232 call __tls_get_addr@PLT 1233 or 1234 leaq foo@tlsld(%rip), %rdi; 1235 call *__tls_get_addr@GOTPCREL(%rip) 1236 which may be converted to 1237 addr32 call __tls_get_addr 1238 can transit to different access model. For largepic 1239 we also support: 1240 leaq foo@tlsld(%rip), %rdi 1241 movabsq $__tls_get_addr@pltoff, %rax 1242 addq $r15, %rax 1243 call *%rax 1244 or 1245 leaq foo@tlsld(%rip), %rdi 1246 movabsq $__tls_get_addr@pltoff, %rax 1247 addq $rbx, %rax 1248 call *%rax */ 1249 1250 static const unsigned char lea[] = { 0x48, 0x8d, 0x3d }; 1251 1252 if (offset < 3 || (offset + 9) > sec->size) 1253 return false; 1254 1255 if (memcmp (contents + offset - 3, lea, 3) != 0) 1256 return false; 1257 1258 call = contents + offset + 4; 1259 if (!(call[0] == 0xe8 1260 || (call[0] == 0xff && call[1] == 0x15) 1261 || (call[0] == 0x67 && call[1] == 0xe8))) 1262 { 1263 if (!ABI_64_P (abfd) 1264 || (offset + 19) > sec->size 1265 || memcmp (call, "\x48\xb8", 2) != 0 1266 || call[11] != 0x01 1267 || call[13] != 0xff 1268 || call[14] != 0xd0 1269 || !((call[10] == 0x48 && call[12] == 0xd8) 1270 || (call[10] == 0x4c && call[12] == 0xf8))) 1271 return false; 1272 largepic = true; 1273 } 1274 indirect_call = call[0] == 0xff; 1275 } 1276 1277 r_symndx = htab->r_sym (rel[1].r_info); 1278 if (r_symndx < symtab_hdr->sh_info) 1279 return false; 1280 1281 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 1282 if (h == NULL 1283 || !((struct elf_x86_link_hash_entry *) h)->tls_get_addr) 1284 return false; 1285 else 1286 { 1287 r_type = (ELF32_R_TYPE (rel[1].r_info) 1288 & ~R_X86_64_converted_reloc_bit); 1289 if (largepic) 1290 return r_type == R_X86_64_PLTOFF64; 1291 else if (indirect_call) 1292 return (r_type == R_X86_64_GOTPCRELX || r_type == R_X86_64_GOTPCREL); 1293 else 1294 return (r_type == R_X86_64_PC32 || r_type == R_X86_64_PLT32); 1295 } 1296 1297 case R_X86_64_CODE_4_GOTTPOFF: 1298 /* Check transition from IE access model: 1299 mov foo@gottpoff(%rip), %reg 1300 add foo@gottpoff(%rip), %reg 1301 where reg is one of r16 to r31. */ 1302 1303 if (offset < 4 1304 || (offset + 4) > sec->size 1305 || contents[offset - 4] != 0xd5) 1306 return false; 1307 1308 goto check_gottpoff; 1309 1310 case R_X86_64_CODE_6_GOTTPOFF: 1311 /* Check transition from IE access model: 1312 add %reg1, foo@gottpoff(%rip), %reg2 1313 where reg1/reg2 are one of r16 to r31. */ 1314 1315 if (offset < 6 1316 || (offset + 4) > sec->size 1317 || contents[offset - 6] != 0x62) 1318 return false; 1319 1320 val = bfd_get_8 (abfd, contents + offset - 2); 1321 if (val != 0x01 && val != 0x03) 1322 return false; 1323 1324 val = bfd_get_8 (abfd, contents + offset - 1); 1325 return (val & 0xc7) == 5; 1326 1327 case R_X86_64_GOTTPOFF: 1328 /* Check transition from IE access model: 1329 mov foo@gottpoff(%rip), %reg 1330 add foo@gottpoff(%rip), %reg 1331 */ 1332 1333 /* Check REX prefix first. */ 1334 if (offset >= 3 && (offset + 4) <= sec->size) 1335 { 1336 val = bfd_get_8 (abfd, contents + offset - 3); 1337 if (val != 0x48 && val != 0x4c) 1338 { 1339 /* X32 may have 0x44 REX prefix or no REX prefix. */ 1340 if (ABI_64_P (abfd)) 1341 return false; 1342 } 1343 } 1344 else 1345 { 1346 /* X32 may not have any REX prefix. 
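     For reference, the byte pattern validated at check_gottpoff below
     is, in the common LP64 case (a minimal sketch):

	48 8b 05 <rel32>	movq foo@gottpoff(%rip), %rax
	48 03 05 <rel32>	addq foo@gottpoff(%rip), %rax

     The relocation covers <rel32>, so contents[offset - 2] is the
     opcode (0x8b for mov, 0x03 for add) and contents[offset - 1] is a
     ModRM byte with mod == 00 and r/m == 101 (RIP-relative), which is
     what the (val & 0xc7) == 5 test checks; the reg field in between
     selects the destination register.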
*/ 1347 if (ABI_64_P (abfd)) 1348 return false; 1349 if (offset < 2 || (offset + 3) > sec->size) 1350 return false; 1351 } 1352 1353 check_gottpoff: 1354 val = bfd_get_8 (abfd, contents + offset - 2); 1355 if (val != 0x8b && val != 0x03) 1356 return false; 1357 1358 val = bfd_get_8 (abfd, contents + offset - 1); 1359 return (val & 0xc7) == 5; 1360 1361 case R_X86_64_CODE_4_GOTPC32_TLSDESC: 1362 /* Check transition from GDesc access model: 1363 lea x@tlsdesc(%rip), %reg 1364 where reg is one of r16 to r31. */ 1365 1366 if (offset < 4 1367 || (offset + 4) > sec->size 1368 || contents[offset - 4] != 0xd5) 1369 return false; 1370 1371 goto check_tlsdesc; 1372 1373 case R_X86_64_GOTPC32_TLSDESC: 1374 /* Check transition from GDesc access model: 1375 leaq x@tlsdesc(%rip), %rax <--- LP64 mode. 1376 rex leal x@tlsdesc(%rip), %eax <--- X32 mode. 1377 1378 Make sure it's a leaq adding rip to a 32-bit offset 1379 into any register, although it's probably almost always 1380 going to be rax. */ 1381 1382 if (offset < 3 || (offset + 4) > sec->size) 1383 return false; 1384 1385 val = bfd_get_8 (abfd, contents + offset - 3); 1386 val &= 0xfb; 1387 if (val != 0x48 && (ABI_64_P (abfd) || val != 0x40)) 1388 return false; 1389 1390 check_tlsdesc: 1391 if (bfd_get_8 (abfd, contents + offset - 2) != 0x8d) 1392 return false; 1393 1394 val = bfd_get_8 (abfd, contents + offset - 1); 1395 return (val & 0xc7) == 0x05; 1396 1397 case R_X86_64_TLSDESC_CALL: 1398 /* Check transition from GDesc access model: 1399 call *x@tlsdesc(%rax) <--- LP64 mode. 1400 call *x@tlsdesc(%eax) <--- X32 mode. 1401 */ 1402 if (offset + 2 <= sec->size) 1403 { 1404 unsigned int prefix; 1405 call = contents + offset; 1406 prefix = 0; 1407 if (!ABI_64_P (abfd)) 1408 { 1409 /* Check for call *x@tlsdesc(%eax). */ 1410 if (call[0] == 0x67) 1411 { 1412 prefix = 1; 1413 if (offset + 3 > sec->size) 1414 return false; 1415 } 1416 } 1417 /* Make sure that it's a call *x@tlsdesc(%rax). */ 1418 return call[prefix] == 0xff && call[1 + prefix] == 0x10; 1419 } 1420 1421 return false; 1422 1423 default: 1424 abort (); 1425 } 1426 } 1427 1428 /* Return TRUE if the TLS access transition is OK or no transition 1429 will be performed. Update R_TYPE if there is a transition. */ 1430 1431 static bool 1432 elf_x86_64_tls_transition (struct bfd_link_info *info, bfd *abfd, 1433 asection *sec, bfd_byte *contents, 1434 Elf_Internal_Shdr *symtab_hdr, 1435 struct elf_link_hash_entry **sym_hashes, 1436 unsigned int *r_type, int tls_type, 1437 const Elf_Internal_Rela *rel, 1438 const Elf_Internal_Rela *relend, 1439 struct elf_link_hash_entry *h, 1440 unsigned long r_symndx, 1441 bool from_relocate_section) 1442 { 1443 unsigned int from_type = *r_type; 1444 unsigned int to_type = from_type; 1445 bool check = true; 1446 1447 /* Skip TLS transition for functions. */ 1448 if (h != NULL 1449 && (h->type == STT_FUNC 1450 || h->type == STT_GNU_IFUNC)) 1451 return true; 1452 1453 switch (from_type) 1454 { 1455 case R_X86_64_TLSGD: 1456 case R_X86_64_GOTPC32_TLSDESC: 1457 case R_X86_64_CODE_4_GOTPC32_TLSDESC: 1458 case R_X86_64_TLSDESC_CALL: 1459 case R_X86_64_GOTTPOFF: 1460 case R_X86_64_CODE_4_GOTTPOFF: 1461 case R_X86_64_CODE_6_GOTTPOFF: 1462 if (bfd_link_executable (info)) 1463 { 1464 if (h == NULL) 1465 to_type = R_X86_64_TPOFF32; 1466 else 1467 to_type = R_X86_64_GOTTPOFF; 1468 } 1469 1470 /* When we are called from elf_x86_64_relocate_section, there may 1471 be additional transitions based on TLS_TYPE. 
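   Roughly, the combined effect for these GOT-based TLS relocations is:

     shared library:			   no transition
     executable, symbol preemptible:	   GD/GDesc -> IE
     executable, symbol resolved locally:  GD/GDesc/IE -> LE (and LD -> LE)

   The first step above picked the executable-time target; this pass
   refines it once the symbol's TLS_TYPE is known.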
*/ 1472 if (from_relocate_section) 1473 { 1474 unsigned int new_to_type = to_type; 1475 1476 if (TLS_TRANSITION_IE_TO_LE_P (info, h, tls_type)) 1477 new_to_type = R_X86_64_TPOFF32; 1478 1479 if (to_type == R_X86_64_TLSGD 1480 || to_type == R_X86_64_GOTPC32_TLSDESC 1481 || to_type == R_X86_64_CODE_4_GOTPC32_TLSDESC 1482 || to_type == R_X86_64_TLSDESC_CALL) 1483 { 1484 if (tls_type == GOT_TLS_IE) 1485 new_to_type = R_X86_64_GOTTPOFF; 1486 } 1487 1488 /* We checked the transition before when we were called from 1489 elf_x86_64_scan_relocs. We only want to check the new 1490 transition which hasn't been checked before. */ 1491 check = new_to_type != to_type && from_type == to_type; 1492 to_type = new_to_type; 1493 } 1494 1495 break; 1496 1497 case R_X86_64_TLSLD: 1498 if (bfd_link_executable (info)) 1499 to_type = R_X86_64_TPOFF32; 1500 break; 1501 1502 default: 1503 return true; 1504 } 1505 1506 /* Return TRUE if there is no transition. */ 1507 if (from_type == to_type 1508 || (from_type == R_X86_64_CODE_4_GOTTPOFF 1509 && to_type == R_X86_64_GOTTPOFF) 1510 || (from_type == R_X86_64_CODE_6_GOTTPOFF 1511 && to_type == R_X86_64_GOTTPOFF)) 1512 return true; 1513 1514 /* Check if the transition can be performed. */ 1515 if (check 1516 && ! elf_x86_64_check_tls_transition (abfd, info, sec, contents, 1517 symtab_hdr, sym_hashes, 1518 from_type, rel, relend)) 1519 { 1520 reloc_howto_type *from, *to; 1521 const char *name; 1522 1523 from = elf_x86_64_rtype_to_howto (abfd, from_type); 1524 to = elf_x86_64_rtype_to_howto (abfd, to_type); 1525 1526 if (from == NULL || to == NULL) 1527 return false; 1528 1529 if (h) 1530 name = h->root.root.string; 1531 else 1532 { 1533 struct elf_x86_link_hash_table *htab; 1534 1535 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 1536 if (htab == NULL) 1537 name = "*unknown*"; 1538 else 1539 { 1540 Elf_Internal_Sym *isym; 1541 1542 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache, 1543 abfd, r_symndx); 1544 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, NULL); 1545 } 1546 } 1547 1548 _bfd_error_handler 1549 /* xgettext:c-format */ 1550 (_("%pB: TLS transition from %s to %s against `%s' at %#" PRIx64 1551 " in section `%pA' failed"), 1552 abfd, from->name, to->name, name, (uint64_t) rel->r_offset, sec); 1553 bfd_set_error (bfd_error_bad_value); 1554 return false; 1555 } 1556 1557 *r_type = to_type; 1558 return true; 1559 } 1560 1561 static bool 1562 elf_x86_64_need_pic (struct bfd_link_info *info, 1563 bfd *input_bfd, asection *sec, 1564 struct elf_link_hash_entry *h, 1565 Elf_Internal_Shdr *symtab_hdr, 1566 Elf_Internal_Sym *isym, 1567 reloc_howto_type *howto) 1568 { 1569 const char *v = ""; 1570 const char *und = ""; 1571 const char *pic = ""; 1572 const char *object; 1573 1574 const char *name; 1575 if (h) 1576 { 1577 name = h->root.root.string; 1578 switch (ELF_ST_VISIBILITY (h->other)) 1579 { 1580 case STV_HIDDEN: 1581 v = _("hidden symbol "); 1582 break; 1583 case STV_INTERNAL: 1584 v = _("internal symbol "); 1585 break; 1586 case STV_PROTECTED: 1587 v = _("protected symbol "); 1588 break; 1589 default: 1590 if (((struct elf_x86_link_hash_entry *) h)->def_protected) 1591 v = _("protected symbol "); 1592 else 1593 v = _("symbol "); 1594 pic = NULL; 1595 break; 1596 } 1597 1598 if (!SYMBOL_DEFINED_NON_SHARED_P (h) && !h->def_dynamic) 1599 und = _("undefined "); 1600 } 1601 else 1602 { 1603 name = bfd_elf_sym_name (input_bfd, symtab_hdr, isym, NULL); 1604 pic = NULL; 1605 } 1606 1607 if (bfd_link_dll (info)) 1608 { 1609 object = _("a shared object"); 1610 if 
(!pic) 1611 pic = _("; recompile with -fPIC"); 1612 } 1613 else 1614 { 1615 if (bfd_link_pie (info)) 1616 object = _("a PIE object"); 1617 else 1618 object = _("a PDE object"); 1619 if (!pic) 1620 pic = _("; recompile with -fPIE"); 1621 } 1622 1623 /* xgettext:c-format */ 1624 _bfd_error_handler (_("%pB: relocation %s against %s%s`%s' can " 1625 "not be used when making %s%s"), 1626 input_bfd, howto->name, und, v, name, 1627 object, pic); 1628 bfd_set_error (bfd_error_bad_value); 1629 sec->check_relocs_failed = 1; 1630 return false; 1631 } 1632 1633 /* With the local symbol, foo, we convert 1634 mov foo@GOTPCREL(%rip), %reg 1635 to 1636 lea foo(%rip), %reg 1637 and convert 1638 call/jmp *foo@GOTPCREL(%rip) 1639 to 1640 nop call foo/jmp foo nop 1641 When PIC is false, convert 1642 test %reg, foo@GOTPCREL(%rip) 1643 to 1644 test $foo, %reg 1645 and convert 1646 binop foo@GOTPCREL(%rip), %reg 1647 to 1648 binop $foo, %reg 1649 where binop is one of adc, add, and, cmp, or, sbb, sub, xor 1650 instructions. */ 1651 1652 static bool 1653 elf_x86_64_convert_load_reloc (bfd *abfd, 1654 bfd_byte *contents, 1655 unsigned int *r_type_p, 1656 Elf_Internal_Rela *irel, 1657 struct elf_link_hash_entry *h, 1658 bool *converted, 1659 struct bfd_link_info *link_info) 1660 { 1661 struct elf_x86_link_hash_table *htab; 1662 bool is_pic; 1663 bool no_overflow; 1664 bool relocx; 1665 bool to_reloc_pc32; 1666 bool abs_symbol; 1667 bool local_ref; 1668 asection *tsec; 1669 bfd_signed_vma raddend; 1670 unsigned int opcode; 1671 unsigned int modrm; 1672 unsigned int r_type = *r_type_p; 1673 unsigned int r_symndx; 1674 bfd_vma roff = irel->r_offset; 1675 bfd_vma abs_relocation; 1676 1677 if (roff < (r_type == R_X86_64_CODE_4_GOTPCRELX 1678 ? 4 : (r_type == R_X86_64_REX_GOTPCRELX ? 3 : 2))) 1679 return true; 1680 1681 raddend = irel->r_addend; 1682 /* Addend for 32-bit PC-relative relocation must be -4. */ 1683 if (raddend != -4) 1684 return true; 1685 1686 htab = elf_x86_hash_table (link_info, X86_64_ELF_DATA); 1687 is_pic = bfd_link_pic (link_info); 1688 1689 if (r_type == R_X86_64_CODE_4_GOTPCRELX) 1690 { 1691 /* Skip if this isn't a REX2 instruction. */ 1692 opcode = bfd_get_8 (abfd, contents + roff - 4); 1693 if (opcode != 0xd5) 1694 return true; 1695 1696 relocx = true; 1697 } 1698 else 1699 relocx = (r_type == R_X86_64_GOTPCRELX 1700 || r_type == R_X86_64_REX_GOTPCRELX); 1701 1702 /* TRUE if --no-relax is used. */ 1703 no_overflow = link_info->disable_target_specific_optimizations > 1; 1704 1705 r_symndx = htab->r_sym (irel->r_info); 1706 1707 opcode = bfd_get_8 (abfd, contents + roff - 2); 1708 1709 /* Convert mov to lea since it has been done for a while. */ 1710 if (opcode != 0x8b) 1711 { 1712 /* Only convert R_X86_64_GOTPCRELX, R_X86_64_REX_GOTPCRELX 1713 and R_X86_64_CODE_4_GOTPCRELX for call, jmp or one of adc, 1714 add, and, cmp, or, sbb, sub, test, xor instructions. */ 1715 if (!relocx) 1716 return true; 1717 } 1718 1719 /* We convert only to R_X86_64_PC32: 1720 1. Branch. 1721 2. R_X86_64_GOTPCREL since we can't modify REX byte. 1722 3. no_overflow is true. 1723 4. PIC. 1724 */ 1725 to_reloc_pc32 = (opcode == 0xff 1726 || !relocx 1727 || no_overflow 1728 || is_pic); 1729 1730 abs_symbol = false; 1731 abs_relocation = 0; 1732 1733 /* Get the symbol referred to by the reloc. */ 1734 if (h == NULL) 1735 { 1736 Elf_Internal_Sym *isym 1737 = bfd_sym_from_r_symndx (&htab->elf.sym_cache, abfd, r_symndx); 1738 1739 /* Skip relocation against undefined symbols. 
*/ 1740 if (isym->st_shndx == SHN_UNDEF) 1741 return true; 1742 1743 local_ref = true; 1744 if (isym->st_shndx == SHN_ABS) 1745 { 1746 tsec = bfd_abs_section_ptr; 1747 abs_symbol = true; 1748 abs_relocation = isym->st_value; 1749 } 1750 else if (isym->st_shndx == SHN_COMMON) 1751 tsec = bfd_com_section_ptr; 1752 else if (isym->st_shndx == SHN_X86_64_LCOMMON) 1753 tsec = &_bfd_elf_large_com_section; 1754 else 1755 tsec = bfd_section_from_elf_index (abfd, isym->st_shndx); 1756 } 1757 else 1758 { 1759 /* Undefined weak symbol is only bound locally in executable 1760 and its reference is resolved as 0 without relocation 1761 overflow. We can only perform this optimization for 1762 GOTPCRELX relocations since we need to modify REX byte. 1763 It is OK convert mov with R_X86_64_GOTPCREL to 1764 R_X86_64_PC32. */ 1765 struct elf_x86_link_hash_entry *eh = elf_x86_hash_entry (h); 1766 1767 abs_symbol = ABS_SYMBOL_P (h); 1768 abs_relocation = h->root.u.def.value; 1769 1770 /* NB: Also set linker_def via SYMBOL_REFERENCES_LOCAL_P. */ 1771 local_ref = SYMBOL_REFERENCES_LOCAL_P (link_info, h); 1772 if ((relocx || opcode == 0x8b) 1773 && (h->root.type == bfd_link_hash_undefweak 1774 && !eh->linker_def 1775 && local_ref)) 1776 { 1777 if (opcode == 0xff) 1778 { 1779 /* Skip for branch instructions since R_X86_64_PC32 1780 may overflow. */ 1781 if (no_overflow) 1782 return true; 1783 } 1784 else if (relocx) 1785 { 1786 /* For non-branch instructions, we can convert to 1787 R_X86_64_32/R_X86_64_32S since we know if there 1788 is a REX byte. */ 1789 to_reloc_pc32 = false; 1790 } 1791 1792 /* Since we don't know the current PC when PIC is true, 1793 we can't convert to R_X86_64_PC32. */ 1794 if (to_reloc_pc32 && is_pic) 1795 return true; 1796 1797 goto convert; 1798 } 1799 /* Avoid optimizing GOTPCREL relocations againt _DYNAMIC since 1800 ld.so may use its link-time address. */ 1801 else if (h->start_stop 1802 || eh->linker_def 1803 || ((h->def_regular 1804 || h->root.type == bfd_link_hash_defined 1805 || h->root.type == bfd_link_hash_defweak) 1806 && h != htab->elf.hdynamic 1807 && local_ref)) 1808 { 1809 /* bfd_link_hash_new or bfd_link_hash_undefined is 1810 set by an assignment in a linker script in 1811 bfd_elf_record_link_assignment. start_stop is set 1812 on __start_SECNAME/__stop_SECNAME which mark section 1813 SECNAME. */ 1814 if (h->start_stop 1815 || eh->linker_def 1816 || (h->def_regular 1817 && (h->root.type == bfd_link_hash_new 1818 || h->root.type == bfd_link_hash_undefined 1819 || ((h->root.type == bfd_link_hash_defined 1820 || h->root.type == bfd_link_hash_defweak) 1821 && h->root.u.def.section == bfd_und_section_ptr)))) 1822 { 1823 /* Skip since R_X86_64_32/R_X86_64_32S may overflow. */ 1824 if (no_overflow) 1825 return true; 1826 goto convert; 1827 } 1828 tsec = h->root.u.def.section; 1829 } 1830 else 1831 return true; 1832 } 1833 1834 /* Don't convert GOTPCREL relocation against large section. */ 1835 if (elf_section_data (tsec) != NULL 1836 && (elf_section_flags (tsec) & SHF_X86_64_LARGE) != 0) 1837 return true; 1838 1839 /* Skip since R_X86_64_PC32/R_X86_64_32/R_X86_64_32S may overflow. */ 1840 if (no_overflow) 1841 return true; 1842 1843 convert: 1844 if (opcode == 0xff) 1845 { 1846 /* We have "call/jmp *foo@GOTPCREL(%rip)". */ 1847 unsigned int nop; 1848 unsigned int disp; 1849 bfd_vma nop_offset; 1850 1851 /* Convert R_X86_64_GOTPCRELX and R_X86_64_REX_GOTPCRELX to 1852 R_X86_64_PC32. 
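     As an illustrative sketch of the byte-level rewrite (dd stands for the four displacement bytes; the one-byte pad is whatever call_nop_byte/call_nop_as_suffix select, shown here as a leading 0x90):
         ff 25 dd dd dd dd    jmp *foo@GOTPCREL(%rip)
     becomes
         e9 dd dd dd dd 90    jmp foo; nop
     and
         ff 15 dd dd dd dd    call *foo@GOTPCREL(%rip)
     becomes
         90 e8 dd dd dd dd    nop; call foo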
*/ 1853 modrm = bfd_get_8 (abfd, contents + roff - 1); 1854 if (modrm == 0x25) 1855 { 1856 /* Convert to "jmp foo nop". */ 1857 modrm = 0xe9; 1858 nop = NOP_OPCODE; 1859 nop_offset = irel->r_offset + 3; 1860 disp = bfd_get_32 (abfd, contents + irel->r_offset); 1861 irel->r_offset -= 1; 1862 bfd_put_32 (abfd, disp, contents + irel->r_offset); 1863 } 1864 else 1865 { 1866 struct elf_x86_link_hash_entry *eh 1867 = (struct elf_x86_link_hash_entry *) h; 1868 1869 /* Convert to "nop call foo". ADDR_PREFIX_OPCODE 1870 is a nop prefix. */ 1871 modrm = 0xe8; 1872 /* To support TLS optimization, always use addr32 prefix for 1873 "call *__tls_get_addr@GOTPCREL(%rip)". */ 1874 if (eh && eh->tls_get_addr) 1875 { 1876 nop = 0x67; 1877 nop_offset = irel->r_offset - 2; 1878 } 1879 else 1880 { 1881 nop = htab->params->call_nop_byte; 1882 if (htab->params->call_nop_as_suffix) 1883 { 1884 nop_offset = irel->r_offset + 3; 1885 disp = bfd_get_32 (abfd, contents + irel->r_offset); 1886 irel->r_offset -= 1; 1887 bfd_put_32 (abfd, disp, contents + irel->r_offset); 1888 } 1889 else 1890 nop_offset = irel->r_offset - 2; 1891 } 1892 } 1893 bfd_put_8 (abfd, nop, contents + nop_offset); 1894 bfd_put_8 (abfd, modrm, contents + irel->r_offset - 1); 1895 r_type = R_X86_64_PC32; 1896 } 1897 else 1898 { 1899 unsigned int rex = 0; 1900 unsigned int rex_mask = REX_R; 1901 unsigned int rex2 = 0; 1902 unsigned int rex2_mask = REX_R | REX_R << 4; 1903 bool rex_w = false; 1904 1905 if (r_type == R_X86_64_CODE_4_GOTPCRELX) 1906 { 1907 rex2 = bfd_get_8 (abfd, contents + roff - 3); 1908 rex_w = (rex2 & REX_W) != 0; 1909 } 1910 else if (r_type == R_X86_64_REX_GOTPCRELX) 1911 { 1912 rex = bfd_get_8 (abfd, contents + roff - 3); 1913 rex_w = (rex & REX_W) != 0; 1914 } 1915 1916 if (opcode == 0x8b) 1917 { 1918 if (abs_symbol && local_ref && relocx) 1919 to_reloc_pc32 = false; 1920 1921 if (to_reloc_pc32) 1922 { 1923 /* Convert "mov foo@GOTPCREL(%rip), %reg" to 1924 "lea foo(%rip), %reg". */ 1925 opcode = 0x8d; 1926 r_type = R_X86_64_PC32; 1927 } 1928 else 1929 { 1930 /* Convert "mov foo@GOTPCREL(%rip), %reg" to 1931 "mov $foo, %reg". */ 1932 opcode = 0xc7; 1933 modrm = bfd_get_8 (abfd, contents + roff - 1); 1934 modrm = 0xc0 | (modrm & 0x38) >> 3; 1935 if (rex_w && ABI_64_P (link_info->output_bfd)) 1936 { 1937 /* Keep the REX_W bit in REX byte for LP64. */ 1938 r_type = R_X86_64_32S; 1939 goto rewrite_modrm_rex; 1940 } 1941 else 1942 { 1943 /* If the REX_W bit in REX byte isn't needed, 1944 use R_X86_64_32 and clear the W bit to avoid 1945 sign-extend imm32 to imm64. */ 1946 r_type = R_X86_64_32; 1947 /* Clear the W bit in REX byte and REX2 payload. */ 1948 rex_mask |= REX_W; 1949 rex2_mask |= REX_W; 1950 goto rewrite_modrm_rex; 1951 } 1952 } 1953 } 1954 else 1955 { 1956 /* R_X86_64_PC32 isn't supported. */ 1957 if (to_reloc_pc32) 1958 return true; 1959 1960 modrm = bfd_get_8 (abfd, contents + roff - 1); 1961 if (opcode == 0x85) 1962 { 1963 /* Convert "test %reg, foo@GOTPCREL(%rip)" to 1964 "test $foo, %reg". */ 1965 modrm = 0xc0 | (modrm & 0x38) >> 3; 1966 opcode = 0xf7; 1967 } 1968 else 1969 { 1970 /* Convert "binop foo@GOTPCREL(%rip), %reg" to 1971 "binop $foo, %reg". */ 1972 modrm = 0xc0 | (modrm & 0x38) >> 3 | (opcode & 0x3c); 1973 opcode = 0x81; 1974 } 1975 1976 /* Use R_X86_64_32 with 32-bit operand to avoid relocation 1977 overflow when sign-extending imm32 to imm64. */ 1978 r_type = rex_w ? R_X86_64_32S : R_X86_64_32; 1979 1980 rewrite_modrm_rex: 1981 if (abs_relocation) 1982 { 1983 /* Check if R_X86_64_32S/R_X86_64_32 fits. 
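     R_X86_64_32S needs the value to fit in a sign-extended 32-bit immediate, i.e. [-0x80000000, 0x7fffffff]; R_X86_64_32 needs it to fit zero-extended, i.e. [0, 0xffffffff].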
*/ 1984 if (r_type == R_X86_64_32S) 1985 { 1986 if ((abs_relocation + 0x80000000) > 0xffffffff) 1987 return true; 1988 } 1989 else 1990 { 1991 if (abs_relocation > 0xffffffff) 1992 return true; 1993 } 1994 } 1995 1996 bfd_put_8 (abfd, modrm, contents + roff - 1); 1997 1998 if (rex) 1999 { 2000 /* Move the R bit to the B bit in REX byte. */ 2001 rex = (rex & ~rex_mask) | (rex & REX_R) >> 2; 2002 bfd_put_8 (abfd, rex, contents + roff - 3); 2003 } 2004 else if (rex2) 2005 { 2006 /* Move the R bits to the B bits in REX2 payload byte. */ 2007 rex2 = ((rex2 & ~rex2_mask) 2008 | (rex2 & (REX_R | REX_R << 4)) >> 2); 2009 bfd_put_8 (abfd, rex2, contents + roff - 3); 2010 } 2011 2012 /* No addend for R_X86_64_32/R_X86_64_32S relocations. */ 2013 irel->r_addend = 0; 2014 } 2015 2016 bfd_put_8 (abfd, opcode, contents + roff - 2); 2017 } 2018 2019 *r_type_p = r_type; 2020 irel->r_info = htab->r_info (r_symndx, 2021 r_type | R_X86_64_converted_reloc_bit); 2022 2023 *converted = true; 2024 2025 return true; 2026 } 2027 2028 /* Look through the relocs for a section during the first phase, and 2029 calculate needed space in the global offset table, and procedure 2030 linkage table. */ 2031 2032 static bool 2033 elf_x86_64_scan_relocs (bfd *abfd, struct bfd_link_info *info, 2034 asection *sec, 2035 const Elf_Internal_Rela *relocs) 2036 { 2037 struct elf_x86_link_hash_table *htab; 2038 Elf_Internal_Shdr *symtab_hdr; 2039 struct elf_link_hash_entry **sym_hashes; 2040 const Elf_Internal_Rela *rel; 2041 const Elf_Internal_Rela *rel_end; 2042 bfd_byte *contents; 2043 bool converted; 2044 2045 if (bfd_link_relocatable (info)) 2046 return true; 2047 2048 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 2049 if (htab == NULL) 2050 { 2051 sec->check_relocs_failed = 1; 2052 return false; 2053 } 2054 2055 BFD_ASSERT (is_x86_elf (abfd, htab)); 2056 2057 /* Get the section contents. */ 2058 if (elf_section_data (sec)->this_hdr.contents != NULL) 2059 contents = elf_section_data (sec)->this_hdr.contents; 2060 else if (!_bfd_elf_mmap_section_contents (abfd, sec, &contents)) 2061 { 2062 sec->check_relocs_failed = 1; 2063 return false; 2064 } 2065 2066 symtab_hdr = &elf_symtab_hdr (abfd); 2067 sym_hashes = elf_sym_hashes (abfd); 2068 2069 converted = false; 2070 2071 rel_end = relocs + sec->reloc_count; 2072 for (rel = relocs; rel < rel_end; rel++) 2073 { 2074 unsigned int r_type; 2075 unsigned int r_symndx; 2076 struct elf_link_hash_entry *h; 2077 struct elf_x86_link_hash_entry *eh; 2078 Elf_Internal_Sym *isym; 2079 const char *name; 2080 bool size_reloc; 2081 bool converted_reloc; 2082 bool no_dynreloc; 2083 2084 r_symndx = htab->r_sym (rel->r_info); 2085 r_type = ELF32_R_TYPE (rel->r_info); 2086 2087 /* Don't check R_X86_64_NONE. */ 2088 if (r_type == R_X86_64_NONE) 2089 continue; 2090 2091 if (r_symndx >= NUM_SHDR_ENTRIES (symtab_hdr)) 2092 { 2093 /* xgettext:c-format */ 2094 _bfd_error_handler (_("%pB: bad symbol index: %d"), 2095 abfd, r_symndx); 2096 goto error_return; 2097 } 2098 2099 if (r_symndx < symtab_hdr->sh_info) 2100 { 2101 /* A local symbol. */ 2102 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache, 2103 abfd, r_symndx); 2104 if (isym == NULL) 2105 goto error_return; 2106 2107 /* Check relocation against local STT_GNU_IFUNC symbol. */ 2108 if (ELF_ST_TYPE (isym->st_info) == STT_GNU_IFUNC) 2109 { 2110 h = _bfd_elf_x86_get_local_sym_hash (htab, abfd, rel, 2111 true); 2112 if (h == NULL) 2113 goto error_return; 2114 2115 /* Fake a STT_GNU_IFUNC symbol. 
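     A local IFUNC still needs a PLT entry and an R_X86_64_IRELATIVE relocation, so give it a synthetic hash table entry and treat it like a global definition from here on.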
*/ 2116 h->root.root.string = bfd_elf_sym_name (abfd, symtab_hdr, 2117 isym, NULL); 2118 h->type = STT_GNU_IFUNC; 2119 h->def_regular = 1; 2120 h->ref_regular = 1; 2121 h->forced_local = 1; 2122 h->root.type = bfd_link_hash_defined; 2123 } 2124 else 2125 h = NULL; 2126 } 2127 else 2128 { 2129 isym = NULL; 2130 h = sym_hashes[r_symndx - symtab_hdr->sh_info]; 2131 while (h->root.type == bfd_link_hash_indirect 2132 || h->root.type == bfd_link_hash_warning) 2133 h = (struct elf_link_hash_entry *) h->root.u.i.link; 2134 } 2135 2136 /* Check invalid x32 relocations. */ 2137 if (!ABI_64_P (abfd)) 2138 switch (r_type) 2139 { 2140 default: 2141 break; 2142 2143 case R_X86_64_DTPOFF64: 2144 case R_X86_64_TPOFF64: 2145 case R_X86_64_PC64: 2146 case R_X86_64_GOTOFF64: 2147 case R_X86_64_GOT64: 2148 case R_X86_64_GOTPCREL64: 2149 case R_X86_64_GOTPC64: 2150 case R_X86_64_GOTPLT64: 2151 case R_X86_64_PLTOFF64: 2152 { 2153 if (h) 2154 name = h->root.root.string; 2155 else 2156 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, 2157 NULL); 2158 _bfd_error_handler 2159 /* xgettext:c-format */ 2160 (_("%pB: relocation %s against symbol `%s' isn't " 2161 "supported in x32 mode"), abfd, 2162 x86_64_elf_howto_table[r_type].name, name); 2163 bfd_set_error (bfd_error_bad_value); 2164 goto error_return; 2165 } 2166 break; 2167 } 2168 2169 eh = (struct elf_x86_link_hash_entry *) h; 2170 2171 if (h != NULL) 2172 { 2173 /* It is referenced by a non-shared object. */ 2174 h->ref_regular = 1; 2175 } 2176 2177 converted_reloc = false; 2178 if ((r_type == R_X86_64_GOTPCREL 2179 || r_type == R_X86_64_GOTPCRELX 2180 || r_type == R_X86_64_REX_GOTPCRELX 2181 || r_type == R_X86_64_CODE_4_GOTPCRELX) 2182 && (h == NULL || h->type != STT_GNU_IFUNC)) 2183 { 2184 Elf_Internal_Rela *irel = (Elf_Internal_Rela *) rel; 2185 if (!elf_x86_64_convert_load_reloc (abfd, contents, &r_type, 2186 irel, h, &converted_reloc, 2187 info)) 2188 goto error_return; 2189 2190 if (converted_reloc) 2191 converted = true; 2192 } 2193 2194 if (!_bfd_elf_x86_valid_reloc_p (sec, info, htab, rel, h, isym, 2195 symtab_hdr, &no_dynreloc)) 2196 return false; 2197 2198 if (! elf_x86_64_tls_transition (info, abfd, sec, contents, 2199 symtab_hdr, sym_hashes, 2200 &r_type, GOT_UNKNOWN, 2201 rel, rel_end, h, r_symndx, false)) 2202 goto error_return; 2203 2204 /* Check if _GLOBAL_OFFSET_TABLE_ is referenced. */ 2205 if (h == htab->elf.hgot) 2206 htab->got_referenced = true; 2207 2208 switch (r_type) 2209 { 2210 case R_X86_64_TLSLD: 2211 htab->tls_ld_or_ldm_got.refcount = 1; 2212 goto create_got; 2213 2214 case R_X86_64_TPOFF32: 2215 if (!bfd_link_executable (info) && ABI_64_P (abfd)) 2216 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym, 2217 &x86_64_elf_howto_table[r_type]); 2218 if (eh != NULL) 2219 eh->zero_undefweak &= 0x2; 2220 break; 2221 2222 case R_X86_64_GOTTPOFF: 2223 case R_X86_64_CODE_4_GOTTPOFF: 2224 case R_X86_64_CODE_6_GOTTPOFF: 2225 if (!bfd_link_executable (info)) 2226 info->flags |= DF_STATIC_TLS; 2227 /* Fall through */ 2228 2229 case R_X86_64_GOT32: 2230 case R_X86_64_GOTPCREL: 2231 case R_X86_64_GOTPCRELX: 2232 case R_X86_64_REX_GOTPCRELX: 2233 case R_X86_64_CODE_4_GOTPCRELX: 2234 case R_X86_64_TLSGD: 2235 case R_X86_64_GOT64: 2236 case R_X86_64_GOTPCREL64: 2237 case R_X86_64_GOTPLT64: 2238 case R_X86_64_GOTPC32_TLSDESC: 2239 case R_X86_64_CODE_4_GOTPC32_TLSDESC: 2240 case R_X86_64_TLSDESC_CALL: 2241 /* This symbol requires a global offset table entry. 
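     The tls_type recorded below (normal, absolute, GD, IE or GDESC) tells the later GOT sizing code how many slots this entry takes and which dynamic relocations, if any, must accompany it.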
*/ 2242 { 2243 int tls_type, old_tls_type; 2244 2245 switch (r_type) 2246 { 2247 default: 2248 tls_type = GOT_NORMAL; 2249 if (h) 2250 { 2251 if (ABS_SYMBOL_P (h)) 2252 tls_type = GOT_ABS; 2253 } 2254 else if (isym->st_shndx == SHN_ABS) 2255 tls_type = GOT_ABS; 2256 break; 2257 case R_X86_64_TLSGD: 2258 tls_type = GOT_TLS_GD; 2259 break; 2260 case R_X86_64_GOTTPOFF: 2261 case R_X86_64_CODE_4_GOTTPOFF: 2262 case R_X86_64_CODE_6_GOTTPOFF: 2263 tls_type = GOT_TLS_IE; 2264 break; 2265 case R_X86_64_GOTPC32_TLSDESC: 2266 case R_X86_64_CODE_4_GOTPC32_TLSDESC: 2267 case R_X86_64_TLSDESC_CALL: 2268 tls_type = GOT_TLS_GDESC; 2269 break; 2270 } 2271 2272 if (h != NULL) 2273 { 2274 h->got.refcount = 1; 2275 old_tls_type = eh->tls_type; 2276 } 2277 else 2278 { 2279 bfd_signed_vma *local_got_refcounts; 2280 2281 if (!elf_x86_allocate_local_got_info (abfd, 2282 symtab_hdr->sh_info)) 2283 goto error_return; 2284 2285 /* This is a global offset table entry for a local symbol. */ 2286 local_got_refcounts = elf_local_got_refcounts (abfd); 2287 local_got_refcounts[r_symndx] = 1; 2288 old_tls_type 2289 = elf_x86_local_got_tls_type (abfd) [r_symndx]; 2290 } 2291 2292 /* If a TLS symbol is accessed using IE at least once, 2293 there is no point to use dynamic model for it. */ 2294 if (old_tls_type != tls_type && old_tls_type != GOT_UNKNOWN 2295 && (! GOT_TLS_GD_ANY_P (old_tls_type) 2296 || tls_type != GOT_TLS_IE)) 2297 { 2298 if (old_tls_type == GOT_TLS_IE && GOT_TLS_GD_ANY_P (tls_type)) 2299 tls_type = old_tls_type; 2300 else if (GOT_TLS_GD_ANY_P (old_tls_type) 2301 && GOT_TLS_GD_ANY_P (tls_type)) 2302 tls_type |= old_tls_type; 2303 else 2304 { 2305 if (h) 2306 name = h->root.root.string; 2307 else 2308 name = bfd_elf_sym_name (abfd, symtab_hdr, 2309 isym, NULL); 2310 _bfd_error_handler 2311 /* xgettext:c-format */ 2312 (_("%pB: '%s' accessed both as normal and" 2313 " thread local symbol"), 2314 abfd, name); 2315 bfd_set_error (bfd_error_bad_value); 2316 goto error_return; 2317 } 2318 } 2319 2320 if (old_tls_type != tls_type) 2321 { 2322 if (eh != NULL) 2323 eh->tls_type = tls_type; 2324 else 2325 elf_x86_local_got_tls_type (abfd) [r_symndx] = tls_type; 2326 } 2327 } 2328 /* Fall through */ 2329 2330 case R_X86_64_GOTOFF64: 2331 case R_X86_64_GOTPC32: 2332 case R_X86_64_GOTPC64: 2333 create_got: 2334 if (eh != NULL) 2335 eh->zero_undefweak &= 0x2; 2336 break; 2337 2338 case R_X86_64_PLT32: 2339 /* This symbol requires a procedure linkage table entry. We 2340 actually build the entry in adjust_dynamic_symbol, 2341 because this might be a case of linking PIC code which is 2342 never referenced by a dynamic object, in which case we 2343 don't need to generate a procedure linkage table entry 2344 after all. */ 2345 2346 /* If this is a local symbol, we resolve it directly without 2347 creating a procedure linkage table entry. */ 2348 if (h == NULL) 2349 continue; 2350 2351 eh->zero_undefweak &= 0x2; 2352 h->needs_plt = 1; 2353 h->plt.refcount = 1; 2354 break; 2355 2356 case R_X86_64_PLTOFF64: 2357 /* This tries to form the 'address' of a function relative 2358 to GOT. For global symbols we need a PLT entry. */ 2359 if (h != NULL) 2360 { 2361 h->needs_plt = 1; 2362 h->plt.refcount = 1; 2363 } 2364 goto create_got; 2365 2366 case R_X86_64_SIZE32: 2367 case R_X86_64_SIZE64: 2368 size_reloc = true; 2369 goto do_size; 2370 2371 case R_X86_64_32: 2372 if (!ABI_64_P (abfd)) 2373 goto pointer; 2374 /* Fall through. 
*/ 2375 case R_X86_64_8: 2376 case R_X86_64_16: 2377 case R_X86_64_32S: 2378 /* Check relocation overflow as these relocs may lead to 2379 run-time relocation overflow. Don't error out for 2380 sections we don't care about, such as debug sections or 2381 when relocation overflow check is disabled. */ 2382 if (!htab->params->no_reloc_overflow_check 2383 && !converted_reloc 2384 && (bfd_link_pic (info) 2385 || (bfd_link_executable (info) 2386 && h != NULL 2387 && !h->def_regular 2388 && h->def_dynamic 2389 && (sec->flags & SEC_READONLY) == 0))) 2390 return elf_x86_64_need_pic (info, abfd, sec, h, symtab_hdr, isym, 2391 &x86_64_elf_howto_table[r_type]); 2392 /* Fall through. */ 2393 2394 case R_X86_64_PC8: 2395 case R_X86_64_PC16: 2396 case R_X86_64_PC32: 2397 case R_X86_64_PC64: 2398 case R_X86_64_64: 2399 pointer: 2400 if (eh != NULL && (sec->flags & SEC_CODE) != 0) 2401 eh->zero_undefweak |= 0x2; 2402 /* We are called after all symbols have been resolved. Only 2403 relocation against STT_GNU_IFUNC symbol must go through 2404 PLT. */ 2405 if (h != NULL 2406 && (bfd_link_executable (info) 2407 || h->type == STT_GNU_IFUNC)) 2408 { 2409 bool func_pointer_ref = false; 2410 2411 if (r_type == R_X86_64_PC32) 2412 { 2413 /* Since something like ".long foo - ." may be used 2414 as pointer, make sure that PLT is used if foo is 2415 a function defined in a shared library. */ 2416 if ((sec->flags & SEC_CODE) == 0) 2417 { 2418 h->pointer_equality_needed = 1; 2419 if (bfd_link_pie (info) 2420 && h->type == STT_FUNC 2421 && !h->def_regular 2422 && h->def_dynamic) 2423 { 2424 h->needs_plt = 1; 2425 h->plt.refcount = 1; 2426 } 2427 } 2428 } 2429 else if (r_type != R_X86_64_PC64) 2430 { 2431 /* At run-time, R_X86_64_64 can be resolved for both 2432 x86-64 and x32. But R_X86_64_32 and R_X86_64_32S 2433 can only be resolved for x32. Function pointer 2434 reference doesn't need PLT for pointer equality. */ 2435 if ((sec->flags & SEC_READONLY) == 0 2436 && (r_type == R_X86_64_64 2437 || (!ABI_64_P (abfd) 2438 && (r_type == R_X86_64_32 2439 || r_type == R_X86_64_32S)))) 2440 func_pointer_ref = true; 2441 2442 /* IFUNC symbol needs pointer equality in PDE so that 2443 function pointer reference will be resolved to its 2444 PLT entry directly. */ 2445 if (!func_pointer_ref 2446 || (bfd_link_pde (info) 2447 && h->type == STT_GNU_IFUNC)) 2448 h->pointer_equality_needed = 1; 2449 } 2450 2451 if (!func_pointer_ref) 2452 { 2453 /* If this reloc is in a read-only section, we might 2454 need a copy reloc. We can't check reliably at this 2455 stage whether the section is read-only, as input 2456 sections have not yet been mapped to output sections. 2457 Tentatively set the flag for now, and correct in 2458 adjust_dynamic_symbol. */ 2459 h->non_got_ref = 1; 2460 2461 if (!elf_has_indirect_extern_access (sec->owner)) 2462 eh->non_got_ref_without_indirect_extern_access = 1; 2463 2464 /* We may need a .plt entry if the symbol is a function 2465 defined in a shared lib or is a function referenced 2466 from the code or read-only section. */ 2467 if (!h->def_regular 2468 || (sec->flags & (SEC_CODE | SEC_READONLY)) != 0) 2469 h->plt.refcount = 1; 2470 2471 if (htab->elf.target_os != is_solaris 2472 && h->pointer_equality_needed 2473 && h->type == STT_FUNC 2474 && eh->def_protected 2475 && !SYMBOL_DEFINED_NON_SHARED_P (h) 2476 && h->def_dynamic) 2477 { 2478 /* Disallow non-canonical reference to canonical 2479 protected function. 
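     Roughly: the protected definition in the shared object is the canonical one, and a non-GOT reference here would resolve to a different address, breaking pointer equality for the function.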
*/ 2480 _bfd_error_handler 2481 /* xgettext:c-format */ 2482 (_("%pB: non-canonical reference to canonical " 2483 "protected function `%s' in %pB"), 2484 abfd, h->root.root.string, 2485 h->root.u.def.section->owner); 2486 bfd_set_error (bfd_error_bad_value); 2487 goto error_return; 2488 } 2489 } 2490 } 2491 2492 size_reloc = false; 2493 do_size: 2494 if (!no_dynreloc 2495 && NEED_DYNAMIC_RELOCATION_P (true, info, true, h, sec, 2496 r_type, 2497 htab->pointer_r_type)) 2498 { 2499 struct elf_dyn_relocs *p; 2500 struct elf_dyn_relocs **head; 2501 2502 /* If this is a global symbol, we count the number of 2503 relocations we need for this symbol. */ 2504 if (h != NULL) 2505 head = &h->dyn_relocs; 2506 else 2507 { 2508 /* Track dynamic relocs needed for local syms too. 2509 We really need local syms available to do this 2510 easily. Oh well. */ 2511 asection *s; 2512 void **vpp; 2513 2514 isym = bfd_sym_from_r_symndx (&htab->elf.sym_cache, 2515 abfd, r_symndx); 2516 if (isym == NULL) 2517 goto error_return; 2518 2519 s = bfd_section_from_elf_index (abfd, isym->st_shndx); 2520 if (s == NULL) 2521 s = sec; 2522 2523 /* Beware of type punned pointers vs strict aliasing 2524 rules. */ 2525 vpp = &(elf_section_data (s)->local_dynrel); 2526 head = (struct elf_dyn_relocs **)vpp; 2527 } 2528 2529 p = *head; 2530 if (p == NULL || p->sec != sec) 2531 { 2532 size_t amt = sizeof *p; 2533 2534 p = ((struct elf_dyn_relocs *) 2535 bfd_alloc (htab->elf.dynobj, amt)); 2536 if (p == NULL) 2537 goto error_return; 2538 p->next = *head; 2539 *head = p; 2540 p->sec = sec; 2541 p->count = 0; 2542 p->pc_count = 0; 2543 } 2544 2545 p->count += 1; 2546 /* Count size relocation as PC-relative relocation. */ 2547 if (X86_PCREL_TYPE_P (true, r_type) || size_reloc) 2548 p->pc_count += 1; 2549 } 2550 break; 2551 2552 case R_X86_64_CODE_5_GOTPCRELX: 2553 case R_X86_64_CODE_5_GOTTPOFF: 2554 case R_X86_64_CODE_5_GOTPC32_TLSDESC: 2555 case R_X86_64_CODE_6_GOTPCRELX: 2556 case R_X86_64_CODE_6_GOTPC32_TLSDESC: 2557 { 2558 /* These relocations are added only for completeness and 2559 aren't be used. */ 2560 if (h) 2561 name = h->root.root.string; 2562 else 2563 name = bfd_elf_sym_name (abfd, symtab_hdr, isym, 2564 NULL); 2565 _bfd_error_handler 2566 /* xgettext:c-format */ 2567 (_("%pB: unsupported relocation %s against symbol `%s'"), 2568 abfd, x86_64_elf_howto_table[r_type].name, name); 2569 } 2570 break; 2571 2572 /* This relocation describes the C++ object vtable hierarchy. 2573 Reconstruct it for later use during GC. */ 2574 case R_X86_64_GNU_VTINHERIT: 2575 if (!bfd_elf_gc_record_vtinherit (abfd, sec, h, rel->r_offset)) 2576 goto error_return; 2577 break; 2578 2579 /* This relocation describes which C++ vtable entries are actually 2580 used. Record for later use during GC. */ 2581 case R_X86_64_GNU_VTENTRY: 2582 if (!bfd_elf_gc_record_vtentry (abfd, sec, h, rel->r_addend)) 2583 goto error_return; 2584 break; 2585 2586 default: 2587 break; 2588 } 2589 } 2590 2591 if (elf_section_data (sec)->this_hdr.contents != contents) 2592 { 2593 if (!converted) 2594 _bfd_elf_munmap_section_contents (sec, contents); 2595 else 2596 { 2597 /* Cache the section contents for elf_link_input_bfd if any 2598 load is converted or --no-keep-memory isn't used. */ 2599 elf_section_data (sec)->this_hdr.contents = contents; 2600 info->cache_size += sec->size; 2601 } 2602 } 2603 2604 /* Cache relocations if any load is converted. 
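     This keeps the rewritten r_info values (carrying R_X86_64_converted_reloc_bit) visible to relocate_section instead of letting the relocations be re-read from the input file.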
*/ 2605 if (elf_section_data (sec)->relocs != relocs && converted) 2606 elf_section_data (sec)->relocs = (Elf_Internal_Rela *) relocs; 2607 2608 return true; 2609 2610 error_return: 2611 if (elf_section_data (sec)->this_hdr.contents != contents) 2612 _bfd_elf_munmap_section_contents (sec, contents); 2613 sec->check_relocs_failed = 1; 2614 return false; 2615 } 2616 2617 static bool 2618 elf_x86_64_early_size_sections (bfd *output_bfd, struct bfd_link_info *info) 2619 { 2620 bfd *abfd; 2621 2622 /* Scan relocations after rel_from_abs has been set on __ehdr_start. */ 2623 for (abfd = info->input_bfds; 2624 abfd != (bfd *) NULL; 2625 abfd = abfd->link.next) 2626 if (bfd_get_flavour (abfd) == bfd_target_elf_flavour 2627 && !_bfd_elf_link_iterate_on_relocs (abfd, info, 2628 elf_x86_64_scan_relocs)) 2629 return false; 2630 2631 return _bfd_x86_elf_early_size_sections (output_bfd, info); 2632 } 2633 2634 /* Return the relocation value for @tpoff relocation 2635 if STT_TLS virtual address is ADDRESS. */ 2636 2637 static bfd_vma 2638 elf_x86_64_tpoff (struct bfd_link_info *info, bfd_vma address) 2639 { 2640 struct elf_link_hash_table *htab = elf_hash_table (info); 2641 const struct elf_backend_data *bed = get_elf_backend_data (info->output_bfd); 2642 bfd_vma static_tls_size; 2643 2644 /* If tls_segment is NULL, we should have signalled an error already. */ 2645 if (htab->tls_sec == NULL) 2646 return 0; 2647 2648 /* Consider special static TLS alignment requirements. */ 2649 static_tls_size = BFD_ALIGN (htab->tls_size, bed->static_tls_alignment); 2650 return address - static_tls_size - htab->tls_sec->vma; 2651 } 2652 2653 /* Relocate an x86_64 ELF section. */ 2654 2655 static int 2656 elf_x86_64_relocate_section (bfd *output_bfd, 2657 struct bfd_link_info *info, 2658 bfd *input_bfd, 2659 asection *input_section, 2660 bfd_byte *contents, 2661 Elf_Internal_Rela *relocs, 2662 Elf_Internal_Sym *local_syms, 2663 asection **local_sections) 2664 { 2665 struct elf_x86_link_hash_table *htab; 2666 Elf_Internal_Shdr *symtab_hdr; 2667 struct elf_link_hash_entry **sym_hashes; 2668 bfd_vma *local_got_offsets; 2669 bfd_vma *local_tlsdesc_gotents; 2670 Elf_Internal_Rela *rel; 2671 Elf_Internal_Rela *wrel; 2672 Elf_Internal_Rela *relend; 2673 unsigned int plt_entry_size; 2674 bool status; 2675 2676 /* Skip if check_relocs or scan_relocs failed. 
*/ 2677 if (input_section->check_relocs_failed) 2678 return false; 2679 2680 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 2681 if (htab == NULL) 2682 return false; 2683 2684 if (!is_x86_elf (input_bfd, htab)) 2685 { 2686 bfd_set_error (bfd_error_wrong_format); 2687 return false; 2688 } 2689 2690 plt_entry_size = htab->plt.plt_entry_size; 2691 symtab_hdr = &elf_symtab_hdr (input_bfd); 2692 sym_hashes = elf_sym_hashes (input_bfd); 2693 local_got_offsets = elf_local_got_offsets (input_bfd); 2694 local_tlsdesc_gotents = elf_x86_local_tlsdesc_gotent (input_bfd); 2695 2696 _bfd_x86_elf_set_tls_module_base (info); 2697 2698 status = true; 2699 rel = wrel = relocs; 2700 relend = relocs + input_section->reloc_count; 2701 for (; rel < relend; wrel++, rel++) 2702 { 2703 unsigned int r_type, r_type_tls; 2704 reloc_howto_type *howto; 2705 unsigned long r_symndx; 2706 struct elf_link_hash_entry *h; 2707 struct elf_x86_link_hash_entry *eh; 2708 Elf_Internal_Sym *sym; 2709 asection *sec; 2710 bfd_vma off, offplt, plt_offset; 2711 bfd_vma relocation; 2712 bool unresolved_reloc; 2713 bfd_reloc_status_type r; 2714 int tls_type; 2715 asection *base_got, *resolved_plt; 2716 bfd_vma st_size; 2717 bool resolved_to_zero; 2718 bool relative_reloc; 2719 bool converted_reloc; 2720 bool need_copy_reloc_in_pie; 2721 bool no_copyreloc_p; 2722 2723 r_type = ELF32_R_TYPE (rel->r_info); 2724 if (r_type == (int) R_X86_64_GNU_VTINHERIT 2725 || r_type == (int) R_X86_64_GNU_VTENTRY) 2726 { 2727 if (wrel != rel) 2728 *wrel = *rel; 2729 continue; 2730 } 2731 2732 r_symndx = htab->r_sym (rel->r_info); 2733 converted_reloc = (r_type & R_X86_64_converted_reloc_bit) != 0; 2734 if (converted_reloc) 2735 { 2736 r_type &= ~R_X86_64_converted_reloc_bit; 2737 rel->r_info = htab->r_info (r_symndx, r_type); 2738 } 2739 2740 howto = elf_x86_64_rtype_to_howto (input_bfd, r_type); 2741 if (howto == NULL) 2742 return _bfd_unrecognized_reloc (input_bfd, input_section, r_type); 2743 2744 h = NULL; 2745 sym = NULL; 2746 sec = NULL; 2747 unresolved_reloc = false; 2748 if (r_symndx < symtab_hdr->sh_info) 2749 { 2750 sym = local_syms + r_symndx; 2751 sec = local_sections[r_symndx]; 2752 2753 relocation = _bfd_elf_rela_local_sym (output_bfd, sym, 2754 &sec, rel); 2755 st_size = sym->st_size; 2756 2757 /* Relocate against local STT_GNU_IFUNC symbol. */ 2758 if (!bfd_link_relocatable (info) 2759 && ELF_ST_TYPE (sym->st_info) == STT_GNU_IFUNC) 2760 { 2761 h = _bfd_elf_x86_get_local_sym_hash (htab, input_bfd, 2762 rel, false); 2763 if (h == NULL) 2764 abort (); 2765 2766 /* Set STT_GNU_IFUNC symbol value. */ 2767 h->root.u.def.value = sym->st_value; 2768 h->root.u.def.section = sec; 2769 } 2770 } 2771 else 2772 { 2773 bool warned ATTRIBUTE_UNUSED; 2774 bool ignored ATTRIBUTE_UNUSED; 2775 2776 RELOC_FOR_GLOBAL_SYMBOL (info, input_bfd, input_section, rel, 2777 r_symndx, symtab_hdr, sym_hashes, 2778 h, sec, relocation, 2779 unresolved_reloc, warned, ignored); 2780 st_size = h->size; 2781 } 2782 2783 if (sec != NULL && discarded_section (sec)) 2784 { 2785 _bfd_clear_contents (howto, input_bfd, input_section, 2786 contents, rel->r_offset); 2787 wrel->r_offset = rel->r_offset; 2788 wrel->r_info = 0; 2789 wrel->r_addend = 0; 2790 2791 /* For ld -r, remove relocations in debug sections against 2792 sections defined in discarded sections. Not done for 2793 eh_frame editing code expects to be present. 
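     (That is, relocations in .eh_frame are kept because the .eh_frame editing code later relies on them still being present.)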
*/ 2794 if (bfd_link_relocatable (info) 2795 && (input_section->flags & SEC_DEBUGGING)) 2796 wrel--; 2797 2798 continue; 2799 } 2800 2801 if (bfd_link_relocatable (info)) 2802 { 2803 if (wrel != rel) 2804 *wrel = *rel; 2805 continue; 2806 } 2807 2808 if (rel->r_addend == 0 && !ABI_64_P (output_bfd)) 2809 { 2810 if (r_type == R_X86_64_64) 2811 { 2812 /* For x32, treat R_X86_64_64 like R_X86_64_32 and 2813 zero-extend it to 64bit if addend is zero. */ 2814 r_type = R_X86_64_32; 2815 memset (contents + rel->r_offset + 4, 0, 4); 2816 } 2817 else if (r_type == R_X86_64_SIZE64) 2818 { 2819 /* For x32, treat R_X86_64_SIZE64 like R_X86_64_SIZE32 and 2820 zero-extend it to 64bit if addend is zero. */ 2821 r_type = R_X86_64_SIZE32; 2822 memset (contents + rel->r_offset + 4, 0, 4); 2823 } 2824 } 2825 2826 eh = (struct elf_x86_link_hash_entry *) h; 2827 2828 /* Since STT_GNU_IFUNC symbol must go through PLT, we handle 2829 it here if it is defined in a non-shared object. */ 2830 if (h != NULL 2831 && h->type == STT_GNU_IFUNC 2832 && h->def_regular) 2833 { 2834 bfd_vma plt_index; 2835 const char *name; 2836 2837 if ((input_section->flags & SEC_ALLOC) == 0) 2838 { 2839 /* If this is a SHT_NOTE section without SHF_ALLOC, treat 2840 STT_GNU_IFUNC symbol as STT_FUNC. */ 2841 if (elf_section_type (input_section) == SHT_NOTE) 2842 goto skip_ifunc; 2843 /* Dynamic relocs are not propagated for SEC_DEBUGGING 2844 sections because such sections are not SEC_ALLOC and 2845 thus ld.so will not process them. */ 2846 if ((input_section->flags & SEC_DEBUGGING) != 0) 2847 continue; 2848 abort (); 2849 } 2850 2851 switch (r_type) 2852 { 2853 default: 2854 break; 2855 2856 case R_X86_64_GOTPCREL: 2857 case R_X86_64_GOTPCRELX: 2858 case R_X86_64_REX_GOTPCRELX: 2859 case R_X86_64_CODE_4_GOTPCRELX: 2860 case R_X86_64_GOTPCREL64: 2861 base_got = htab->elf.sgot; 2862 off = h->got.offset; 2863 2864 if (base_got == NULL) 2865 abort (); 2866 2867 if (off == (bfd_vma) -1) 2868 { 2869 /* We can't use h->got.offset here to save state, or 2870 even just remember the offset, as finish_dynamic_symbol 2871 would use that as offset into .got. */ 2872 2873 if (h->plt.offset == (bfd_vma) -1) 2874 abort (); 2875 2876 if (htab->elf.splt != NULL) 2877 { 2878 plt_index = (h->plt.offset / plt_entry_size 2879 - htab->plt.has_plt0); 2880 off = (plt_index + 3) * GOT_ENTRY_SIZE; 2881 base_got = htab->elf.sgotplt; 2882 } 2883 else 2884 { 2885 plt_index = h->plt.offset / plt_entry_size; 2886 off = plt_index * GOT_ENTRY_SIZE; 2887 base_got = htab->elf.igotplt; 2888 } 2889 2890 if (h->dynindx == -1 2891 || h->forced_local 2892 || info->symbolic) 2893 { 2894 /* This references the local defitionion. We must 2895 initialize this entry in the global offset table. 2896 Since the offset must always be a multiple of 8, 2897 we use the least significant bit to record 2898 whether we have initialized it already. 2899 2900 When doing a dynamic link, we create a .rela.got 2901 relocation entry to initialize the value. This 2902 is done in the finish_dynamic_symbol routine. */ 2903 if ((off & 1) != 0) 2904 off &= ~1; 2905 else 2906 { 2907 bfd_put_64 (output_bfd, relocation, 2908 base_got->contents + off); 2909 /* Note that this is harmless for the GOTPLT64 2910 case, as -1 | 1 still is -1. 
*/ 2911 h->got.offset |= 1; 2912 } 2913 } 2914 } 2915 2916 relocation = (base_got->output_section->vma 2917 + base_got->output_offset + off); 2918 2919 goto do_relocation; 2920 } 2921 2922 if (h->plt.offset == (bfd_vma) -1) 2923 { 2924 /* Handle static pointers of STT_GNU_IFUNC symbols. */ 2925 if (r_type == htab->pointer_r_type 2926 && (input_section->flags & SEC_CODE) == 0) 2927 goto do_ifunc_pointer; 2928 goto bad_ifunc_reloc; 2929 } 2930 2931 /* STT_GNU_IFUNC symbol must go through PLT. */ 2932 if (htab->elf.splt != NULL) 2933 { 2934 if (htab->plt_second != NULL) 2935 { 2936 resolved_plt = htab->plt_second; 2937 plt_offset = eh->plt_second.offset; 2938 } 2939 else 2940 { 2941 resolved_plt = htab->elf.splt; 2942 plt_offset = h->plt.offset; 2943 } 2944 } 2945 else 2946 { 2947 resolved_plt = htab->elf.iplt; 2948 plt_offset = h->plt.offset; 2949 } 2950 2951 relocation = (resolved_plt->output_section->vma 2952 + resolved_plt->output_offset + plt_offset); 2953 2954 switch (r_type) 2955 { 2956 default: 2957 bad_ifunc_reloc: 2958 if (h->root.root.string) 2959 name = h->root.root.string; 2960 else 2961 name = bfd_elf_sym_name (input_bfd, symtab_hdr, sym, 2962 NULL); 2963 _bfd_error_handler 2964 /* xgettext:c-format */ 2965 (_("%pB: relocation %s against STT_GNU_IFUNC " 2966 "symbol `%s' isn't supported"), input_bfd, 2967 howto->name, name); 2968 bfd_set_error (bfd_error_bad_value); 2969 return false; 2970 2971 case R_X86_64_32S: 2972 if (bfd_link_pic (info)) 2973 abort (); 2974 goto do_relocation; 2975 2976 case R_X86_64_32: 2977 if (ABI_64_P (output_bfd)) 2978 goto do_relocation; 2979 /* FALLTHROUGH */ 2980 case R_X86_64_64: 2981 do_ifunc_pointer: 2982 if (rel->r_addend != 0) 2983 { 2984 if (h->root.root.string) 2985 name = h->root.root.string; 2986 else 2987 name = bfd_elf_sym_name (input_bfd, symtab_hdr, 2988 sym, NULL); 2989 _bfd_error_handler 2990 /* xgettext:c-format */ 2991 (_("%pB: relocation %s against STT_GNU_IFUNC " 2992 "symbol `%s' has non-zero addend: %" PRId64), 2993 input_bfd, howto->name, name, (int64_t) rel->r_addend); 2994 bfd_set_error (bfd_error_bad_value); 2995 return false; 2996 } 2997 2998 /* Generate dynamic relcoation only when there is a 2999 non-GOT reference in a shared object or there is no 3000 PLT. */ 3001 if ((bfd_link_pic (info) && h->non_got_ref) 3002 || h->plt.offset == (bfd_vma) -1) 3003 { 3004 Elf_Internal_Rela outrel; 3005 asection *sreloc; 3006 3007 /* Need a dynamic relocation to get the real function 3008 address. */ 3009 outrel.r_offset = _bfd_elf_section_offset (output_bfd, 3010 info, 3011 input_section, 3012 rel->r_offset); 3013 if (outrel.r_offset == (bfd_vma) -1 3014 || outrel.r_offset == (bfd_vma) -2) 3015 abort (); 3016 3017 outrel.r_offset += (input_section->output_section->vma 3018 + input_section->output_offset); 3019 3020 if (POINTER_LOCAL_IFUNC_P (info, h)) 3021 { 3022 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), 3023 h->root.root.string, 3024 h->root.u.def.section->owner); 3025 3026 /* This symbol is resolved locally. 
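     R_X86_64_IRELATIVE makes the dynamic linker call the IFUNC resolver whose address is given by the addend and store the function address it returns at r_offset.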
*/ 3027 outrel.r_info = htab->r_info (0, R_X86_64_IRELATIVE); 3028 outrel.r_addend = (h->root.u.def.value 3029 + h->root.u.def.section->output_section->vma 3030 + h->root.u.def.section->output_offset); 3031 3032 if (htab->params->report_relative_reloc) 3033 _bfd_x86_elf_link_report_relative_reloc 3034 (info, input_section, h, sym, 3035 "R_X86_64_IRELATIVE", &outrel); 3036 } 3037 else 3038 { 3039 outrel.r_info = htab->r_info (h->dynindx, r_type); 3040 outrel.r_addend = 0; 3041 } 3042 3043 /* Dynamic relocations are stored in 3044 1. .rela.ifunc section in PIC object. 3045 2. .rela.got section in dynamic executable. 3046 3. .rela.iplt section in static executable. */ 3047 if (bfd_link_pic (info)) 3048 sreloc = htab->elf.irelifunc; 3049 else if (htab->elf.splt != NULL) 3050 sreloc = htab->elf.srelgot; 3051 else 3052 sreloc = htab->elf.irelplt; 3053 elf_append_rela (output_bfd, sreloc, &outrel); 3054 3055 /* If this reloc is against an external symbol, we 3056 do not want to fiddle with the addend. Otherwise, 3057 we need to include the symbol value so that it 3058 becomes an addend for the dynamic reloc. For an 3059 internal symbol, we have updated addend. */ 3060 continue; 3061 } 3062 /* FALLTHROUGH */ 3063 case R_X86_64_PC32: 3064 case R_X86_64_PC64: 3065 case R_X86_64_PLT32: 3066 goto do_relocation; 3067 } 3068 } 3069 3070 skip_ifunc: 3071 resolved_to_zero = (eh != NULL 3072 && UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh)); 3073 3074 /* When generating a shared object, the relocations handled here are 3075 copied into the output file to be resolved at run time. */ 3076 switch (r_type) 3077 { 3078 case R_X86_64_GOT32: 3079 case R_X86_64_GOT64: 3080 /* Relocation is to the entry for this symbol in the global 3081 offset table. */ 3082 case R_X86_64_GOTPCREL: 3083 case R_X86_64_GOTPCRELX: 3084 case R_X86_64_REX_GOTPCRELX: 3085 case R_X86_64_CODE_4_GOTPCRELX: 3086 case R_X86_64_GOTPCREL64: 3087 /* Use global offset table entry as symbol value. */ 3088 case R_X86_64_GOTPLT64: 3089 /* This is obsolete and treated the same as GOT64. */ 3090 base_got = htab->elf.sgot; 3091 3092 if (htab->elf.sgot == NULL) 3093 abort (); 3094 3095 relative_reloc = false; 3096 if (h != NULL) 3097 { 3098 off = h->got.offset; 3099 if (h->needs_plt 3100 && h->plt.offset != (bfd_vma)-1 3101 && off == (bfd_vma)-1) 3102 { 3103 /* We can't use h->got.offset here to save 3104 state, or even just remember the offset, as 3105 finish_dynamic_symbol would use that as offset into 3106 .got. */ 3107 bfd_vma plt_index = (h->plt.offset / plt_entry_size 3108 - htab->plt.has_plt0); 3109 off = (plt_index + 3) * GOT_ENTRY_SIZE; 3110 base_got = htab->elf.sgotplt; 3111 } 3112 3113 if (RESOLVED_LOCALLY_P (info, h, htab)) 3114 { 3115 /* We must initialize this entry in the global offset 3116 table. Since the offset must always be a multiple 3117 of 8, we use the least significant bit to record 3118 whether we have initialized it already. 3119 3120 When doing a dynamic link, we create a .rela.got 3121 relocation entry to initialize the value. This is 3122 done in the finish_dynamic_symbol routine. */ 3123 if ((off & 1) != 0) 3124 off &= ~1; 3125 else 3126 { 3127 bfd_put_64 (output_bfd, relocation, 3128 base_got->contents + off); 3129 /* Note that this is harmless for the GOTPLT64 case, 3130 as -1 | 1 still is -1. */ 3131 h->got.offset |= 1; 3132 3133 /* NB: Don't generate relative relocation here if 3134 it has been generated by DT_RELR. 
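     DT_RELR already encodes this GOT slot in its compact relative-relocation bitmap, so an explicit R_X86_64_RELATIVE entry would be redundant.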
*/ 3135 if (!info->enable_dt_relr 3136 && GENERATE_RELATIVE_RELOC_P (info, h)) 3137 { 3138 /* If this symbol isn't dynamic in PIC, 3139 generate R_X86_64_RELATIVE here. */ 3140 eh->no_finish_dynamic_symbol = 1; 3141 relative_reloc = true; 3142 } 3143 } 3144 } 3145 else 3146 unresolved_reloc = false; 3147 } 3148 else 3149 { 3150 if (local_got_offsets == NULL) 3151 abort (); 3152 3153 off = local_got_offsets[r_symndx]; 3154 3155 /* The offset must always be a multiple of 8. We use 3156 the least significant bit to record whether we have 3157 already generated the necessary reloc. */ 3158 if ((off & 1) != 0) 3159 off &= ~1; 3160 else 3161 { 3162 bfd_put_64 (output_bfd, relocation, 3163 base_got->contents + off); 3164 local_got_offsets[r_symndx] |= 1; 3165 3166 /* NB: GOTPCREL relocations against local absolute 3167 symbol store relocation value in the GOT slot 3168 without relative relocation. Don't generate 3169 relative relocation here if it has been generated 3170 by DT_RELR. */ 3171 if (!info->enable_dt_relr 3172 && bfd_link_pic (info) 3173 && !(sym->st_shndx == SHN_ABS 3174 && (r_type == R_X86_64_GOTPCREL 3175 || r_type == R_X86_64_GOTPCRELX 3176 || r_type == R_X86_64_REX_GOTPCRELX 3177 || r_type == R_X86_64_CODE_4_GOTPCRELX))) 3178 relative_reloc = true; 3179 } 3180 } 3181 3182 if (relative_reloc) 3183 { 3184 asection *s; 3185 Elf_Internal_Rela outrel; 3186 3187 /* We need to generate a R_X86_64_RELATIVE reloc 3188 for the dynamic linker. */ 3189 s = htab->elf.srelgot; 3190 if (s == NULL) 3191 abort (); 3192 3193 outrel.r_offset = (base_got->output_section->vma 3194 + base_got->output_offset 3195 + off); 3196 outrel.r_info = htab->r_info (0, R_X86_64_RELATIVE); 3197 outrel.r_addend = relocation; 3198 3199 if (htab->params->report_relative_reloc) 3200 _bfd_x86_elf_link_report_relative_reloc 3201 (info, input_section, h, sym, "R_X86_64_RELATIVE", 3202 &outrel); 3203 3204 elf_append_rela (output_bfd, s, &outrel); 3205 } 3206 3207 if (off >= (bfd_vma) -2) 3208 abort (); 3209 3210 relocation = base_got->output_section->vma 3211 + base_got->output_offset + off; 3212 if (r_type != R_X86_64_GOTPCREL 3213 && r_type != R_X86_64_GOTPCRELX 3214 && r_type != R_X86_64_REX_GOTPCRELX 3215 && r_type != R_X86_64_CODE_4_GOTPCRELX 3216 && r_type != R_X86_64_GOTPCREL64) 3217 relocation -= htab->elf.sgotplt->output_section->vma 3218 - htab->elf.sgotplt->output_offset; 3219 3220 break; 3221 3222 case R_X86_64_GOTOFF64: 3223 /* Relocation is relative to the start of the global offset 3224 table. */ 3225 3226 /* Check to make sure it isn't a protected function or data 3227 symbol for shared library since it may not be local when 3228 used as function address or with copy relocation. We also 3229 need to make sure that a symbol is referenced locally. 
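     R_X86_64_GOTOFF64 resolves to S + A - GOT (the .got.plt base), so it only works when the symbol's link-time address is final, i.e. the symbol is defined and binds locally.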
*/ 3230 if (bfd_link_pic (info) && h) 3231 { 3232 if (!h->def_regular) 3233 { 3234 const char *v; 3235 3236 switch (ELF_ST_VISIBILITY (h->other)) 3237 { 3238 case STV_HIDDEN: 3239 v = _("hidden symbol"); 3240 break; 3241 case STV_INTERNAL: 3242 v = _("internal symbol"); 3243 break; 3244 case STV_PROTECTED: 3245 v = _("protected symbol"); 3246 break; 3247 default: 3248 v = _("symbol"); 3249 break; 3250 } 3251 3252 _bfd_error_handler 3253 /* xgettext:c-format */ 3254 (_("%pB: relocation R_X86_64_GOTOFF64 against undefined %s" 3255 " `%s' can not be used when making a shared object"), 3256 input_bfd, v, h->root.root.string); 3257 bfd_set_error (bfd_error_bad_value); 3258 return false; 3259 } 3260 else if (!bfd_link_executable (info) 3261 && !SYMBOL_REFERENCES_LOCAL_P (info, h) 3262 && (h->type == STT_FUNC 3263 || h->type == STT_OBJECT) 3264 && ELF_ST_VISIBILITY (h->other) == STV_PROTECTED) 3265 { 3266 _bfd_error_handler 3267 /* xgettext:c-format */ 3268 (_("%pB: relocation R_X86_64_GOTOFF64 against protected %s" 3269 " `%s' can not be used when making a shared object"), 3270 input_bfd, 3271 h->type == STT_FUNC ? "function" : "data", 3272 h->root.root.string); 3273 bfd_set_error (bfd_error_bad_value); 3274 return false; 3275 } 3276 } 3277 3278 /* Note that sgot is not involved in this 3279 calculation. We always want the start of .got.plt. If we 3280 defined _GLOBAL_OFFSET_TABLE_ in a different way, as is 3281 permitted by the ABI, we might have to change this 3282 calculation. */ 3283 relocation -= htab->elf.sgotplt->output_section->vma 3284 + htab->elf.sgotplt->output_offset; 3285 break; 3286 3287 case R_X86_64_GOTPC32: 3288 case R_X86_64_GOTPC64: 3289 /* Use global offset table as symbol value. */ 3290 relocation = htab->elf.sgotplt->output_section->vma 3291 + htab->elf.sgotplt->output_offset; 3292 unresolved_reloc = false; 3293 break; 3294 3295 case R_X86_64_PLTOFF64: 3296 /* Relocation is PLT entry relative to GOT. For local 3297 symbols it's the symbol itself relative to GOT. */ 3298 if (h != NULL 3299 /* See PLT32 handling. */ 3300 && (h->plt.offset != (bfd_vma) -1 3301 || eh->plt_got.offset != (bfd_vma) -1) 3302 && htab->elf.splt != NULL) 3303 { 3304 if (eh->plt_got.offset != (bfd_vma) -1) 3305 { 3306 /* Use the GOT PLT. */ 3307 resolved_plt = htab->plt_got; 3308 plt_offset = eh->plt_got.offset; 3309 } 3310 else if (htab->plt_second != NULL) 3311 { 3312 resolved_plt = htab->plt_second; 3313 plt_offset = eh->plt_second.offset; 3314 } 3315 else 3316 { 3317 resolved_plt = htab->elf.splt; 3318 plt_offset = h->plt.offset; 3319 } 3320 3321 relocation = (resolved_plt->output_section->vma 3322 + resolved_plt->output_offset 3323 + plt_offset); 3324 unresolved_reloc = false; 3325 } 3326 3327 relocation -= htab->elf.sgotplt->output_section->vma 3328 + htab->elf.sgotplt->output_offset; 3329 break; 3330 3331 case R_X86_64_PLT32: 3332 /* Relocation is to the entry for this symbol in the 3333 procedure linkage table. */ 3334 3335 /* Resolve a PLT32 reloc against a local symbol directly, 3336 without using the procedure linkage table. */ 3337 if (h == NULL) 3338 break; 3339 3340 if ((h->plt.offset == (bfd_vma) -1 3341 && eh->plt_got.offset == (bfd_vma) -1) 3342 || htab->elf.splt == NULL) 3343 { 3344 /* We didn't make a PLT entry for this symbol. This 3345 happens when statically linking PIC code, or when 3346 using -Bsymbolic. 
*/ 3347 break; 3348 } 3349 3350 use_plt: 3351 if (h->plt.offset != (bfd_vma) -1) 3352 { 3353 if (htab->plt_second != NULL) 3354 { 3355 resolved_plt = htab->plt_second; 3356 plt_offset = eh->plt_second.offset; 3357 } 3358 else 3359 { 3360 resolved_plt = htab->elf.splt; 3361 plt_offset = h->plt.offset; 3362 } 3363 } 3364 else 3365 { 3366 /* Use the GOT PLT. */ 3367 resolved_plt = htab->plt_got; 3368 plt_offset = eh->plt_got.offset; 3369 } 3370 3371 relocation = (resolved_plt->output_section->vma 3372 + resolved_plt->output_offset 3373 + plt_offset); 3374 unresolved_reloc = false; 3375 break; 3376 3377 case R_X86_64_SIZE32: 3378 case R_X86_64_SIZE64: 3379 /* Set to symbol size. */ 3380 relocation = st_size; 3381 goto direct; 3382 3383 case R_X86_64_PC8: 3384 case R_X86_64_PC16: 3385 case R_X86_64_PC32: 3386 /* Don't complain about -fPIC if the symbol is undefined when 3387 building executable unless it is unresolved weak symbol, 3388 references a dynamic definition in PIE or -z nocopyreloc 3389 is used. */ 3390 no_copyreloc_p 3391 = (info->nocopyreloc 3392 || (h != NULL 3393 && !h->root.linker_def 3394 && !h->root.ldscript_def 3395 && eh->def_protected)); 3396 3397 if ((input_section->flags & SEC_ALLOC) != 0 3398 && (input_section->flags & SEC_READONLY) != 0 3399 && h != NULL 3400 && ((bfd_link_executable (info) 3401 && ((h->root.type == bfd_link_hash_undefweak 3402 && (eh == NULL 3403 || !UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, 3404 eh))) 3405 || (bfd_link_pie (info) 3406 && !SYMBOL_DEFINED_NON_SHARED_P (h) 3407 && h->def_dynamic) 3408 || (no_copyreloc_p 3409 && h->def_dynamic 3410 && !(h->root.u.def.section->flags & SEC_CODE)))) 3411 || (bfd_link_pie (info) 3412 && h->root.type == bfd_link_hash_undefweak) 3413 || bfd_link_dll (info))) 3414 { 3415 bool fail = false; 3416 if (SYMBOL_REFERENCES_LOCAL_P (info, h)) 3417 { 3418 /* Symbol is referenced locally. Make sure it is 3419 defined locally. */ 3420 fail = !SYMBOL_DEFINED_NON_SHARED_P (h); 3421 } 3422 else if (bfd_link_pie (info)) 3423 { 3424 /* We can only use PC-relative relocations in PIE 3425 from non-code sections. */ 3426 if (h->root.type == bfd_link_hash_undefweak 3427 || (h->type == STT_FUNC 3428 && (sec->flags & SEC_CODE) != 0)) 3429 fail = true; 3430 } 3431 else if (no_copyreloc_p || bfd_link_dll (info)) 3432 { 3433 /* Symbol doesn't need copy reloc and isn't 3434 referenced locally. Don't allow PC-relative 3435 relocations against default and protected 3436 symbols since address of protected function 3437 and location of protected data may not be in 3438 the shared object. */ 3439 fail = (ELF_ST_VISIBILITY (h->other) == STV_DEFAULT 3440 || ELF_ST_VISIBILITY (h->other) == STV_PROTECTED); 3441 } 3442 3443 if (fail) 3444 return elf_x86_64_need_pic (info, input_bfd, input_section, 3445 h, NULL, NULL, howto); 3446 } 3447 /* Since x86-64 has PC-relative PLT, we can use PLT in PIE 3448 as function address. */ 3449 else if (h != NULL 3450 && (input_section->flags & SEC_CODE) == 0 3451 && bfd_link_pie (info) 3452 && h->type == STT_FUNC 3453 && !h->def_regular 3454 && h->def_dynamic) 3455 goto use_plt; 3456 /* Fall through. */ 3457 3458 case R_X86_64_8: 3459 case R_X86_64_16: 3460 case R_X86_64_32: 3461 case R_X86_64_PC64: 3462 case R_X86_64_64: 3463 /* FIXME: The ABI says the linker should make sure the value is 3464 the same when it's zeroextended to 64 bit. 
*/ 3465 3466 direct: 3467 if ((input_section->flags & SEC_ALLOC) == 0) 3468 break; 3469 3470 need_copy_reloc_in_pie = (bfd_link_pie (info) 3471 && h != NULL 3472 && (h->needs_copy 3473 || eh->needs_copy 3474 || (h->root.type 3475 == bfd_link_hash_undefined)) 3476 && (X86_PCREL_TYPE_P (true, r_type) 3477 || X86_SIZE_TYPE_P (true, 3478 r_type))); 3479 3480 if (GENERATE_DYNAMIC_RELOCATION_P (true, info, eh, r_type, sec, 3481 need_copy_reloc_in_pie, 3482 resolved_to_zero, false)) 3483 { 3484 Elf_Internal_Rela outrel; 3485 bool skip, relocate; 3486 bool generate_dynamic_reloc = true; 3487 asection *sreloc; 3488 const char *relative_reloc_name = NULL; 3489 3490 /* When generating a shared object, these relocations 3491 are copied into the output file to be resolved at run 3492 time. */ 3493 skip = false; 3494 relocate = false; 3495 3496 outrel.r_offset = 3497 _bfd_elf_section_offset (output_bfd, info, input_section, 3498 rel->r_offset); 3499 if (outrel.r_offset == (bfd_vma) -1) 3500 skip = true; 3501 else if (outrel.r_offset == (bfd_vma) -2) 3502 skip = true, relocate = true; 3503 3504 outrel.r_offset += (input_section->output_section->vma 3505 + input_section->output_offset); 3506 3507 if (skip) 3508 memset (&outrel, 0, sizeof outrel); 3509 3510 else if (COPY_INPUT_RELOC_P (true, info, h, r_type)) 3511 { 3512 outrel.r_info = htab->r_info (h->dynindx, r_type); 3513 outrel.r_addend = rel->r_addend; 3514 } 3515 else 3516 { 3517 /* This symbol is local, or marked to become local. 3518 When relocation overflow check is disabled, we 3519 convert R_X86_64_32 to dynamic R_X86_64_RELATIVE. */ 3520 if (r_type == htab->pointer_r_type 3521 || (r_type == R_X86_64_32 3522 && htab->params->no_reloc_overflow_check)) 3523 { 3524 relocate = true; 3525 /* NB: Don't generate relative relocation here if 3526 it has been generated by DT_RELR. */ 3527 if (info->enable_dt_relr) 3528 generate_dynamic_reloc = false; 3529 else 3530 { 3531 outrel.r_info = 3532 htab->r_info (0, R_X86_64_RELATIVE); 3533 outrel.r_addend = relocation + rel->r_addend; 3534 relative_reloc_name = "R_X86_64_RELATIVE"; 3535 } 3536 } 3537 else if (r_type == R_X86_64_64 3538 && !ABI_64_P (output_bfd)) 3539 { 3540 relocate = true; 3541 outrel.r_info = htab->r_info (0, 3542 R_X86_64_RELATIVE64); 3543 outrel.r_addend = relocation + rel->r_addend; 3544 relative_reloc_name = "R_X86_64_RELATIVE64"; 3545 /* Check addend overflow. */ 3546 if ((outrel.r_addend & 0x80000000) 3547 != (rel->r_addend & 0x80000000)) 3548 { 3549 const char *name; 3550 int addend = rel->r_addend; 3551 if (h && h->root.root.string) 3552 name = h->root.root.string; 3553 else 3554 name = bfd_elf_sym_name (input_bfd, symtab_hdr, 3555 sym, NULL); 3556 _bfd_error_handler 3557 /* xgettext:c-format */ 3558 (_("%pB: addend %s%#x in relocation %s against " 3559 "symbol `%s' at %#" PRIx64 3560 " in section `%pA' is out of range"), 3561 input_bfd, addend < 0 ? "-" : "", addend, 3562 howto->name, name, (uint64_t) rel->r_offset, 3563 input_section); 3564 bfd_set_error (bfd_error_bad_value); 3565 return false; 3566 } 3567 } 3568 else 3569 { 3570 long sindx; 3571 3572 if (bfd_is_abs_section (sec)) 3573 sindx = 0; 3574 else if (sec == NULL || sec->owner == NULL) 3575 { 3576 bfd_set_error (bfd_error_bad_value); 3577 return false; 3578 } 3579 else 3580 { 3581 asection *osec; 3582 3583 /* We are turning this relocation into one 3584 against a section symbol. It would be 3585 proper to subtract the symbol's value, 3586 osec->vma, from the emitted reloc addend, 3587 but ld.so expects buggy relocs. 
*/ 3588 osec = sec->output_section; 3589 sindx = elf_section_data (osec)->dynindx; 3590 if (sindx == 0) 3591 { 3592 asection *oi = htab->elf.text_index_section; 3593 sindx = elf_section_data (oi)->dynindx; 3594 } 3595 BFD_ASSERT (sindx != 0); 3596 } 3597 3598 outrel.r_info = htab->r_info (sindx, r_type); 3599 outrel.r_addend = relocation + rel->r_addend; 3600 } 3601 } 3602 3603 if (generate_dynamic_reloc) 3604 { 3605 sreloc = elf_section_data (input_section)->sreloc; 3606 3607 if (sreloc == NULL || sreloc->contents == NULL) 3608 { 3609 r = bfd_reloc_notsupported; 3610 goto check_relocation_error; 3611 } 3612 3613 if (relative_reloc_name 3614 && htab->params->report_relative_reloc) 3615 _bfd_x86_elf_link_report_relative_reloc 3616 (info, input_section, h, sym, 3617 relative_reloc_name, &outrel); 3618 3619 elf_append_rela (output_bfd, sreloc, &outrel); 3620 } 3621 3622 /* If this reloc is against an external symbol, we do 3623 not want to fiddle with the addend. Otherwise, we 3624 need to include the symbol value so that it becomes 3625 an addend for the dynamic reloc. */ 3626 if (! relocate) 3627 continue; 3628 } 3629 3630 break; 3631 3632 case R_X86_64_TLSGD: 3633 case R_X86_64_GOTPC32_TLSDESC: 3634 case R_X86_64_CODE_4_GOTPC32_TLSDESC: 3635 case R_X86_64_TLSDESC_CALL: 3636 case R_X86_64_GOTTPOFF: 3637 case R_X86_64_CODE_4_GOTTPOFF: 3638 case R_X86_64_CODE_6_GOTTPOFF: 3639 tls_type = GOT_UNKNOWN; 3640 if (h == NULL && local_got_offsets) 3641 tls_type = elf_x86_local_got_tls_type (input_bfd) [r_symndx]; 3642 else if (h != NULL) 3643 tls_type = elf_x86_hash_entry (h)->tls_type; 3644 3645 r_type_tls = r_type; 3646 if (! elf_x86_64_tls_transition (info, input_bfd, 3647 input_section, contents, 3648 symtab_hdr, sym_hashes, 3649 &r_type_tls, tls_type, rel, 3650 relend, h, r_symndx, true)) 3651 return false; 3652 3653 if (r_type_tls == R_X86_64_TPOFF32) 3654 { 3655 bfd_vma roff = rel->r_offset; 3656 3657 if (roff >= input_section->size) 3658 goto corrupt_input; 3659 3660 BFD_ASSERT (! unresolved_reloc); 3661 3662 if (r_type == R_X86_64_TLSGD) 3663 { 3664 /* GD->LE transition. 
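     The __tls_get_addr call sequence is rewritten in place into a %fs-relative computation of the TP offset; the replacement must have exactly the same length, which is why the large-model form below ends in a nopw pad.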
For 64bit, change 3665 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 3666 .word 0x6666; rex64; call __tls_get_addr@PLT 3667 or 3668 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 3669 .byte 0x66; rex64 3670 call *__tls_get_addr@GOTPCREL(%rip) 3671 which may be converted to 3672 addr32 call __tls_get_addr 3673 into: 3674 movq %fs:0, %rax 3675 leaq foo@tpoff(%rax), %rax 3676 For 32bit, change 3677 leaq foo@tlsgd(%rip), %rdi 3678 .word 0x6666; rex64; call __tls_get_addr@PLT 3679 or 3680 leaq foo@tlsgd(%rip), %rdi 3681 .byte 0x66; rex64 3682 call *__tls_get_addr@GOTPCREL(%rip) 3683 which may be converted to 3684 addr32 call __tls_get_addr 3685 into: 3686 movl %fs:0, %eax 3687 leaq foo@tpoff(%rax), %rax 3688 For largepic, change: 3689 leaq foo@tlsgd(%rip), %rdi 3690 movabsq $__tls_get_addr@pltoff, %rax 3691 addq %r15, %rax 3692 call *%rax 3693 into: 3694 movq %fs:0, %rax 3695 leaq foo@tpoff(%rax), %rax 3696 nopw 0x0(%rax,%rax,1) */ 3697 int largepic = 0; 3698 if (ABI_64_P (output_bfd)) 3699 { 3700 if (roff + 5 >= input_section->size) 3701 goto corrupt_input; 3702 if (contents[roff + 5] == 0xb8) 3703 { 3704 if (roff < 3 3705 || (roff - 3 + 22) > input_section->size) 3706 { 3707 corrupt_input: 3708 info->callbacks->einfo 3709 (_("%F%P: corrupt input: %pB\n"), 3710 input_bfd); 3711 return false; 3712 } 3713 memcpy (contents + roff - 3, 3714 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80" 3715 "\0\0\0\0\x66\x0f\x1f\x44\0", 22); 3716 largepic = 1; 3717 } 3718 else 3719 { 3720 if (roff < 4 3721 || (roff - 4 + 16) > input_section->size) 3722 goto corrupt_input; 3723 memcpy (contents + roff - 4, 3724 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", 3725 16); 3726 } 3727 } 3728 else 3729 { 3730 if (roff < 3 3731 || (roff - 3 + 15) > input_section->size) 3732 goto corrupt_input; 3733 memcpy (contents + roff - 3, 3734 "\x64\x8b\x04\x25\0\0\0\0\x48\x8d\x80\0\0\0", 3735 15); 3736 } 3737 3738 if (roff + 8 + largepic >= input_section->size) 3739 goto corrupt_input; 3740 3741 bfd_put_32 (output_bfd, 3742 elf_x86_64_tpoff (info, relocation), 3743 contents + roff + 8 + largepic); 3744 /* Skip R_X86_64_PC32, R_X86_64_PLT32, 3745 R_X86_64_GOTPCRELX and R_X86_64_PLTOFF64. */ 3746 rel++; 3747 wrel++; 3748 continue; 3749 } 3750 else if (r_type == R_X86_64_GOTPC32_TLSDESC) 3751 { 3752 /* GDesc -> LE transition. 3753 It's originally something like: 3754 leaq x@tlsdesc(%rip), %rax <--- LP64 mode. 3755 rex leal x@tlsdesc(%rip), %eax <--- X32 mode. 3756 3757 Change it to: 3758 movq $x@tpoff, %rax <--- LP64 mode. 3759 rex movl $x@tpoff, %eax <--- X32 mode. 3760 */ 3761 3762 unsigned int val, type; 3763 3764 if (roff < 3) 3765 goto corrupt_input; 3766 type = bfd_get_8 (input_bfd, contents + roff - 3); 3767 val = bfd_get_8 (input_bfd, contents + roff - 1); 3768 bfd_put_8 (output_bfd, 3769 (type & 0x48) | ((type >> 2) & 1), 3770 contents + roff - 3); 3771 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2); 3772 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7), 3773 contents + roff - 1); 3774 bfd_put_32 (output_bfd, 3775 elf_x86_64_tpoff (info, relocation), 3776 contents + roff); 3777 continue; 3778 } 3779 else if (r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC) 3780 { 3781 /* GDesc -> LE transition. 3782 It's originally something like: 3783 lea x@tlsdesc(%rip), %reg 3784 3785 Change it to: 3786 mov $x@tpoff, %reg 3787 where reg is one of r16 to r31. 
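     The destination register moves from ModRM.reg (selected by the REX2 R3/R4 bits) in the lea form to ModRM.r/m (selected by the B3/B4 bits) in the mov-immediate form, hence the bit shuffling below.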
*/ 3788 3789 unsigned int val, rex2; 3790 unsigned int rex2_mask = REX_R | REX_R << 4; 3791 3792 if (roff < 4) 3793 goto corrupt_input; 3794 rex2 = bfd_get_8 (input_bfd, contents + roff - 3); 3795 val = bfd_get_8 (input_bfd, contents + roff - 1); 3796 /* Move the R bits to the B bits in REX2 payload 3797 byte. */ 3798 bfd_put_8 (output_bfd, 3799 ((rex2 & ~rex2_mask) 3800 | (rex2 & rex2_mask) >> 2), 3801 contents + roff - 3); 3802 bfd_put_8 (output_bfd, 0xc7, contents + roff - 2); 3803 bfd_put_8 (output_bfd, 0xc0 | ((val >> 3) & 7), 3804 contents + roff - 1); 3805 bfd_put_32 (output_bfd, 3806 elf_x86_64_tpoff (info, relocation), 3807 contents + roff); 3808 continue; 3809 } 3810 else if (r_type == R_X86_64_TLSDESC_CALL) 3811 { 3812 /* GDesc -> LE transition. 3813 It's originally: 3814 call *(%rax) <--- LP64 mode. 3815 call *(%eax) <--- X32 mode. 3816 Turn it into: 3817 xchg %ax,%ax <-- LP64 mode. 3818 nopl (%rax) <-- X32 mode. 3819 */ 3820 unsigned int prefix = 0; 3821 if (!ABI_64_P (input_bfd)) 3822 { 3823 /* Check for call *x@tlsdesc(%eax). */ 3824 if (contents[roff] == 0x67) 3825 prefix = 1; 3826 } 3827 if (prefix) 3828 { 3829 if (roff + 2 >= input_section->size) 3830 goto corrupt_input; 3831 3832 bfd_put_8 (output_bfd, 0x0f, contents + roff); 3833 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1); 3834 bfd_put_8 (output_bfd, 0x00, contents + roff + 2); 3835 } 3836 else 3837 { 3838 if (roff + 1 >= input_section->size) 3839 goto corrupt_input; 3840 3841 bfd_put_8 (output_bfd, 0x66, contents + roff); 3842 bfd_put_8 (output_bfd, 0x90, contents + roff + 1); 3843 } 3844 continue; 3845 } 3846 else if (r_type == R_X86_64_GOTTPOFF) 3847 { 3848 /* IE->LE transition: 3849 For 64bit, originally it can be one of: 3850 movq foo@gottpoff(%rip), %reg 3851 addq foo@gottpoff(%rip), %reg 3852 We change it into: 3853 movq $foo, %reg 3854 leaq foo(%reg), %reg 3855 addq $foo, %reg. 3856 For 32bit, originally it can be one of: 3857 movq foo@gottpoff(%rip), %reg 3858 addl foo@gottpoff(%rip), %reg 3859 We change it into: 3860 movq $foo, %reg 3861 leal foo(%reg), %reg 3862 addl $foo, %reg. 
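As a concrete example, 48 8b 05 <rel32> (movq foo@gottpoff(%rip), %rax) becomes 48 c7 c0 <imm32> (movq $foo@tpoff, %rax), and the %r12 form 4c 8b 25 <rel32> becomes 49 c7 c4 <imm32>: REX.R turns into REX.B because the register moves from the ModRM reg field to the r/m field, while the leaq form needs both bits set (0x4c becomes 0x4d).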
*/ 3863 3864 unsigned int val, type, reg; 3865 3866 if (roff >= 3) 3867 val = bfd_get_8 (input_bfd, contents + roff - 3); 3868 else 3869 { 3870 if (roff < 2) 3871 goto corrupt_input; 3872 val = 0; 3873 } 3874 type = bfd_get_8 (input_bfd, contents + roff - 2); 3875 reg = bfd_get_8 (input_bfd, contents + roff - 1); 3876 reg >>= 3; 3877 if (type == 0x8b) 3878 { 3879 /* movq */ 3880 if (val == 0x4c) 3881 { 3882 if (roff < 3) 3883 goto corrupt_input; 3884 bfd_put_8 (output_bfd, 0x49, 3885 contents + roff - 3); 3886 } 3887 else if (!ABI_64_P (output_bfd) && val == 0x44) 3888 { 3889 if (roff < 3) 3890 goto corrupt_input; 3891 bfd_put_8 (output_bfd, 0x41, 3892 contents + roff - 3); 3893 } 3894 bfd_put_8 (output_bfd, 0xc7, 3895 contents + roff - 2); 3896 bfd_put_8 (output_bfd, 0xc0 | reg, 3897 contents + roff - 1); 3898 } 3899 else if (reg == 4) 3900 { 3901 /* addq/addl -> addq/addl - addressing with %rsp/%r12 3902 is special */ 3903 if (val == 0x4c) 3904 { 3905 if (roff < 3) 3906 goto corrupt_input; 3907 bfd_put_8 (output_bfd, 0x49, 3908 contents + roff - 3); 3909 } 3910 else if (!ABI_64_P (output_bfd) && val == 0x44) 3911 { 3912 if (roff < 3) 3913 goto corrupt_input; 3914 bfd_put_8 (output_bfd, 0x41, 3915 contents + roff - 3); 3916 } 3917 bfd_put_8 (output_bfd, 0x81, 3918 contents + roff - 2); 3919 bfd_put_8 (output_bfd, 0xc0 | reg, 3920 contents + roff - 1); 3921 } 3922 else 3923 { 3924 /* addq/addl -> leaq/leal */ 3925 if (val == 0x4c) 3926 { 3927 if (roff < 3) 3928 goto corrupt_input; 3929 bfd_put_8 (output_bfd, 0x4d, 3930 contents + roff - 3); 3931 } 3932 else if (!ABI_64_P (output_bfd) && val == 0x44) 3933 { 3934 if (roff < 3) 3935 goto corrupt_input; 3936 bfd_put_8 (output_bfd, 0x45, 3937 contents + roff - 3); 3938 } 3939 bfd_put_8 (output_bfd, 0x8d, 3940 contents + roff - 2); 3941 bfd_put_8 (output_bfd, 0x80 | reg | (reg << 3), 3942 contents + roff - 1); 3943 } 3944 bfd_put_32 (output_bfd, 3945 elf_x86_64_tpoff (info, relocation), 3946 contents + roff); 3947 continue; 3948 } 3949 else if (r_type == R_X86_64_CODE_4_GOTTPOFF) 3950 { 3951 /* IE->LE transition: 3952 Originally it can be one of: 3953 mov foo@gottpoff(%rip), %reg 3954 add foo@gottpoff(%rip), %reg 3955 We change it into: 3956 mov $foo@tpoff, %reg 3957 add $foo@tpoff, %reg 3958 where reg is one of r16 to r31. */ 3959 3960 unsigned int rex2, type, reg; 3961 unsigned int rex2_mask = REX_R | REX_R << 4; 3962 3963 if (roff < 4) 3964 goto corrupt_input; 3965 3966 rex2 = bfd_get_8 (input_bfd, contents + roff - 3); 3967 type = bfd_get_8 (input_bfd, contents + roff - 2); 3968 reg = bfd_get_8 (input_bfd, contents + roff - 1); 3969 reg >>= 3; 3970 /* Move the R bits to the B bits in REX2 payload 3971 byte. 
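The opcode is rewritten at the same time: 0x8b (mov from memory) becomes 0xc7 (mov immediate) and the add form becomes 0x81 (add immediate), with the register now encoded in the ModRM r/m field.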
*/ 3972 if (type == 0x8b) 3973 type = 0xc7; 3974 else 3975 type = 0x81; 3976 bfd_put_8 (output_bfd, 3977 ((rex2 & ~rex2_mask) 3978 | (rex2 & rex2_mask) >> 2), 3979 contents + roff - 3); 3980 bfd_put_8 (output_bfd, type, 3981 contents + roff - 2); 3982 bfd_put_8 (output_bfd, 0xc0 | reg, 3983 contents + roff - 1); 3984 bfd_put_32 (output_bfd, 3985 elf_x86_64_tpoff (info, relocation), 3986 contents + roff); 3987 continue; 3988 } 3989 else if (r_type == R_X86_64_CODE_6_GOTTPOFF) 3990 { 3991 /* IE->LE transition: 3992 Originally it is 3993 add %reg1, foo@gottpoff(%rip), %reg2 3994 or 3995 add foo@gottpoff(%rip), %reg1, %reg2 3996 We change it into: 3997 add $foo@tpoff, %reg1, %reg2 3998 */ 3999 unsigned int reg, byte1; 4000 unsigned int updated_byte1; 4001 4002 if (roff < 6) 4003 goto corrupt_input; 4004 4005 /* Move the R bits to the B bits in EVEX payload 4006 byte 1. */ 4007 byte1 = bfd_get_8 (input_bfd, contents + roff - 5); 4008 updated_byte1 = byte1; 4009 4010 /* Set the R bits since they are stored inverted. */ 4011 updated_byte1 |= 1 << 7 | 1 << 4; 4012 4013 /* Update the B bits from the R bits. */ 4014 if ((byte1 & (1 << 7)) == 0) 4015 updated_byte1 &= ~(1 << 5); 4016 if ((byte1 & (1 << 4)) == 0) 4017 updated_byte1 |= 1 << 3; 4018 4019 reg = bfd_get_8 (input_bfd, contents + roff - 1); 4020 reg >>= 3; 4021 4022 bfd_put_8 (output_bfd, updated_byte1, 4023 contents + roff - 5); 4024 bfd_put_8 (output_bfd, 0x81, 4025 contents + roff - 2); 4026 bfd_put_8 (output_bfd, 0xc0 | reg, 4027 contents + roff - 1); 4028 bfd_put_32 (output_bfd, 4029 elf_x86_64_tpoff (info, relocation), 4030 contents + roff); 4031 continue; 4032 } 4033 else 4034 BFD_ASSERT (false); 4035 } 4036 4037 if (htab->elf.sgot == NULL) 4038 abort (); 4039 4040 if (h != NULL) 4041 { 4042 off = h->got.offset; 4043 offplt = elf_x86_hash_entry (h)->tlsdesc_got; 4044 } 4045 else 4046 { 4047 if (local_got_offsets == NULL) 4048 abort (); 4049 4050 off = local_got_offsets[r_symndx]; 4051 offplt = local_tlsdesc_gotents[r_symndx]; 4052 } 4053 4054 if ((off & 1) != 0) 4055 off &= ~1; 4056 else 4057 { 4058 Elf_Internal_Rela outrel; 4059 int dr_type, indx; 4060 asection *sreloc; 4061 4062 if (htab->elf.srelgot == NULL) 4063 abort (); 4064 4065 indx = h && h->dynindx != -1 ?
h->dynindx : 0; 4066 4067 if (GOT_TLS_GDESC_P (tls_type)) 4068 { 4069 outrel.r_info = htab->r_info (indx, R_X86_64_TLSDESC); 4070 BFD_ASSERT (htab->sgotplt_jump_table_size + offplt 4071 + 2 * GOT_ENTRY_SIZE <= htab->elf.sgotplt->size); 4072 outrel.r_offset = (htab->elf.sgotplt->output_section->vma 4073 + htab->elf.sgotplt->output_offset 4074 + offplt 4075 + htab->sgotplt_jump_table_size); 4076 sreloc = htab->elf.srelplt; 4077 if (indx == 0) 4078 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info); 4079 else 4080 outrel.r_addend = 0; 4081 elf_append_rela (output_bfd, sreloc, &outrel); 4082 } 4083 4084 sreloc = htab->elf.srelgot; 4085 4086 outrel.r_offset = (htab->elf.sgot->output_section->vma 4087 + htab->elf.sgot->output_offset + off); 4088 4089 if (GOT_TLS_GD_P (tls_type)) 4090 dr_type = R_X86_64_DTPMOD64; 4091 else if (GOT_TLS_GDESC_P (tls_type)) 4092 goto dr_done; 4093 else 4094 dr_type = R_X86_64_TPOFF64; 4095 4096 bfd_put_64 (output_bfd, 0, htab->elf.sgot->contents + off); 4097 outrel.r_addend = 0; 4098 if ((dr_type == R_X86_64_TPOFF64 4099 || dr_type == R_X86_64_TLSDESC) && indx == 0) 4100 outrel.r_addend = relocation - _bfd_x86_elf_dtpoff_base (info); 4101 outrel.r_info = htab->r_info (indx, dr_type); 4102 4103 elf_append_rela (output_bfd, sreloc, &outrel); 4104 4105 if (GOT_TLS_GD_P (tls_type)) 4106 { 4107 if (indx == 0) 4108 { 4109 BFD_ASSERT (! unresolved_reloc); 4110 bfd_put_64 (output_bfd, 4111 relocation - _bfd_x86_elf_dtpoff_base (info), 4112 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 4113 } 4114 else 4115 { 4116 bfd_put_64 (output_bfd, 0, 4117 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 4118 outrel.r_info = htab->r_info (indx, 4119 R_X86_64_DTPOFF64); 4120 outrel.r_offset += GOT_ENTRY_SIZE; 4121 elf_append_rela (output_bfd, sreloc, 4122 &outrel); 4123 } 4124 } 4125 4126 dr_done: 4127 if (h != NULL) 4128 h->got.offset |= 1; 4129 else 4130 local_got_offsets[r_symndx] |= 1; 4131 } 4132 4133 if (off >= (bfd_vma) -2 4134 && ! GOT_TLS_GDESC_P (tls_type)) 4135 abort (); 4136 if (r_type_tls == r_type) 4137 { 4138 if (r_type == R_X86_64_GOTPC32_TLSDESC 4139 || r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC 4140 || r_type == R_X86_64_TLSDESC_CALL) 4141 relocation = htab->elf.sgotplt->output_section->vma 4142 + htab->elf.sgotplt->output_offset 4143 + offplt + htab->sgotplt_jump_table_size; 4144 else 4145 relocation = htab->elf.sgot->output_section->vma 4146 + htab->elf.sgot->output_offset + off; 4147 unresolved_reloc = false; 4148 } 4149 else 4150 { 4151 bfd_vma roff = rel->r_offset; 4152 4153 if (r_type == R_X86_64_TLSGD) 4154 { 4155 /* GD->IE transition. 
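The call to __tls_get_addr is replaced by code that adds the thread-pointer offset stored in the symbol's GOT entry to %fs:0.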
For 64bit, change 4156 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 4157 .word 0x6666; rex64; call __tls_get_addr@PLT 4158 or 4159 .byte 0x66; leaq foo@tlsgd(%rip), %rdi 4160 .byte 0x66; rex64 4161 call *__tls_get_addr@GOTPCREL(%rip 4162 which may be converted to 4163 addr32 call __tls_get_addr 4164 into: 4165 movq %fs:0, %rax 4166 addq foo@gottpoff(%rip), %rax 4167 For 32bit, change 4168 leaq foo@tlsgd(%rip), %rdi 4169 .word 0x6666; rex64; call __tls_get_addr@PLT 4170 or 4171 leaq foo@tlsgd(%rip), %rdi 4172 .byte 0x66; rex64; 4173 call *__tls_get_addr@GOTPCREL(%rip) 4174 which may be converted to 4175 addr32 call __tls_get_addr 4176 into: 4177 movl %fs:0, %eax 4178 addq foo@gottpoff(%rip), %rax 4179 For largepic, change: 4180 leaq foo@tlsgd(%rip), %rdi 4181 movabsq $__tls_get_addr@pltoff, %rax 4182 addq %r15, %rax 4183 call *%rax 4184 into: 4185 movq %fs:0, %rax 4186 addq foo@gottpoff(%rax), %rax 4187 nopw 0x0(%rax,%rax,1) */ 4188 int largepic = 0; 4189 if (ABI_64_P (output_bfd)) 4190 { 4191 if (contents[roff + 5] == 0xb8) 4192 { 4193 if (roff < 3 4194 || (roff - 3 + 22) > input_section->size) 4195 goto corrupt_input; 4196 memcpy (contents + roff - 3, 4197 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05" 4198 "\0\0\0\0\x66\x0f\x1f\x44\0", 22); 4199 largepic = 1; 4200 } 4201 else 4202 { 4203 if (roff < 4 4204 || (roff - 4 + 16) > input_section->size) 4205 goto corrupt_input; 4206 memcpy (contents + roff - 4, 4207 "\x64\x48\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", 4208 16); 4209 } 4210 } 4211 else 4212 { 4213 if (roff < 3 4214 || (roff - 3 + 15) > input_section->size) 4215 goto corrupt_input; 4216 memcpy (contents + roff - 3, 4217 "\x64\x8b\x04\x25\0\0\0\0\x48\x03\x05\0\0\0", 4218 15); 4219 } 4220 4221 relocation = (htab->elf.sgot->output_section->vma 4222 + htab->elf.sgot->output_offset + off 4223 - roff 4224 - largepic 4225 - input_section->output_section->vma 4226 - input_section->output_offset 4227 - 12); 4228 bfd_put_32 (output_bfd, relocation, 4229 contents + roff + 8 + largepic); 4230 /* Skip R_X86_64_PLT32/R_X86_64_PLTOFF64. */ 4231 rel++; 4232 wrel++; 4233 continue; 4234 } 4235 else if (r_type == R_X86_64_GOTPC32_TLSDESC 4236 || r_type == R_X86_64_CODE_4_GOTPC32_TLSDESC) 4237 { 4238 /* GDesc -> IE transition. 4239 It's originally something like: 4240 leaq x@tlsdesc(%rip), %rax <--- LP64 mode. 4241 rex leal x@tlsdesc(%rip), %eax <--- X32 mode. 4242 4243 Change it to: 4244 # before xchg %ax,%ax in LP64 mode. 4245 movq x@gottpoff(%rip), %rax 4246 # before nopl (%rax) in X32 mode. 4247 rex movl x@gottpoff(%rip), %eax 4248 */ 4249 4250 /* Now modify the instruction as appropriate. To 4251 turn a lea into a mov in the form we use it, it 4252 suffices to change the second byte from 0x8d to 4253 0x8b. */ 4254 if (roff < 2) 4255 goto corrupt_input; 4256 bfd_put_8 (output_bfd, 0x8b, contents + roff - 2); 4257 4258 bfd_put_32 (output_bfd, 4259 htab->elf.sgot->output_section->vma 4260 + htab->elf.sgot->output_offset + off 4261 - rel->r_offset 4262 - input_section->output_section->vma 4263 - input_section->output_offset 4264 - 4, 4265 contents + roff); 4266 continue; 4267 } 4268 else if (r_type == R_X86_64_TLSDESC_CALL) 4269 { 4270 /* GDesc -> IE transition. 4271 It's originally: 4272 call *(%rax) <--- LP64 mode. 4273 call *(%eax) <--- X32 mode. 4274 4275 Change it to: 4276 xchg %ax, %ax <-- LP64 mode. 4277 nopl (%rax) <-- X32 mode. 4278 */ 4279 4280 unsigned int prefix = 0; 4281 if (!ABI_64_P (input_bfd)) 4282 { 4283 /* Check for call *x@tlsdesc(%eax). 
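In x32 the indirect call carries a 0x67 address-size prefix, so the three-byte nopl (%rax) is used as the replacement instead of the two-byte xchg %ax,%ax.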
*/ 4284 if (contents[roff] == 0x67) 4285 prefix = 1; 4286 } 4287 if (prefix) 4288 { 4289 bfd_put_8 (output_bfd, 0x0f, contents + roff); 4290 bfd_put_8 (output_bfd, 0x1f, contents + roff + 1); 4291 bfd_put_8 (output_bfd, 0x00, contents + roff + 2); 4292 } 4293 else 4294 { 4295 bfd_put_8 (output_bfd, 0x66, contents + roff); 4296 bfd_put_8 (output_bfd, 0x90, contents + roff + 1); 4297 } 4298 continue; 4299 } 4300 else 4301 BFD_ASSERT (false); 4302 } 4303 break; 4304 4305 case R_X86_64_TLSLD: 4306 if (! elf_x86_64_tls_transition (info, input_bfd, 4307 input_section, contents, 4308 symtab_hdr, sym_hashes, 4309 &r_type, GOT_UNKNOWN, rel, 4310 relend, h, r_symndx, true)) 4311 return false; 4312 4313 if (r_type != R_X86_64_TLSLD) 4314 { 4315 /* LD->LE transition: 4316 leaq foo@tlsld(%rip), %rdi 4317 call __tls_get_addr@PLT 4318 For 64bit, we change it into: 4319 .word 0x6666; .byte 0x66; movq %fs:0, %rax 4320 For 32bit, we change it into: 4321 nopl 0x0(%rax); movl %fs:0, %eax 4322 Or 4323 leaq foo@tlsld(%rip), %rdi; 4324 call *__tls_get_addr@GOTPCREL(%rip) 4325 which may be converted to 4326 addr32 call __tls_get_addr 4327 For 64bit, we change it into: 4328 .word 0x6666; .word 0x6666; movq %fs:0, %rax 4329 For 32bit, we change it into: 4330 nopw 0x0(%rax); movl %fs:0, %eax 4331 For largepic, change: 4332 leaq foo@tlsgd(%rip), %rdi 4333 movabsq $__tls_get_addr@pltoff, %rax 4334 addq %rbx, %rax 4335 call *%rax 4336 into 4337 data16 data16 data16 nopw %cs:0x0(%rax,%rax,1) 4338 movq %fs:0, %eax */ 4339 4340 BFD_ASSERT (r_type == R_X86_64_TPOFF32); 4341 if (ABI_64_P (output_bfd)) 4342 { 4343 if ((rel->r_offset + 5) >= input_section->size) 4344 goto corrupt_input; 4345 if (contents[rel->r_offset + 5] == 0xb8) 4346 { 4347 if (rel->r_offset < 3 4348 || (rel->r_offset - 3 + 22) > input_section->size) 4349 goto corrupt_input; 4350 memcpy (contents + rel->r_offset - 3, 4351 "\x66\x66\x66\x66\x2e\x0f\x1f\x84\0\0\0\0\0" 4352 "\x64\x48\x8b\x04\x25\0\0\0", 22); 4353 } 4354 else if (contents[rel->r_offset + 4] == 0xff 4355 || contents[rel->r_offset + 4] == 0x67) 4356 { 4357 if (rel->r_offset < 3 4358 || (rel->r_offset - 3 + 13) > input_section->size) 4359 goto corrupt_input; 4360 memcpy (contents + rel->r_offset - 3, 4361 "\x66\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 4362 13); 4363 4364 } 4365 else 4366 { 4367 if (rel->r_offset < 3 4368 || (rel->r_offset - 3 + 12) > input_section->size) 4369 goto corrupt_input; 4370 memcpy (contents + rel->r_offset - 3, 4371 "\x66\x66\x66\x64\x48\x8b\x04\x25\0\0\0", 12); 4372 } 4373 } 4374 else 4375 { 4376 if ((rel->r_offset + 4) >= input_section->size) 4377 goto corrupt_input; 4378 if (contents[rel->r_offset + 4] == 0xff) 4379 { 4380 if (rel->r_offset < 3 4381 || (rel->r_offset - 3 + 13) > input_section->size) 4382 goto corrupt_input; 4383 memcpy (contents + rel->r_offset - 3, 4384 "\x66\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 4385 13); 4386 } 4387 else 4388 { 4389 if (rel->r_offset < 3 4390 || (rel->r_offset - 3 + 12) > input_section->size) 4391 goto corrupt_input; 4392 memcpy (contents + rel->r_offset - 3, 4393 "\x0f\x1f\x40\x00\x64\x8b\x04\x25\0\0\0", 12); 4394 } 4395 } 4396 /* Skip R_X86_64_PC32, R_X86_64_PLT32, R_X86_64_GOTPCRELX 4397 and R_X86_64_PLTOFF64. 
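That relocation was attached to the __tls_get_addr call, which no longer exists after the rewrite.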
*/ 4398 rel++; 4399 wrel++; 4400 continue; 4401 } 4402 4403 if (htab->elf.sgot == NULL) 4404 abort (); 4405 4406 off = htab->tls_ld_or_ldm_got.offset; 4407 if (off & 1) 4408 off &= ~1; 4409 else 4410 { 4411 Elf_Internal_Rela outrel; 4412 4413 if (htab->elf.srelgot == NULL) 4414 abort (); 4415 4416 outrel.r_offset = (htab->elf.sgot->output_section->vma 4417 + htab->elf.sgot->output_offset + off); 4418 4419 bfd_put_64 (output_bfd, 0, 4420 htab->elf.sgot->contents + off); 4421 bfd_put_64 (output_bfd, 0, 4422 htab->elf.sgot->contents + off + GOT_ENTRY_SIZE); 4423 outrel.r_info = htab->r_info (0, R_X86_64_DTPMOD64); 4424 outrel.r_addend = 0; 4425 elf_append_rela (output_bfd, htab->elf.srelgot, 4426 &outrel); 4427 htab->tls_ld_or_ldm_got.offset |= 1; 4428 } 4429 relocation = htab->elf.sgot->output_section->vma 4430 + htab->elf.sgot->output_offset + off; 4431 unresolved_reloc = false; 4432 break; 4433 4434 case R_X86_64_DTPOFF32: 4435 if (!bfd_link_executable (info) 4436 || (input_section->flags & SEC_CODE) == 0) 4437 relocation -= _bfd_x86_elf_dtpoff_base (info); 4438 else 4439 relocation = elf_x86_64_tpoff (info, relocation); 4440 break; 4441 4442 case R_X86_64_TPOFF32: 4443 case R_X86_64_TPOFF64: 4444 BFD_ASSERT (bfd_link_executable (info)); 4445 relocation = elf_x86_64_tpoff (info, relocation); 4446 break; 4447 4448 case R_X86_64_DTPOFF64: 4449 BFD_ASSERT ((input_section->flags & SEC_CODE) == 0); 4450 relocation -= _bfd_x86_elf_dtpoff_base (info); 4451 break; 4452 4453 default: 4454 break; 4455 } 4456 4457 /* Dynamic relocs are not propagated for SEC_DEBUGGING sections 4458 because such sections are not SEC_ALLOC and thus ld.so will 4459 not process them. */ 4460 if (unresolved_reloc 4461 && !((input_section->flags & SEC_DEBUGGING) != 0 4462 && h->def_dynamic) 4463 && _bfd_elf_section_offset (output_bfd, info, input_section, 4464 rel->r_offset) != (bfd_vma) -1) 4465 { 4466 switch (r_type) 4467 { 4468 case R_X86_64_32S: 4469 sec = h->root.u.def.section; 4470 if ((info->nocopyreloc || eh->def_protected) 4471 && !(h->root.u.def.section->flags & SEC_CODE)) 4472 return elf_x86_64_need_pic (info, input_bfd, input_section, 4473 h, NULL, NULL, howto); 4474 /* Fall through. */ 4475 4476 default: 4477 _bfd_error_handler 4478 /* xgettext:c-format */ 4479 (_("%pB(%pA+%#" PRIx64 "): " 4480 "unresolvable %s relocation against symbol `%s'"), 4481 input_bfd, 4482 input_section, 4483 (uint64_t) rel->r_offset, 4484 howto->name, 4485 h->root.root.string); 4486 return false; 4487 } 4488 } 4489 4490 do_relocation: 4491 r = _bfd_final_link_relocate (howto, input_bfd, input_section, 4492 contents, rel->r_offset, 4493 relocation, rel->r_addend); 4494 4495 check_relocation_error: 4496 if (r != bfd_reloc_ok) 4497 { 4498 const char *name; 4499 4500 if (h != NULL) 4501 name = h->root.root.string; 4502 else 4503 { 4504 name = bfd_elf_string_from_elf_section (input_bfd, 4505 symtab_hdr->sh_link, 4506 sym->st_name); 4507 if (name == NULL) 4508 return false; 4509 if (*name == '\0') 4510 name = bfd_section_name (sec); 4511 } 4512 4513 if (r == bfd_reloc_overflow) 4514 { 4515 if (converted_reloc) 4516 { 4517 info->callbacks->einfo 4518 ("%X%H:", input_bfd, input_section, rel->r_offset); 4519 info->callbacks->einfo 4520 (_(" failed to convert GOTPCREL relocation against " 4521 "'%s'; relink with --no-relax\n"), 4522 name); 4523 status = false; 4524 continue; 4525 } 4526 (*info->callbacks->reloc_overflow) 4527 (info, (h ? 
&h->root : NULL), name, howto->name, 4528 (bfd_vma) 0, input_bfd, input_section, rel->r_offset); 4529 } 4530 else 4531 { 4532 _bfd_error_handler 4533 /* xgettext:c-format */ 4534 (_("%pB(%pA+%#" PRIx64 "): reloc against `%s': error %d"), 4535 input_bfd, input_section, 4536 (uint64_t) rel->r_offset, name, (int) r); 4537 return false; 4538 } 4539 } 4540 4541 if (wrel != rel) 4542 *wrel = *rel; 4543 } 4544 4545 if (wrel != rel) 4546 { 4547 Elf_Internal_Shdr *rel_hdr; 4548 size_t deleted = rel - wrel; 4549 4550 rel_hdr = _bfd_elf_single_rel_hdr (input_section->output_section); 4551 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted; 4552 if (rel_hdr->sh_size == 0) 4553 { 4554 /* It is too late to remove an empty reloc section. Leave 4555 one NONE reloc. 4556 ??? What is wrong with an empty section??? */ 4557 rel_hdr->sh_size = rel_hdr->sh_entsize; 4558 deleted -= 1; 4559 } 4560 rel_hdr = _bfd_elf_single_rel_hdr (input_section); 4561 rel_hdr->sh_size -= rel_hdr->sh_entsize * deleted; 4562 input_section->reloc_count -= deleted; 4563 } 4564 4565 return status; 4566 } 4567 4568 /* Finish up dynamic symbol handling. We set the contents of various 4569 dynamic sections here. */ 4570 4571 static bool 4572 elf_x86_64_finish_dynamic_symbol (bfd *output_bfd, 4573 struct bfd_link_info *info, 4574 struct elf_link_hash_entry *h, 4575 Elf_Internal_Sym *sym) 4576 { 4577 struct elf_x86_link_hash_table *htab; 4578 bool use_plt_second; 4579 struct elf_x86_link_hash_entry *eh; 4580 bool local_undefweak; 4581 4582 htab = elf_x86_hash_table (info, X86_64_ELF_DATA); 4583 4584 /* Use the second PLT section only if there is .plt section. */ 4585 use_plt_second = htab->elf.splt != NULL && htab->plt_second != NULL; 4586 4587 eh = (struct elf_x86_link_hash_entry *) h; 4588 if (eh->no_finish_dynamic_symbol) 4589 abort (); 4590 4591 /* We keep PLT/GOT entries without dynamic PLT/GOT relocations for 4592 resolved undefined weak symbols in executable so that their 4593 references have value 0 at run-time. */ 4594 local_undefweak = UNDEFINED_WEAK_RESOLVED_TO_ZERO (info, eh); 4595 4596 if (h->plt.offset != (bfd_vma) -1) 4597 { 4598 bfd_vma plt_index; 4599 bfd_vma got_offset, plt_offset; 4600 Elf_Internal_Rela rela; 4601 bfd_byte *loc; 4602 asection *plt, *gotplt, *relplt, *resolved_plt; 4603 const struct elf_backend_data *bed; 4604 bfd_vma plt_got_pcrel_offset; 4605 4606 /* When building a static executable, use .iplt, .igot.plt and 4607 .rela.iplt sections for STT_GNU_IFUNC symbols. */ 4608 if (htab->elf.splt != NULL) 4609 { 4610 plt = htab->elf.splt; 4611 gotplt = htab->elf.sgotplt; 4612 relplt = htab->elf.srelplt; 4613 } 4614 else 4615 { 4616 plt = htab->elf.iplt; 4617 gotplt = htab->elf.igotplt; 4618 relplt = htab->elf.irelplt; 4619 } 4620 4621 VERIFY_PLT_ENTRY (info, h, plt, gotplt, relplt, local_undefweak) 4622 4623 /* Get the index in the procedure linkage table which 4624 corresponds to this symbol. This is the index of this symbol 4625 in all the symbols for which we are making plt entries. The 4626 first entry in the procedure linkage table is reserved. 4627 4628 Get the offset into the .got table of the entry that 4629 corresponds to this function. Each .got entry is GOT_ENTRY_SIZE 4630 bytes. The first three are reserved for the dynamic linker. 4631 4632 For static executables, we don't reserve anything. 
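For example, assuming the usual 16-byte lazy PLT entries and 8-byte GOT entries, the entry at .plt offset 0x20 (PLT index 1, right after PLT0) uses the .got.plt slot at offset (1 + 3) * 8 = 0x20.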
*/ 4633 4634 if (plt == htab->elf.splt) 4635 { 4636 got_offset = (h->plt.offset / htab->plt.plt_entry_size 4637 - htab->plt.has_plt0); 4638 got_offset = (got_offset + 3) * GOT_ENTRY_SIZE; 4639 } 4640 else 4641 { 4642 got_offset = h->plt.offset / htab->plt.plt_entry_size; 4643 got_offset = got_offset * GOT_ENTRY_SIZE; 4644 } 4645 4646 /* Fill in the entry in the procedure linkage table. */ 4647 memcpy (plt->contents + h->plt.offset, htab->plt.plt_entry, 4648 htab->plt.plt_entry_size); 4649 if (use_plt_second) 4650 { 4651 memcpy (htab->plt_second->contents + eh->plt_second.offset, 4652 htab->non_lazy_plt->plt_entry, 4653 htab->non_lazy_plt->plt_entry_size); 4654 4655 resolved_plt = htab->plt_second; 4656 plt_offset = eh->plt_second.offset; 4657 } 4658 else 4659 { 4660 resolved_plt = plt; 4661 plt_offset = h->plt.offset; 4662 } 4663 4664 /* Insert the relocation positions of the plt section. */ 4665 4666 /* Put offset the PC-relative instruction referring to the GOT entry, 4667 subtracting the size of that instruction. */ 4668 plt_got_pcrel_offset = (gotplt->output_section->vma 4669 + gotplt->output_offset 4670 + got_offset 4671 - resolved_plt->output_section->vma 4672 - resolved_plt->output_offset 4673 - plt_offset 4674 - htab->plt.plt_got_insn_size); 4675 4676 /* Check PC-relative offset overflow in PLT entry. */ 4677 if ((plt_got_pcrel_offset + 0x80000000) > 0xffffffff) 4678 /* xgettext:c-format */ 4679 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in PLT entry for `%s'\n"), 4680 output_bfd, h->root.root.string); 4681 4682 bfd_put_32 (output_bfd, plt_got_pcrel_offset, 4683 (resolved_plt->contents + plt_offset 4684 + htab->plt.plt_got_offset)); 4685 4686 /* Fill in the entry in the global offset table, initially this 4687 points to the second part of the PLT entry. Leave the entry 4688 as zero for undefined weak symbol in PIE. No PLT relocation 4689 against undefined weak symbol in PIE. */ 4690 if (!local_undefweak) 4691 { 4692 if (htab->plt.has_plt0) 4693 bfd_put_64 (output_bfd, (plt->output_section->vma 4694 + plt->output_offset 4695 + h->plt.offset 4696 + htab->lazy_plt->plt_lazy_offset), 4697 gotplt->contents + got_offset); 4698 4699 /* Fill in the entry in the .rela.plt section. */ 4700 rela.r_offset = (gotplt->output_section->vma 4701 + gotplt->output_offset 4702 + got_offset); 4703 if (PLT_LOCAL_IFUNC_P (info, h)) 4704 { 4705 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), 4706 h->root.root.string, 4707 h->root.u.def.section->owner); 4708 4709 /* If an STT_GNU_IFUNC symbol is locally defined, generate 4710 R_X86_64_IRELATIVE instead of R_X86_64_JUMP_SLOT. */ 4711 rela.r_info = htab->r_info (0, R_X86_64_IRELATIVE); 4712 rela.r_addend = (h->root.u.def.value 4713 + h->root.u.def.section->output_section->vma 4714 + h->root.u.def.section->output_offset); 4715 4716 if (htab->params->report_relative_reloc) 4717 _bfd_x86_elf_link_report_relative_reloc 4718 (info, relplt, h, sym, "R_X86_64_IRELATIVE", &rela); 4719 4720 /* R_X86_64_IRELATIVE comes last. */ 4721 plt_index = htab->next_irelative_index--; 4722 } 4723 else 4724 { 4725 rela.r_info = htab->r_info (h->dynindx, R_X86_64_JUMP_SLOT); 4726 if (htab->params->mark_plt) 4727 rela.r_addend = (resolved_plt->output_section->vma 4728 + plt_offset 4729 + htab->plt.plt_indirect_branch_offset); 4730 else 4731 rela.r_addend = 0; 4732 plt_index = htab->next_jump_slot_index++; 4733 } 4734 4735 /* Don't fill the second and third slots in PLT entry for 4736 static executables nor without PLT0. 
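(They hold the pushed relocation index and the branch back to PLT0 that lazy binding needs.)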
*/ 4737 if (plt == htab->elf.splt && htab->plt.has_plt0) 4738 { 4739 bfd_vma plt0_offset 4740 = h->plt.offset + htab->lazy_plt->plt_plt_insn_end; 4741 4742 /* Put relocation index. */ 4743 bfd_put_32 (output_bfd, plt_index, 4744 (plt->contents + h->plt.offset 4745 + htab->lazy_plt->plt_reloc_offset)); 4746 4747 /* Put offset for jmp .PLT0 and check for overflow. We don't 4748 check relocation index for overflow since branch displacement 4749 will overflow first. */ 4750 if (plt0_offset > 0x80000000) 4751 /* xgettext:c-format */ 4752 info->callbacks->einfo (_("%F%pB: branch displacement overflow in PLT entry for `%s'\n"), 4753 output_bfd, h->root.root.string); 4754 bfd_put_32 (output_bfd, - plt0_offset, 4755 (plt->contents + h->plt.offset 4756 + htab->lazy_plt->plt_plt_offset)); 4757 } 4758 4759 bed = get_elf_backend_data (output_bfd); 4760 loc = relplt->contents + plt_index * bed->s->sizeof_rela; 4761 bed->s->swap_reloca_out (output_bfd, &rela, loc); 4762 } 4763 } 4764 else if (eh->plt_got.offset != (bfd_vma) -1) 4765 { 4766 bfd_vma got_offset, plt_offset; 4767 asection *plt, *got; 4768 bool got_after_plt; 4769 int32_t got_pcrel_offset; 4770 4771 /* Set the entry in the GOT procedure linkage table. */ 4772 plt = htab->plt_got; 4773 got = htab->elf.sgot; 4774 got_offset = h->got.offset; 4775 4776 if (got_offset == (bfd_vma) -1 4777 || (h->type == STT_GNU_IFUNC && h->def_regular) 4778 || plt == NULL 4779 || got == NULL) 4780 abort (); 4781 4782 /* Use the non-lazy PLT entry template for the GOT PLT since they 4783 are the identical. */ 4784 /* Fill in the entry in the GOT procedure linkage table. */ 4785 plt_offset = eh->plt_got.offset; 4786 memcpy (plt->contents + plt_offset, 4787 htab->non_lazy_plt->plt_entry, 4788 htab->non_lazy_plt->plt_entry_size); 4789 4790 /* Put offset the PC-relative instruction referring to the GOT 4791 entry, subtracting the size of that instruction. */ 4792 got_pcrel_offset = (got->output_section->vma 4793 + got->output_offset 4794 + got_offset 4795 - plt->output_section->vma 4796 - plt->output_offset 4797 - plt_offset 4798 - htab->non_lazy_plt->plt_got_insn_size); 4799 4800 /* Check PC-relative offset overflow in GOT PLT entry. */ 4801 got_after_plt = got->output_section->vma > plt->output_section->vma; 4802 if ((got_after_plt && got_pcrel_offset < 0) 4803 || (!got_after_plt && got_pcrel_offset > 0)) 4804 /* xgettext:c-format */ 4805 info->callbacks->einfo (_("%F%pB: PC-relative offset overflow in GOT PLT entry for `%s'\n"), 4806 output_bfd, h->root.root.string); 4807 4808 bfd_put_32 (output_bfd, got_pcrel_offset, 4809 (plt->contents + plt_offset 4810 + htab->non_lazy_plt->plt_got_offset)); 4811 } 4812 4813 if (!local_undefweak 4814 && !h->def_regular 4815 && (h->plt.offset != (bfd_vma) -1 4816 || eh->plt_got.offset != (bfd_vma) -1)) 4817 { 4818 /* Mark the symbol as undefined, rather than as defined in 4819 the .plt section. Leave the value if there were any 4820 relocations where pointer equality matters (this is a clue 4821 for the dynamic linker, to make function pointer 4822 comparisons work between an application and shared 4823 library), otherwise set it to zero. If a function is only 4824 called from a binary, there is no need to slow down 4825 shared libraries because of that. */ 4826 sym->st_shndx = SHN_UNDEF; 4827 if (!h->pointer_equality_needed) 4828 sym->st_value = 0; 4829 } 4830 4831 _bfd_x86_elf_link_fixup_ifunc_symbol (info, htab, h, sym); 4832 4833 /* Don't generate dynamic GOT relocation against undefined weak 4834 symbol in executable. 
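Its GOT entry stays zero so that references resolve to 0 at run time.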
*/ 4835 if (h->got.offset != (bfd_vma) -1 4836 && ! GOT_TLS_GD_ANY_P (elf_x86_hash_entry (h)->tls_type) 4837 && elf_x86_hash_entry (h)->tls_type != GOT_TLS_IE 4838 && !local_undefweak) 4839 { 4840 Elf_Internal_Rela rela; 4841 asection *relgot = htab->elf.srelgot; 4842 const char *relative_reloc_name = NULL; 4843 bool generate_dynamic_reloc = true; 4844 4845 /* This symbol has an entry in the global offset table. Set it 4846 up. */ 4847 if (htab->elf.sgot == NULL || htab->elf.srelgot == NULL) 4848 abort (); 4849 4850 rela.r_offset = (htab->elf.sgot->output_section->vma 4851 + htab->elf.sgot->output_offset 4852 + (h->got.offset &~ (bfd_vma) 1)); 4853 4854 /* If this is a static link, or it is a -Bsymbolic link and the 4855 symbol is defined locally or was forced to be local because 4856 of a version file, we just want to emit a RELATIVE reloc. 4857 The entry in the global offset table will already have been 4858 initialized in the relocate_section function. */ 4859 if (h->def_regular 4860 && h->type == STT_GNU_IFUNC) 4861 { 4862 if (h->plt.offset == (bfd_vma) -1) 4863 { 4864 /* STT_GNU_IFUNC is referenced without PLT. */ 4865 if (htab->elf.splt == NULL) 4866 { 4867 /* Use the .rel[a].iplt section to store .got relocations 4868 in a static executable. */ 4869 relgot = htab->elf.irelplt; 4870 } 4871 if (SYMBOL_REFERENCES_LOCAL_P (info, h)) 4872 { 4873 info->callbacks->minfo (_("Local IFUNC function `%s' in %pB\n"), 4874 h->root.root.string, 4875 h->root.u.def.section->owner); 4876 4877 rela.r_info = htab->r_info (0, 4878 R_X86_64_IRELATIVE); 4879 rela.r_addend = (h->root.u.def.value 4880 + h->root.u.def.section->output_section->vma 4881 + h->root.u.def.section->output_offset); 4882 relative_reloc_name = "R_X86_64_IRELATIVE"; 4883 } 4884 else 4885 goto do_glob_dat; 4886 } 4887 else if (bfd_link_pic (info)) 4888 { 4889 /* Generate R_X86_64_GLOB_DAT. */ 4890 goto do_glob_dat; 4891 } 4892 else 4893 { 4894 asection *plt; 4895 bfd_vma plt_offset; 4896 4897 if (!h->pointer_equality_needed) 4898 abort (); 4899 4900 /* For a non-shared object, we can't use .got.plt, which 4901 contains the real function address, if we need pointer 4902 equality. We load the GOT entry with the PLT entry. */ 4903 if (htab->plt_second != NULL) 4904 { 4905 plt = htab->plt_second; 4906 plt_offset = eh->plt_second.offset; 4907 } 4908 else 4909 { 4910 plt = htab->elf.splt ?
htab->elf.splt : htab->elf.iplt; 4911 plt_offset = h->plt.offset; 4912 } 4913 bfd_put_64 (output_bfd, (plt->output_section->vma 4914 + plt->output_offset 4915 + plt_offset), 4916 htab->elf.sgot->contents + h->got.offset); 4917 return true; 4918 } 4919 } 4920 else if (bfd_link_pic (info) 4921 && SYMBOL_REFERENCES_LOCAL_P (info, h)) 4922 { 4923 if (!SYMBOL_DEFINED_NON_SHARED_P (h)) 4924 return false; 4925 BFD_ASSERT((h->got.offset & 1) != 0); 4926 if (info->enable_dt_relr) 4927 generate_dynamic_reloc = false; 4928 else 4929 { 4930 rela.r_info = htab->r_info (0, R_X86_64_RELATIVE); 4931 rela.r_addend = (h->root.u.def.value 4932 + h->root.u.def.section->output_section->vma 4933 + h->root.u.def.section->output_offset); 4934 relative_reloc_name = "R_X86_64_RELATIVE"; 4935 } 4936 } 4937 else 4938 { 4939 BFD_ASSERT((h->got.offset & 1) == 0); 4940 do_glob_dat: 4941 bfd_put_64 (output_bfd, (bfd_vma) 0, 4942 htab->elf.sgot->contents + h->got.offset); 4943 rela.r_info = htab->r_info (h->dynindx, R_X86_64_GLOB_DAT); 4944 rela.r_addend = 0; 4945 } 4946 4947 if (generate_dynamic_reloc) 4948 { 4949 if (relative_reloc_name != NULL 4950 && htab->params->report_relative_reloc) 4951 _bfd_x86_elf_link_report_relative_reloc 4952 (info, relgot, h, sym, relative_reloc_name, &rela); 4953 4954 elf_append_rela (output_bfd, relgot, &rela); 4955 } 4956 } 4957 4958 if (h->needs_copy) 4959 { 4960 Elf_Internal_Rela rela; 4961 asection *s; 4962 4963 /* This symbol needs a copy reloc. Set it up. */ 4964 VERIFY_COPY_RELOC (h, htab) 4965 4966 rela.r_offset = (h->root.u.def.value 4967 + h->root.u.def.section->output_section->vma 4968 + h->root.u.def.section->output_offset); 4969 rela.r_info = htab->r_info (h->dynindx, R_X86_64_COPY); 4970 rela.r_addend = 0; 4971 if (h->root.u.def.section == htab->elf.sdynrelro) 4972 s = htab->elf.sreldynrelro; 4973 else 4974 s = htab->elf.srelbss; 4975 elf_append_rela (output_bfd, s, &rela); 4976 } 4977 4978 return true; 4979 } 4980 4981 /* Finish up local dynamic symbol handling. We set the contents of 4982 various dynamic sections here. */ 4983 4984 static int 4985 elf_x86_64_finish_local_dynamic_symbol (void **slot, void *inf) 4986 { 4987 struct elf_link_hash_entry *h 4988 = (struct elf_link_hash_entry *) *slot; 4989 struct bfd_link_info *info 4990 = (struct bfd_link_info *) inf; 4991 4992 return elf_x86_64_finish_dynamic_symbol (info->output_bfd, 4993 info, h, NULL); 4994 } 4995 4996 /* Finish up undefined weak symbol handling in PIE. Fill its PLT entry 4997 here since undefined weak symbol may not be dynamic and may not be 4998 called for elf_x86_64_finish_dynamic_symbol. */ 4999 5000 static bool 5001 elf_x86_64_pie_finish_undefweak_symbol (struct bfd_hash_entry *bh, 5002 void *inf) 5003 { 5004 struct elf_link_hash_entry *h = (struct elf_link_hash_entry *) bh; 5005 struct bfd_link_info *info = (struct bfd_link_info *) inf; 5006 5007 if (h->root.type != bfd_link_hash_undefweak 5008 || h->dynindx != -1) 5009 return true; 5010 5011 return elf_x86_64_finish_dynamic_symbol (info->output_bfd, 5012 info, h, NULL); 5013 } 5014 5015 /* Used to decide how to sort relocs in an optimal manner for the 5016 dynamic linker, before writing them out. 
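Relative relocations are grouped together so the dynamic linker can apply them without symbol lookups, and relocations against IFUNC symbols are classed separately so they can be ordered after the relocations their resolvers may depend on.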
*/ 5017 5018 static enum elf_reloc_type_class 5019 elf_x86_64_reloc_type_class (const struct bfd_link_info *info, 5020 const asection *rel_sec ATTRIBUTE_UNUSED, 5021 const Elf_Internal_Rela *rela) 5022 { 5023 bfd *abfd = info->output_bfd; 5024 const struct elf_backend_data *bed = get_elf_backend_data (abfd); 5025 struct elf_x86_link_hash_table *htab 5026 = elf_x86_hash_table (info, X86_64_ELF_DATA); 5027 5028 if (htab->elf.dynsym != NULL 5029 && htab->elf.dynsym->contents != NULL) 5030 { 5031 /* Check relocation against STT_GNU_IFUNC symbol if there are 5032 dynamic symbols. */ 5033 unsigned long r_symndx = htab->r_sym (rela->r_info); 5034 if (r_symndx != STN_UNDEF) 5035 { 5036 Elf_Internal_Sym sym; 5037 if (!bed->s->swap_symbol_in (abfd, 5038 (htab->elf.dynsym->contents 5039 + r_symndx * bed->s->sizeof_sym), 5040 0, &sym)) 5041 abort (); 5042 5043 if (ELF_ST_TYPE (sym.st_info) == STT_GNU_IFUNC) 5044 return reloc_class_ifunc; 5045 } 5046 } 5047 5048 switch ((int) ELF32_R_TYPE (rela->r_info)) 5049 { 5050 case R_X86_64_IRELATIVE: 5051 return reloc_class_ifunc; 5052 case R_X86_64_RELATIVE: 5053 case R_X86_64_RELATIVE64: 5054 return reloc_class_relative; 5055 case R_X86_64_JUMP_SLOT: 5056 return reloc_class_plt; 5057 case R_X86_64_COPY: 5058 return reloc_class_copy; 5059 default: 5060 return reloc_class_normal; 5061 } 5062 } 5063 5064 /* Finish up the dynamic sections. */ 5065 5066 static bool 5067 elf_x86_64_finish_dynamic_sections (bfd *output_bfd, 5068 struct bfd_link_info *info) 5069 { 5070 struct elf_x86_link_hash_table *htab; 5071 5072 htab = _bfd_x86_elf_finish_dynamic_sections (output_bfd, info); 5073 if (htab == NULL) 5074 return false; 5075 5076 if (! htab->elf.dynamic_sections_created) 5077 return true; 5078 5079 if (htab->elf.splt && htab->elf.splt->size > 0) 5080 { 5081 if (bfd_is_abs_section (htab->elf.splt->output_section)) 5082 { 5083 info->callbacks->einfo 5084 (_("%F%P: discarded output section: `%pA'\n"), 5085 htab->elf.splt); 5086 return false; 5087 } 5088 5089 elf_section_data (htab->elf.splt->output_section) 5090 ->this_hdr.sh_entsize = htab->plt.plt_entry_size; 5091 5092 if (htab->plt.has_plt0) 5093 { 5094 /* Fill in the special first entry in the procedure linkage 5095 table. */ 5096 memcpy (htab->elf.splt->contents, 5097 htab->lazy_plt->plt0_entry, 5098 htab->lazy_plt->plt0_entry_size); 5099 /* Add offset for pushq GOT+8(%rip), since the instruction 5100 uses 6 bytes subtract this value. */ 5101 bfd_put_32 (output_bfd, 5102 (htab->elf.sgotplt->output_section->vma 5103 + htab->elf.sgotplt->output_offset 5104 + 8 5105 - htab->elf.splt->output_section->vma 5106 - htab->elf.splt->output_offset 5107 - 6), 5108 (htab->elf.splt->contents 5109 + htab->lazy_plt->plt0_got1_offset)); 5110 /* Add offset for the PC-relative instruction accessing 5111 GOT+16, subtracting the offset to the end of that 5112 instruction. 
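GOT+8 and GOT+16 are the second and third reserved .got.plt slots, which the dynamic linker fills in for lazy resolution (typically its module handle and the resolver entry point).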
*/ 5113 bfd_put_32 (output_bfd, 5114 (htab->elf.sgotplt->output_section->vma 5115 + htab->elf.sgotplt->output_offset 5116 + 16 5117 - htab->elf.splt->output_section->vma 5118 - htab->elf.splt->output_offset 5119 - htab->lazy_plt->plt0_got2_insn_end), 5120 (htab->elf.splt->contents 5121 + htab->lazy_plt->plt0_got2_offset)); 5122 } 5123 5124 if (htab->elf.tlsdesc_plt) 5125 { 5126 bfd_put_64 (output_bfd, (bfd_vma) 0, 5127 htab->elf.sgot->contents + htab->elf.tlsdesc_got); 5128 5129 memcpy (htab->elf.splt->contents + htab->elf.tlsdesc_plt, 5130 htab->lazy_plt->plt_tlsdesc_entry, 5131 htab->lazy_plt->plt_tlsdesc_entry_size); 5132 5133 /* Add offset for pushq GOT+8(%rip), since ENDBR64 uses 4 5134 bytes and the instruction uses 6 bytes, subtract these 5135 values. */ 5136 bfd_put_32 (output_bfd, 5137 (htab->elf.sgotplt->output_section->vma 5138 + htab->elf.sgotplt->output_offset 5139 + 8 5140 - htab->elf.splt->output_section->vma 5141 - htab->elf.splt->output_offset 5142 - htab->elf.tlsdesc_plt 5143 - htab->lazy_plt->plt_tlsdesc_got1_insn_end), 5144 (htab->elf.splt->contents 5145 + htab->elf.tlsdesc_plt 5146 + htab->lazy_plt->plt_tlsdesc_got1_offset)); 5147 /* Add offset for indirect branch via GOT+TDG, where TDG 5148 stands for htab->tlsdesc_got, subtracting the offset 5149 to the end of that instruction. */ 5150 bfd_put_32 (output_bfd, 5151 (htab->elf.sgot->output_section->vma 5152 + htab->elf.sgot->output_offset 5153 + htab->elf.tlsdesc_got 5154 - htab->elf.splt->output_section->vma 5155 - htab->elf.splt->output_offset 5156 - htab->elf.tlsdesc_plt 5157 - htab->lazy_plt->plt_tlsdesc_got2_insn_end), 5158 (htab->elf.splt->contents 5159 + htab->elf.tlsdesc_plt 5160 + htab->lazy_plt->plt_tlsdesc_got2_offset)); 5161 } 5162 } 5163 5164 /* Fill PLT entries for undefined weak symbols in PIE. */ 5165 if (bfd_link_pie (info)) 5166 bfd_hash_traverse (&info->hash->table, 5167 elf_x86_64_pie_finish_undefweak_symbol, 5168 info); 5169 5170 return true; 5171 } 5172 5173 /* Fill PLT/GOT entries and allocate dynamic relocations for local 5174 STT_GNU_IFUNC symbols, which aren't in the ELF linker hash table. 5175 It has to be done before elf_link_sort_relocs is called so that 5176 dynamic relocations are properly sorted. */ 5177 5178 static bool 5179 elf_x86_64_output_arch_local_syms 5180 (bfd *output_bfd ATTRIBUTE_UNUSED, 5181 struct bfd_link_info *info, 5182 void *flaginfo ATTRIBUTE_UNUSED, 5183 int (*func) (void *, const char *, 5184 Elf_Internal_Sym *, 5185 asection *, 5186 struct elf_link_hash_entry *) ATTRIBUTE_UNUSED) 5187 { 5188 struct elf_x86_link_hash_table *htab 5189 = elf_x86_hash_table (info, X86_64_ELF_DATA); 5190 if (htab == NULL) 5191 return false; 5192 5193 /* Fill PLT and GOT entries for local STT_GNU_IFUNC symbols. */ 5194 htab_traverse (htab->loc_hash_table, 5195 elf_x86_64_finish_local_dynamic_symbol, 5196 info); 5197 5198 return true; 5199 } 5200 5201 /* Similar to _bfd_elf_get_synthetic_symtab. Support PLTs with all 5202 dynamic relocations. 
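The synthetic symbols created here give each PLT entry a "symbol@plt" name for tools such as objdump.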
*/ 5203 5204 static long 5205 elf_x86_64_get_synthetic_symtab (bfd *abfd, 5206 long symcount ATTRIBUTE_UNUSED, 5207 asymbol **syms ATTRIBUTE_UNUSED, 5208 long dynsymcount, 5209 asymbol **dynsyms, 5210 asymbol **ret) 5211 { 5212 long count, i, n; 5213 int j; 5214 bfd_byte *plt_contents; 5215 long relsize; 5216 const struct elf_x86_lazy_plt_layout *lazy_plt; 5217 const struct elf_x86_non_lazy_plt_layout *non_lazy_plt; 5218 const struct elf_x86_lazy_plt_layout *lazy_bnd_plt; 5219 const struct elf_x86_non_lazy_plt_layout *non_lazy_bnd_plt; 5220 const struct elf_x86_lazy_plt_layout *lazy_ibt_plt; 5221 const struct elf_x86_non_lazy_plt_layout *non_lazy_ibt_plt; 5222 const struct elf_x86_lazy_plt_layout *x32_lazy_ibt_plt; 5223 const struct elf_x86_non_lazy_plt_layout *x32_non_lazy_ibt_plt; 5224 asection *plt; 5225 enum elf_x86_plt_type plt_type; 5226 struct elf_x86_plt plts[] = 5227 { 5228 { ".plt", NULL, NULL, plt_unknown, 0, 0, 0, 0 }, 5229 { ".plt.got", NULL, NULL, plt_non_lazy, 0, 0, 0, 0 }, 5230 { ".plt.sec", NULL, NULL, plt_second, 0, 0, 0, 0 }, 5231 { ".plt.bnd", NULL, NULL, plt_second, 0, 0, 0, 0 }, 5232 { NULL, NULL, NULL, plt_non_lazy, 0, 0, 0, 0 } 5233 }; 5234 5235 *ret = NULL; 5236 5237 if ((abfd->flags & (DYNAMIC | EXEC_P)) == 0) 5238 return 0; 5239 5240 if (dynsymcount <= 0) 5241 return 0; 5242 5243 relsize = bfd_get_dynamic_reloc_upper_bound (abfd); 5244 if (relsize <= 0) 5245 return -1; 5246 5247 lazy_plt = &elf_x86_64_lazy_plt; 5248 non_lazy_plt = &elf_x86_64_non_lazy_plt; 5249 lazy_bnd_plt = &elf_x86_64_lazy_bnd_plt; 5250 non_lazy_bnd_plt = &elf_x86_64_non_lazy_bnd_plt; 5251 if (ABI_64_P (abfd)) 5252 { 5253 lazy_ibt_plt = &elf_x86_64_lazy_ibt_plt; 5254 non_lazy_ibt_plt = &elf_x86_64_non_lazy_ibt_plt; 5255 x32_lazy_ibt_plt = &elf_x32_lazy_ibt_plt; 5256 x32_non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; 5257 } 5258 else 5259 { 5260 lazy_ibt_plt = &elf_x32_lazy_ibt_plt; 5261 non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; 5262 x32_lazy_ibt_plt = NULL; 5263 x32_non_lazy_ibt_plt = NULL; 5264 } 5265 5266 count = 0; 5267 for (j = 0; plts[j].name != NULL; j++) 5268 { 5269 plt = bfd_get_section_by_name (abfd, plts[j].name); 5270 if (plt == NULL 5271 || plt->size == 0 5272 || (plt->flags & SEC_HAS_CONTENTS) == 0) 5273 continue; 5274 5275 /* Get the PLT section contents. */ 5276 if (!_bfd_elf_mmap_section_contents (abfd, plt, &plt_contents)) 5277 break; 5278 5279 /* Check what kind of PLT it is. */ 5280 plt_type = plt_unknown; 5281 if (plts[j].type == plt_unknown 5282 && (plt->size >= (lazy_plt->plt_entry_size 5283 + lazy_plt->plt_entry_size))) 5284 { 5285 /* Match lazy PLT first. Need to check the first two 5286 instructions. */ 5287 if ((memcmp (plt_contents, lazy_plt->plt0_entry, 5288 lazy_plt->plt0_got1_offset) == 0) 5289 && (memcmp (plt_contents + 6, lazy_plt->plt0_entry + 6, 5290 2) == 0)) 5291 { 5292 if (x32_lazy_ibt_plt != NULL 5293 && (memcmp (plt_contents 5294 + x32_lazy_ibt_plt->plt_entry_size, 5295 x32_lazy_ibt_plt->plt_entry, 5296 x32_lazy_ibt_plt->plt_got_offset) == 0)) 5297 { 5298 /* The fist entry in the x32 lazy IBT PLT is the same 5299 as the lazy PLT. 
*/ 5300 plt_type = plt_lazy | plt_second; 5301 lazy_plt = x32_lazy_ibt_plt; 5302 } 5303 else 5304 plt_type = plt_lazy; 5305 } 5306 else if (lazy_bnd_plt != NULL 5307 && (memcmp (plt_contents, lazy_bnd_plt->plt0_entry, 5308 lazy_bnd_plt->plt0_got1_offset) == 0) 5309 && (memcmp (plt_contents + 6, 5310 lazy_bnd_plt->plt0_entry + 6, 3) == 0)) 5311 { 5312 plt_type = plt_lazy | plt_second; 5313 /* The fist entry in the lazy IBT PLT is the same as the 5314 lazy BND PLT. */ 5315 if ((memcmp (plt_contents + lazy_ibt_plt->plt_entry_size, 5316 lazy_ibt_plt->plt_entry, 5317 lazy_ibt_plt->plt_got_offset) == 0)) 5318 lazy_plt = lazy_ibt_plt; 5319 else 5320 lazy_plt = lazy_bnd_plt; 5321 } 5322 } 5323 5324 if (non_lazy_plt != NULL 5325 && (plt_type == plt_unknown || plt_type == plt_non_lazy) 5326 && plt->size >= non_lazy_plt->plt_entry_size) 5327 { 5328 /* Match non-lazy PLT. */ 5329 if (memcmp (plt_contents, non_lazy_plt->plt_entry, 5330 non_lazy_plt->plt_got_offset) == 0) 5331 plt_type = plt_non_lazy; 5332 } 5333 5334 if (plt_type == plt_unknown || plt_type == plt_second) 5335 { 5336 if (non_lazy_bnd_plt != NULL 5337 && plt->size >= non_lazy_bnd_plt->plt_entry_size 5338 && (memcmp (plt_contents, non_lazy_bnd_plt->plt_entry, 5339 non_lazy_bnd_plt->plt_got_offset) == 0)) 5340 { 5341 /* Match BND PLT. */ 5342 plt_type = plt_second; 5343 non_lazy_plt = non_lazy_bnd_plt; 5344 } 5345 else if (non_lazy_ibt_plt != NULL 5346 && plt->size >= non_lazy_ibt_plt->plt_entry_size 5347 && (memcmp (plt_contents, 5348 non_lazy_ibt_plt->plt_entry, 5349 non_lazy_ibt_plt->plt_got_offset) == 0)) 5350 { 5351 /* Match IBT PLT. */ 5352 plt_type = plt_second; 5353 non_lazy_plt = non_lazy_ibt_plt; 5354 } 5355 else if (x32_non_lazy_ibt_plt != NULL 5356 && plt->size >= x32_non_lazy_ibt_plt->plt_entry_size 5357 && (memcmp (plt_contents, 5358 x32_non_lazy_ibt_plt->plt_entry, 5359 x32_non_lazy_ibt_plt->plt_got_offset) == 0)) 5360 { 5361 /* Match x32 IBT PLT. */ 5362 plt_type = plt_second; 5363 non_lazy_plt = x32_non_lazy_ibt_plt; 5364 } 5365 } 5366 5367 if (plt_type == plt_unknown) 5368 { 5369 _bfd_elf_munmap_section_contents (plt, plt_contents); 5370 continue; 5371 } 5372 5373 plts[j].sec = plt; 5374 plts[j].type = plt_type; 5375 5376 if ((plt_type & plt_lazy)) 5377 { 5378 plts[j].plt_got_offset = lazy_plt->plt_got_offset; 5379 plts[j].plt_got_insn_size = lazy_plt->plt_got_insn_size; 5380 plts[j].plt_entry_size = lazy_plt->plt_entry_size; 5381 /* Skip PLT0 in lazy PLT. */ 5382 i = 1; 5383 } 5384 else 5385 { 5386 plts[j].plt_got_offset = non_lazy_plt->plt_got_offset; 5387 plts[j].plt_got_insn_size = non_lazy_plt->plt_got_insn_size; 5388 plts[j].plt_entry_size = non_lazy_plt->plt_entry_size; 5389 i = 0; 5390 } 5391 5392 /* Skip lazy PLT when the second PLT is used. */ 5393 if (plt_type == (plt_lazy | plt_second)) 5394 plts[j].count = 0; 5395 else 5396 { 5397 n = plt->size / plts[j].plt_entry_size; 5398 plts[j].count = n; 5399 count += n - i; 5400 } 5401 5402 plts[j].contents = plt_contents; 5403 } 5404 5405 return _bfd_x86_elf_get_synthetic_symtab (abfd, count, relsize, 5406 (bfd_vma) 0, plts, dynsyms, 5407 ret); 5408 } 5409 5410 /* Handle an x86-64 specific section when reading an object file. This 5411 is called when elfcode.h finds a section with an unknown type. */ 5412 5413 static bool 5414 elf_x86_64_section_from_shdr (bfd *abfd, Elf_Internal_Shdr *hdr, 5415 const char *name, int shindex) 5416 { 5417 if (hdr->sh_type != SHT_X86_64_UNWIND) 5418 return false; 5419 5420 if (! 
_bfd_elf_make_section_from_shdr (abfd, hdr, name, shindex)) 5421 return false; 5422 5423 return true; 5424 } 5425 5426 /* Hook called by the linker routine which adds symbols from an object 5427 file. We use it to put SHN_X86_64_LCOMMON items in .lbss, instead 5428 of .bss. */ 5429 5430 static bool 5431 elf_x86_64_add_symbol_hook (bfd *abfd, 5432 struct bfd_link_info *info ATTRIBUTE_UNUSED, 5433 Elf_Internal_Sym *sym, 5434 const char **namep ATTRIBUTE_UNUSED, 5435 flagword *flagsp ATTRIBUTE_UNUSED, 5436 asection **secp, 5437 bfd_vma *valp) 5438 { 5439 asection *lcomm; 5440 5441 switch (sym->st_shndx) 5442 { 5443 case SHN_X86_64_LCOMMON: 5444 lcomm = bfd_get_section_by_name (abfd, "LARGE_COMMON"); 5445 if (lcomm == NULL) 5446 { 5447 lcomm = bfd_make_section_with_flags (abfd, 5448 "LARGE_COMMON", 5449 (SEC_ALLOC 5450 | SEC_IS_COMMON 5451 | SEC_LINKER_CREATED)); 5452 if (lcomm == NULL) 5453 return false; 5454 elf_section_flags (lcomm) |= SHF_X86_64_LARGE; 5455 } 5456 *secp = lcomm; 5457 *valp = sym->st_size; 5458 return true; 5459 } 5460 5461 return true; 5462 } 5463 5464 5465 /* Given a BFD section, try to locate the corresponding ELF section 5466 index. */ 5467 5468 static bool 5469 elf_x86_64_elf_section_from_bfd_section (bfd *abfd ATTRIBUTE_UNUSED, 5470 asection *sec, int *index_return) 5471 { 5472 if (sec == &_bfd_elf_large_com_section) 5473 { 5474 *index_return = SHN_X86_64_LCOMMON; 5475 return true; 5476 } 5477 return false; 5478 } 5479 5480 /* Process a symbol. */ 5481 5482 static void 5483 elf_x86_64_symbol_processing (bfd *abfd ATTRIBUTE_UNUSED, 5484 asymbol *asym) 5485 { 5486 elf_symbol_type *elfsym = (elf_symbol_type *) asym; 5487 5488 switch (elfsym->internal_elf_sym.st_shndx) 5489 { 5490 case SHN_X86_64_LCOMMON: 5491 asym->section = &_bfd_elf_large_com_section; 5492 asym->value = elfsym->internal_elf_sym.st_size; 5493 /* Common symbol doesn't set BSF_GLOBAL. */ 5494 asym->flags &= ~BSF_GLOBAL; 5495 break; 5496 } 5497 } 5498 5499 static bool 5500 elf_x86_64_common_definition (Elf_Internal_Sym *sym) 5501 { 5502 return (sym->st_shndx == SHN_COMMON 5503 || sym->st_shndx == SHN_X86_64_LCOMMON); 5504 } 5505 5506 static unsigned int 5507 elf_x86_64_common_section_index (asection *sec) 5508 { 5509 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0) 5510 return SHN_COMMON; 5511 else 5512 return SHN_X86_64_LCOMMON; 5513 } 5514 5515 static asection * 5516 elf_x86_64_common_section (asection *sec) 5517 { 5518 if ((elf_section_flags (sec) & SHF_X86_64_LARGE) == 0) 5519 return bfd_com_section_ptr; 5520 else 5521 return &_bfd_elf_large_com_section; 5522 } 5523 5524 static bool 5525 elf_x86_64_merge_symbol (struct elf_link_hash_entry *h, 5526 const Elf_Internal_Sym *sym, 5527 asection **psec, 5528 bool newdef, 5529 bool olddef, 5530 bfd *oldbfd, 5531 const asection *oldsec) 5532 { 5533 /* A normal common symbol and a large common symbol result in a 5534 normal common symbol. We turn the large common symbol into a 5535 normal one. 
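Code referring to the normal common symbol may assume 32-bit addressing, so the merged symbol has to stay in the regular common section.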
*/ 5536 if (!olddef 5537 && h->root.type == bfd_link_hash_common 5538 && !newdef 5539 && bfd_is_com_section (*psec) 5540 && oldsec != *psec) 5541 { 5542 if (sym->st_shndx == SHN_COMMON 5543 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) != 0) 5544 { 5545 h->root.u.c.p->section 5546 = bfd_make_section_old_way (oldbfd, "COMMON"); 5547 h->root.u.c.p->section->flags = SEC_ALLOC; 5548 } 5549 else if (sym->st_shndx == SHN_X86_64_LCOMMON 5550 && (elf_section_flags (oldsec) & SHF_X86_64_LARGE) == 0) 5551 *psec = bfd_com_section_ptr; 5552 } 5553 5554 return true; 5555 } 5556 5557 static bool 5558 elf_x86_64_section_flags (const Elf_Internal_Shdr *hdr) 5559 { 5560 if ((hdr->sh_flags & SHF_X86_64_LARGE) != 0) 5561 hdr->bfd_section->flags |= SEC_ELF_LARGE; 5562 5563 return true; 5564 } 5565 5566 static bool 5567 elf_x86_64_fake_sections (bfd *abfd ATTRIBUTE_UNUSED, 5568 Elf_Internal_Shdr *hdr, asection *sec) 5569 { 5570 if (sec->flags & SEC_ELF_LARGE) 5571 hdr->sh_flags |= SHF_X86_64_LARGE; 5572 5573 return true; 5574 } 5575 5576 static bool 5577 elf_x86_64_copy_private_section_data (bfd *ibfd, asection *isec, 5578 bfd *obfd, asection *osec) 5579 { 5580 if (!_bfd_elf_copy_private_section_data (ibfd, isec, obfd, osec)) 5581 return false; 5582 5583 /* objcopy --set-section-flags without "large" drops SHF_X86_64_LARGE. */ 5584 if (ibfd != obfd) 5585 elf_section_flags (osec) &= ~SHF_X86_64_LARGE; 5586 5587 return true; 5588 } 5589 5590 static int 5591 elf_x86_64_additional_program_headers (bfd *abfd, 5592 struct bfd_link_info *info ATTRIBUTE_UNUSED) 5593 { 5594 asection *s; 5595 int count = 0; 5596 5597 /* Check to see if we need a large readonly segment. */ 5598 s = bfd_get_section_by_name (abfd, ".lrodata"); 5599 if (s && (s->flags & SEC_LOAD)) 5600 count++; 5601 5602 /* Check to see if we need a large data segment. Since .lbss sections 5603 is placed right after the .bss section, there should be no need for 5604 a large data segment just because of .lbss. */ 5605 s = bfd_get_section_by_name (abfd, ".ldata"); 5606 if (s && (s->flags & SEC_LOAD)) 5607 count++; 5608 5609 return count; 5610 } 5611 5612 /* Return TRUE iff relocations for INPUT are compatible with OUTPUT. */ 5613 5614 static bool 5615 elf_x86_64_relocs_compatible (const bfd_target *input, 5616 const bfd_target *output) 5617 { 5618 return ((xvec_get_elf_backend_data (input)->s->elfclass 5619 == xvec_get_elf_backend_data (output)->s->elfclass) 5620 && _bfd_elf_relocs_compatible (input, output)); 5621 } 5622 5623 /* Set up x86-64 GNU properties. Return the first relocatable ELF input 5624 with GNU properties if found. Otherwise, return NULL. */ 5625 5626 static bfd * 5627 elf_x86_64_link_setup_gnu_properties (struct bfd_link_info *info) 5628 { 5629 struct elf_x86_init_table init_table; 5630 const struct elf_backend_data *bed; 5631 struct elf_x86_link_hash_table *htab; 5632 5633 if ((int) R_X86_64_standard >= (int) R_X86_64_converted_reloc_bit 5634 || (int) R_X86_64_max <= (int) R_X86_64_converted_reloc_bit 5635 || ((int) (R_X86_64_GNU_VTINHERIT | R_X86_64_converted_reloc_bit) 5636 != (int) R_X86_64_GNU_VTINHERIT) 5637 || ((int) (R_X86_64_GNU_VTENTRY | R_X86_64_converted_reloc_bit) 5638 != (int) R_X86_64_GNU_VTENTRY)) 5639 abort (); 5640 5641 /* This is unused for x86-64. 
*/ 5642 init_table.plt0_pad_byte = 0x90; 5643 5644 bed = get_elf_backend_data (info->output_bfd); 5645 htab = elf_x86_hash_table (info, bed->target_id); 5646 if (!htab) 5647 abort (); 5648 5649 init_table.lazy_plt = &elf_x86_64_lazy_plt; 5650 init_table.non_lazy_plt = &elf_x86_64_non_lazy_plt; 5651 5652 init_table.lazy_ibt_plt = &elf_x32_lazy_ibt_plt; 5653 init_table.non_lazy_ibt_plt = &elf_x32_non_lazy_ibt_plt; 5654 5655 if (ABI_64_P (info->output_bfd)) 5656 { 5657 init_table.sframe_lazy_plt = &elf_x86_64_sframe_plt; 5658 init_table.sframe_non_lazy_plt = &elf_x86_64_sframe_non_lazy_plt; 5659 init_table.sframe_lazy_ibt_plt = &elf_x86_64_sframe_plt; 5660 init_table.sframe_non_lazy_ibt_plt = &elf_x86_64_sframe_non_lazy_plt; 5661 } 5662 else 5663 { 5664 /* SFrame is not supported for non AMD64. */ 5665 init_table.sframe_lazy_plt = NULL; 5666 init_table.sframe_non_lazy_plt = NULL; 5667 } 5668 5669 if (ABI_64_P (info->output_bfd)) 5670 { 5671 init_table.r_info = elf64_r_info; 5672 init_table.r_sym = elf64_r_sym; 5673 } 5674 else 5675 { 5676 init_table.r_info = elf32_r_info; 5677 init_table.r_sym = elf32_r_sym; 5678 } 5679 5680 return _bfd_x86_elf_link_setup_gnu_properties (info, &init_table); 5681 } 5682 5683 static void 5684 elf_x86_64_add_glibc_version_dependency 5685 (struct elf_find_verdep_info *rinfo) 5686 { 5687 unsigned int i = 0; 5688 const char *version[3] = { NULL, NULL, NULL }; 5689 struct elf_x86_link_hash_table *htab; 5690 5691 if (rinfo->info->enable_dt_relr) 5692 { 5693 version[i] = "GLIBC_ABI_DT_RELR"; 5694 i++; 5695 } 5696 5697 htab = elf_x86_hash_table (rinfo->info, X86_64_ELF_DATA); 5698 if (htab != NULL && htab->params->mark_plt) 5699 { 5700 version[i] = "GLIBC_2.36"; 5701 i++; 5702 } 5703 5704 if (i != 0) 5705 _bfd_elf_link_add_glibc_version_dependency (rinfo, version); 5706 } 5707 5708 static const struct bfd_elf_special_section 5709 elf_x86_64_special_sections[]= 5710 { 5711 { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 5712 { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, 5713 { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE}, 5714 { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 5715 { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE}, 5716 { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE}, 5717 { NULL, 0, 0, 0, 0 } 5718 }; 5719 5720 #define TARGET_LITTLE_SYM x86_64_elf64_vec 5721 #define TARGET_LITTLE_NAME "elf64-x86-64" 5722 #define ELF_ARCH bfd_arch_i386 5723 #define ELF_TARGET_ID X86_64_ELF_DATA 5724 #define ELF_MACHINE_CODE EM_X86_64 5725 #define ELF_MAXPAGESIZE 0x1000 5726 #define ELF_COMMONPAGESIZE 0x1000 5727 5728 #define elf_backend_can_gc_sections 1 5729 #define elf_backend_can_refcount 1 5730 #define elf_backend_want_got_plt 1 5731 #define elf_backend_plt_readonly 1 5732 #define elf_backend_want_plt_sym 0 5733 #define elf_backend_got_header_size (GOT_ENTRY_SIZE*3) 5734 #define elf_backend_rela_normal 1 5735 #define elf_backend_plt_alignment 4 5736 #define elf_backend_caches_rawsize 1 5737 #define elf_backend_dtrel_excludes_plt 1 5738 #define elf_backend_want_dynrelro 1 5739 5740 #define elf_info_to_howto elf_x86_64_info_to_howto 5741 5742 #define bfd_elf64_bfd_copy_private_section_data \ 5743 elf_x86_64_copy_private_section_data 5744 #define bfd_elf64_bfd_reloc_type_lookup elf_x86_64_reloc_type_lookup 
static const struct bfd_elf_special_section
elf_x86_64_special_sections[] =
{
  { STRING_COMMA_LEN (".gnu.linkonce.lb"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE },
  { STRING_COMMA_LEN (".gnu.linkonce.lr"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE },
  { STRING_COMMA_LEN (".gnu.linkonce.lt"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_EXECINSTR + SHF_X86_64_LARGE },
  { STRING_COMMA_LEN (".lbss"), -2, SHT_NOBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE },
  { STRING_COMMA_LEN (".ldata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_WRITE + SHF_X86_64_LARGE },
  { STRING_COMMA_LEN (".lrodata"), -2, SHT_PROGBITS, SHF_ALLOC + SHF_X86_64_LARGE },
  { NULL, 0, 0, 0, 0 }
};

#define TARGET_LITTLE_SYM		    x86_64_elf64_vec
#define TARGET_LITTLE_NAME		    "elf64-x86-64"
#define ELF_ARCH			    bfd_arch_i386
#define ELF_TARGET_ID			    X86_64_ELF_DATA
#define ELF_MACHINE_CODE		    EM_X86_64
#define ELF_MAXPAGESIZE			    0x1000
#define ELF_COMMONPAGESIZE		    0x1000

#define elf_backend_can_gc_sections	    1
#define elf_backend_can_refcount	    1
#define elf_backend_want_got_plt	    1
#define elf_backend_plt_readonly	    1
#define elf_backend_want_plt_sym	    0
#define elf_backend_got_header_size	    (GOT_ENTRY_SIZE*3)
#define elf_backend_rela_normal		    1
#define elf_backend_plt_alignment	    4
#define elf_backend_caches_rawsize	    1
#define elf_backend_dtrel_excludes_plt	    1
#define elf_backend_want_dynrelro	    1

#define elf_info_to_howto		    elf_x86_64_info_to_howto

#define bfd_elf64_bfd_copy_private_section_data \
  elf_x86_64_copy_private_section_data
#define bfd_elf64_bfd_reloc_type_lookup	    elf_x86_64_reloc_type_lookup
#define bfd_elf64_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup

#define elf_backend_relocs_compatible	    elf_x86_64_relocs_compatible
#define elf_backend_early_size_sections	    elf_x86_64_early_size_sections
#define elf_backend_create_dynamic_sections _bfd_elf_create_dynamic_sections
#define elf_backend_finish_dynamic_sections elf_x86_64_finish_dynamic_sections
#define elf_backend_finish_dynamic_symbol   elf_x86_64_finish_dynamic_symbol
#define elf_backend_output_arch_local_syms  elf_x86_64_output_arch_local_syms
#define elf_backend_grok_prstatus	    elf_x86_64_grok_prstatus
#define elf_backend_grok_psinfo		    elf_x86_64_grok_psinfo
#ifdef CORE_HEADER
#define elf_backend_write_core_note	    elf_x86_64_write_core_note
#endif
#define elf_backend_reloc_type_class	    elf_x86_64_reloc_type_class
#define elf_backend_relocate_section	    elf_x86_64_relocate_section
#define elf_backend_init_index_section	    _bfd_elf_init_1_index_section
#define elf_backend_object_p		    elf64_x86_64_elf_object_p
#define bfd_elf64_get_synthetic_symtab	    elf_x86_64_get_synthetic_symtab

#define elf_backend_section_from_shdr \
  elf_x86_64_section_from_shdr

#define elf_backend_section_from_bfd_section \
  elf_x86_64_elf_section_from_bfd_section
#define elf_backend_add_symbol_hook \
  elf_x86_64_add_symbol_hook
#define elf_backend_symbol_processing \
  elf_x86_64_symbol_processing
#define elf_backend_common_section_index \
  elf_x86_64_common_section_index
#define elf_backend_common_section \
  elf_x86_64_common_section
#define elf_backend_common_definition \
  elf_x86_64_common_definition
#define elf_backend_merge_symbol \
  elf_x86_64_merge_symbol
#define elf_backend_special_sections \
  elf_x86_64_special_sections
#define elf_backend_section_flags	    elf_x86_64_section_flags
#define elf_backend_fake_sections	    elf_x86_64_fake_sections
#define elf_backend_additional_program_headers \
  elf_x86_64_additional_program_headers
#define elf_backend_setup_gnu_properties \
  elf_x86_64_link_setup_gnu_properties
#define elf_backend_hide_symbol \
  _bfd_x86_elf_hide_symbol
#define elf_backend_add_glibc_version_dependency \
  elf_x86_64_add_glibc_version_dependency

#undef	elf64_bed
#define elf64_bed elf64_x86_64_bed

#include "elf64-target.h"
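/* The remaining target vectors (CloudABI, FreeBSD, Solaris 2 and the
   32-bit x32 target) are produced by adjusting a few of the macros
   above and re-including the generic elf64-target.h/elf32-target.h
   templates; each inclusion emits another bfd_target that reuses the
   backend routines defined in this file.  */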
/* CloudABI support.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_cloudabi_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-cloudabi"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_CLOUDABI

#undef	elf64_bed
#define elf64_bed elf64_x86_64_cloudabi_bed

#include "elf64-target.h"

/* FreeBSD support.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_fbsd_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-freebsd"

#undef	ELF_OSABI
#define	ELF_OSABI			    ELFOSABI_FREEBSD

#undef	elf64_bed
#define elf64_bed elf64_x86_64_fbsd_bed

#include "elf64-target.h"

/* Solaris 2 support.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf64_sol2_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf64-x86-64-sol2"

#undef	ELF_TARGET_OS
#define	ELF_TARGET_OS			    is_solaris

/* Restore default: we cannot use ELFOSABI_SOLARIS, otherwise ELFOSABI_NONE
   objects won't be recognized.  */
#undef	ELF_OSABI

#undef	elf64_bed
#define elf64_bed			    elf64_x86_64_sol2_bed

/* The 64-bit static TLS arena size is rounded to the nearest 16-byte
   boundary.  */
#undef	elf_backend_static_tls_alignment
#define elf_backend_static_tls_alignment    16

/* The Solaris 2 ABI requires a plt symbol on all platforms.

   Cf. Linker and Libraries Guide, Ch. 2, Link-Editor, Generating the Output
   File, p.63.  */
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	    1

#undef	elf_backend_strtab_flags
#define elf_backend_strtab_flags	    SHF_STRINGS

static bool
elf64_x86_64_copy_solaris_special_section_fields (const bfd *ibfd ATTRIBUTE_UNUSED,
						  bfd *obfd ATTRIBUTE_UNUSED,
						  const Elf_Internal_Shdr *isection ATTRIBUTE_UNUSED,
						  Elf_Internal_Shdr *osection ATTRIBUTE_UNUSED)
{
  /* PR 19938: FIXME: Need to add code for setting the sh_info
     and sh_link fields of Solaris specific section types.  */
  return false;
}

#undef	elf_backend_copy_special_section_fields
#define elf_backend_copy_special_section_fields elf64_x86_64_copy_solaris_special_section_fields

#include "elf64-target.h"

/* Restore defaults.  */
#undef	ELF_OSABI
#undef	elf_backend_static_tls_alignment
#undef	elf_backend_want_plt_sym
#define elf_backend_want_plt_sym	    0
#undef	elf_backend_strtab_flags
#undef	elf_backend_copy_special_section_fields

/* 32-bit x86-64 support.  */

#undef	TARGET_LITTLE_SYM
#define TARGET_LITTLE_SYM		    x86_64_elf32_vec
#undef	TARGET_LITTLE_NAME
#define TARGET_LITTLE_NAME		    "elf32-x86-64"
#undef	elf32_bed
#define	elf32_bed			    elf32_x86_64_bed

#undef	ELF_ARCH
#define ELF_ARCH			    bfd_arch_i386

#undef	ELF_MACHINE_CODE
#define ELF_MACHINE_CODE		    EM_X86_64

#undef	ELF_TARGET_OS
#undef	ELF_OSABI

#define bfd_elf32_bfd_copy_private_section_data \
  elf_x86_64_copy_private_section_data
#define bfd_elf32_bfd_reloc_type_lookup \
  elf_x86_64_reloc_type_lookup
#define bfd_elf32_bfd_reloc_name_lookup \
  elf_x86_64_reloc_name_lookup
#define bfd_elf32_get_synthetic_symtab \
  elf_x86_64_get_synthetic_symtab

#undef	elf_backend_object_p
#define elf_backend_object_p \
  elf32_x86_64_elf_object_p

#undef	elf_backend_bfd_from_remote_memory
#define elf_backend_bfd_from_remote_memory \
  _bfd_elf32_bfd_from_remote_memory

#undef	elf_backend_size_info
#define elf_backend_size_info \
  _bfd_elf32_size_info

#include "elf32-target.h"
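/* Illustrative check: depending on how BFD was configured, the target
   vectors generated above appear in the list printed by "objdump -i",
   e.g. elf64-x86-64, elf32-x86-64 and elf64-x86-64-freebsd.  */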