1 //===- X86_64.cpp ---------------------------------------------------------===// 2 // 3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 // See https://llvm.org/LICENSE.txt for license information. 5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 // 7 //===----------------------------------------------------------------------===// 8 9 #include "OutputSections.h" 10 #include "Relocations.h" 11 #include "Symbols.h" 12 #include "SyntheticSections.h" 13 #include "Target.h" 14 #include "lld/Common/ErrorHandler.h" 15 #include "llvm/BinaryFormat/ELF.h" 16 #include "llvm/Support/Endian.h" 17 #include "llvm/Support/MathExtras.h" 18 19 using namespace llvm; 20 using namespace llvm::object; 21 using namespace llvm::support::endian; 22 using namespace llvm::ELF; 23 using namespace lld; 24 using namespace lld::elf; 25 26 namespace { 27 class X86_64 : public TargetInfo { 28 public: 29 X86_64(Ctx &); 30 int getTlsGdRelaxSkip(RelType type) const override; 31 RelExpr getRelExpr(RelType type, const Symbol &s, 32 const uint8_t *loc) const override; 33 RelType getDynRel(RelType type) const override; 34 void writeGotPltHeader(uint8_t *buf) const override; 35 void writeGotPlt(uint8_t *buf, const Symbol &s) const override; 36 void writeIgotPlt(uint8_t *buf, const Symbol &s) const override; 37 void writePltHeader(uint8_t *buf) const override; 38 void writePlt(uint8_t *buf, const Symbol &sym, 39 uint64_t pltEntryAddr) const override; 40 void relocate(uint8_t *loc, const Relocation &rel, 41 uint64_t val) const override; 42 int64_t getImplicitAddend(const uint8_t *buf, RelType type) const override; 43 void applyJumpInstrMod(uint8_t *loc, JumpModType type, 44 unsigned size) const override; 45 RelExpr adjustGotPcExpr(RelType type, int64_t addend, 46 const uint8_t *loc) const override; 47 void relocateAlloc(InputSectionBase &sec, uint8_t *buf) const override; 48 bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end, 49 uint8_t stOther) const override; 50 bool deleteFallThruJmpInsn(InputSection &is, InputFile *file, 51 InputSection *nextIS) const override; 52 bool relaxOnce(int pass) const override; 53 54 private: 55 void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const; 56 void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const; 57 void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const; 58 void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const; 59 }; 60 } // namespace 61 62 // This is vector of NOP instructions of sizes from 1 to 8 bytes. The 63 // appropriately sized instructions are used to fill the gaps between sections 64 // which are executed during fall through. 65 static const std::vector<std::vector<uint8_t>> nopInstructions = { 66 {0x90}, 67 {0x66, 0x90}, 68 {0x0f, 0x1f, 0x00}, 69 {0x0f, 0x1f, 0x40, 0x00}, 70 {0x0f, 0x1f, 0x44, 0x00, 0x00}, 71 {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x00}, 72 {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00}, 73 {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}, 74 {0x66, 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00}}; 75 76 X86_64::X86_64(Ctx &ctx) : TargetInfo(ctx) { 77 copyRel = R_X86_64_COPY; 78 gotRel = R_X86_64_GLOB_DAT; 79 pltRel = R_X86_64_JUMP_SLOT; 80 relativeRel = R_X86_64_RELATIVE; 81 iRelativeRel = R_X86_64_IRELATIVE; 82 symbolicRel = R_X86_64_64; 83 tlsDescRel = R_X86_64_TLSDESC; 84 tlsGotRel = R_X86_64_TPOFF64; 85 tlsModuleIndexRel = R_X86_64_DTPMOD64; 86 tlsOffsetRel = R_X86_64_DTPOFF64; 87 gotBaseSymInGotPlt = true; 88 gotEntrySize = 8; 89 pltHeaderSize = 16; 90 pltEntrySize = 16; 91 ipltEntrySize = 16; 92 trapInstr = {0xcc, 0xcc, 0xcc, 0xcc}; // 0xcc = INT3 93 nopInstrs = nopInstructions; 94 95 // Align to the large page size (known as a superpage or huge page). 96 // FreeBSD automatically promotes large, superpage-aligned allocations. 97 defaultImageBase = 0x200000; 98 } 99 100 int X86_64::getTlsGdRelaxSkip(RelType type) const { 101 // TLSDESC relocations are processed separately. See relaxTlsGdToLe below. 102 return type == R_X86_64_GOTPC32_TLSDESC || 103 type == R_X86_64_CODE_4_GOTPC32_TLSDESC || 104 type == R_X86_64_TLSDESC_CALL 105 ? 1 106 : 2; 107 } 108 109 // Opcodes for the different X86_64 jmp instructions. 110 enum JmpInsnOpcode : uint32_t { 111 J_JMP_32, 112 J_JNE_32, 113 J_JE_32, 114 J_JG_32, 115 J_JGE_32, 116 J_JB_32, 117 J_JBE_32, 118 J_JL_32, 119 J_JLE_32, 120 J_JA_32, 121 J_JAE_32, 122 J_UNKNOWN, 123 }; 124 125 // Given the first (optional) and second byte of the insn's opcode, this 126 // returns the corresponding enum value. 127 static JmpInsnOpcode getJmpInsnType(const uint8_t *first, 128 const uint8_t *second) { 129 if (*second == 0xe9) 130 return J_JMP_32; 131 132 if (first == nullptr) 133 return J_UNKNOWN; 134 135 if (*first == 0x0f) { 136 switch (*second) { 137 case 0x84: 138 return J_JE_32; 139 case 0x85: 140 return J_JNE_32; 141 case 0x8f: 142 return J_JG_32; 143 case 0x8d: 144 return J_JGE_32; 145 case 0x82: 146 return J_JB_32; 147 case 0x86: 148 return J_JBE_32; 149 case 0x8c: 150 return J_JL_32; 151 case 0x8e: 152 return J_JLE_32; 153 case 0x87: 154 return J_JA_32; 155 case 0x83: 156 return J_JAE_32; 157 } 158 } 159 return J_UNKNOWN; 160 } 161 162 // Return the relocation index for input section IS with a specific Offset. 163 // Returns the maximum size of the vector if no such relocation is found. 164 static unsigned getRelocationWithOffset(const InputSection &is, 165 uint64_t offset) { 166 unsigned size = is.relocs().size(); 167 for (unsigned i = size - 1; i + 1 > 0; --i) { 168 if (is.relocs()[i].offset == offset && is.relocs()[i].expr != R_NONE) 169 return i; 170 } 171 return size; 172 } 173 174 // Returns true if R corresponds to a relocation used for a jump instruction. 175 // TODO: Once special relocations for relaxable jump instructions are available, 176 // this should be modified to use those relocations. 177 static bool isRelocationForJmpInsn(Relocation &R) { 178 return R.type == R_X86_64_PLT32 || R.type == R_X86_64_PC32 || 179 R.type == R_X86_64_PC8; 180 } 181 182 // Return true if Relocation R points to the first instruction in the 183 // next section. 184 // TODO: Delete this once psABI reserves a new relocation type for fall thru 185 // jumps. 186 static bool isFallThruRelocation(InputSection &is, InputFile *file, 187 InputSection *nextIS, Relocation &r) { 188 if (!isRelocationForJmpInsn(r)) 189 return false; 190 191 uint64_t addrLoc = is.getOutputSection()->addr + is.outSecOff + r.offset; 192 uint64_t targetOffset = is.getRelocTargetVA(is.getCtx(), r, addrLoc); 193 194 // If this jmp is a fall thru, the target offset is the beginning of the 195 // next section. 196 uint64_t nextSectionOffset = 197 nextIS->getOutputSection()->addr + nextIS->outSecOff; 198 return (addrLoc + 4 + targetOffset) == nextSectionOffset; 199 } 200 201 // Return the jmp instruction opcode that is the inverse of the given 202 // opcode. For example, JE inverted is JNE. 203 static JmpInsnOpcode invertJmpOpcode(const JmpInsnOpcode opcode) { 204 switch (opcode) { 205 case J_JE_32: 206 return J_JNE_32; 207 case J_JNE_32: 208 return J_JE_32; 209 case J_JG_32: 210 return J_JLE_32; 211 case J_JGE_32: 212 return J_JL_32; 213 case J_JB_32: 214 return J_JAE_32; 215 case J_JBE_32: 216 return J_JA_32; 217 case J_JL_32: 218 return J_JGE_32; 219 case J_JLE_32: 220 return J_JG_32; 221 case J_JA_32: 222 return J_JBE_32; 223 case J_JAE_32: 224 return J_JB_32; 225 default: 226 return J_UNKNOWN; 227 } 228 } 229 230 // Deletes direct jump instruction in input sections that jumps to the 231 // following section as it is not required. If there are two consecutive jump 232 // instructions, it checks if they can be flipped and one can be deleted. 233 // For example: 234 // .section .text 235 // a.BB.foo: 236 // ... 237 // 10: jne aa.BB.foo 238 // 16: jmp bar 239 // aa.BB.foo: 240 // ... 241 // 242 // can be converted to: 243 // a.BB.foo: 244 // ... 245 // 10: je bar #jne flipped to je and the jmp is deleted. 246 // aa.BB.foo: 247 // ... 248 bool X86_64::deleteFallThruJmpInsn(InputSection &is, InputFile *file, 249 InputSection *nextIS) const { 250 const unsigned sizeOfDirectJmpInsn = 5; 251 252 if (nextIS == nullptr) 253 return false; 254 255 if (is.getSize() < sizeOfDirectJmpInsn) 256 return false; 257 258 // If this jmp insn can be removed, it is the last insn and the 259 // relocation is 4 bytes before the end. 260 unsigned rIndex = getRelocationWithOffset(is, is.getSize() - 4); 261 if (rIndex == is.relocs().size()) 262 return false; 263 264 Relocation &r = is.relocs()[rIndex]; 265 266 // Check if the relocation corresponds to a direct jmp. 267 const uint8_t *secContents = is.content().data(); 268 // If it is not a direct jmp instruction, there is nothing to do here. 269 if (*(secContents + r.offset - 1) != 0xe9) 270 return false; 271 272 if (isFallThruRelocation(is, file, nextIS, r)) { 273 // This is a fall thru and can be deleted. 274 r.expr = R_NONE; 275 r.offset = 0; 276 is.drop_back(sizeOfDirectJmpInsn); 277 is.nopFiller = true; 278 return true; 279 } 280 281 // Now, check if flip and delete is possible. 282 const unsigned sizeOfJmpCCInsn = 6; 283 // To flip, there must be at least one JmpCC and one direct jmp. 284 if (is.getSize() < sizeOfDirectJmpInsn + sizeOfJmpCCInsn) 285 return false; 286 287 unsigned rbIndex = 288 getRelocationWithOffset(is, (is.getSize() - sizeOfDirectJmpInsn - 4)); 289 if (rbIndex == is.relocs().size()) 290 return false; 291 292 Relocation &rB = is.relocs()[rbIndex]; 293 294 const uint8_t *jmpInsnB = secContents + rB.offset - 1; 295 JmpInsnOpcode jmpOpcodeB = getJmpInsnType(jmpInsnB - 1, jmpInsnB); 296 if (jmpOpcodeB == J_UNKNOWN) 297 return false; 298 299 if (!isFallThruRelocation(is, file, nextIS, rB)) 300 return false; 301 302 // jmpCC jumps to the fall thru block, the branch can be flipped and the 303 // jmp can be deleted. 304 JmpInsnOpcode jInvert = invertJmpOpcode(jmpOpcodeB); 305 if (jInvert == J_UNKNOWN) 306 return false; 307 is.jumpInstrMod = make<JumpInstrMod>(); 308 *is.jumpInstrMod = {rB.offset - 1, jInvert, 4}; 309 // Move R's values to rB except the offset. 310 rB = {r.expr, r.type, rB.offset, r.addend, r.sym}; 311 // Cancel R 312 r.expr = R_NONE; 313 r.offset = 0; 314 is.drop_back(sizeOfDirectJmpInsn); 315 is.nopFiller = true; 316 return true; 317 } 318 319 bool X86_64::relaxOnce(int pass) const { 320 uint64_t minVA = UINT64_MAX, maxVA = 0; 321 for (OutputSection *osec : ctx.outputSections) { 322 minVA = std::min(minVA, osec->addr); 323 maxVA = std::max(maxVA, osec->addr + osec->size); 324 } 325 // If the max VA is under 2^31, GOTPCRELX relocations cannot overfow. In 326 // -pie/-shared, the condition can be relaxed to test the max VA difference as 327 // there is no R_RELAX_GOT_PC_NOPIC. 328 if (isUInt<31>(maxVA) || (isUInt<31>(maxVA - minVA) && ctx.arg.isPic)) 329 return false; 330 331 SmallVector<InputSection *, 0> storage; 332 bool changed = false; 333 for (OutputSection *osec : ctx.outputSections) { 334 if (!(osec->flags & SHF_EXECINSTR)) 335 continue; 336 for (InputSection *sec : getInputSections(*osec, storage)) { 337 for (Relocation &rel : sec->relocs()) { 338 if (rel.expr != R_RELAX_GOT_PC && rel.expr != R_RELAX_GOT_PC_NOPIC) 339 continue; 340 assert(rel.addend == -4); 341 342 Relocation rel1 = rel; 343 rel1.addend = rel.expr == R_RELAX_GOT_PC_NOPIC ? 0 : -4; 344 uint64_t v = sec->getRelocTargetVA(ctx, rel1, 345 sec->getOutputSection()->addr + 346 sec->outSecOff + rel.offset); 347 if (isInt<32>(v)) 348 continue; 349 if (rel.sym->auxIdx == 0) { 350 rel.sym->allocateAux(ctx); 351 addGotEntry(ctx, *rel.sym); 352 changed = true; 353 } 354 rel.expr = R_GOT_PC; 355 } 356 } 357 } 358 return changed; 359 } 360 361 RelExpr X86_64::getRelExpr(RelType type, const Symbol &s, 362 const uint8_t *loc) const { 363 switch (type) { 364 case R_X86_64_8: 365 case R_X86_64_16: 366 case R_X86_64_32: 367 case R_X86_64_32S: 368 case R_X86_64_64: 369 return R_ABS; 370 case R_X86_64_DTPOFF32: 371 case R_X86_64_DTPOFF64: 372 return R_DTPREL; 373 case R_X86_64_TPOFF32: 374 case R_X86_64_TPOFF64: 375 return R_TPREL; 376 case R_X86_64_TLSDESC_CALL: 377 return R_TLSDESC_CALL; 378 case R_X86_64_TLSLD: 379 return R_TLSLD_PC; 380 case R_X86_64_TLSGD: 381 return R_TLSGD_PC; 382 case R_X86_64_SIZE32: 383 case R_X86_64_SIZE64: 384 return R_SIZE; 385 case R_X86_64_PLT32: 386 return R_PLT_PC; 387 case R_X86_64_PC8: 388 case R_X86_64_PC16: 389 case R_X86_64_PC32: 390 case R_X86_64_PC64: 391 return R_PC; 392 case R_X86_64_GOT32: 393 case R_X86_64_GOT64: 394 return R_GOTPLT; 395 case R_X86_64_GOTPC32_TLSDESC: 396 case R_X86_64_CODE_4_GOTPC32_TLSDESC: 397 return R_TLSDESC_PC; 398 case R_X86_64_GOTPCREL: 399 case R_X86_64_GOTPCRELX: 400 case R_X86_64_REX_GOTPCRELX: 401 case R_X86_64_CODE_4_GOTPCRELX: 402 case R_X86_64_GOTTPOFF: 403 case R_X86_64_CODE_4_GOTTPOFF: 404 case R_X86_64_CODE_6_GOTTPOFF: 405 return R_GOT_PC; 406 case R_X86_64_GOTOFF64: 407 return R_GOTPLTREL; 408 case R_X86_64_PLTOFF64: 409 return R_PLT_GOTPLT; 410 case R_X86_64_GOTPC32: 411 case R_X86_64_GOTPC64: 412 return R_GOTPLTONLY_PC; 413 case R_X86_64_NONE: 414 return R_NONE; 415 default: 416 Err(ctx) << getErrorLoc(ctx, loc) << "unknown relocation (" << type.v 417 << ") against symbol " << &s; 418 return R_NONE; 419 } 420 } 421 422 void X86_64::writeGotPltHeader(uint8_t *buf) const { 423 // The first entry holds the link-time address of _DYNAMIC. It is documented 424 // in the psABI and glibc before Aug 2021 used the entry to compute run-time 425 // load address of the shared object (note that this is relevant for linking 426 // ld.so, not any other program). 427 write64le(buf, ctx.mainPart->dynamic->getVA()); 428 } 429 430 void X86_64::writeGotPlt(uint8_t *buf, const Symbol &s) const { 431 // See comments in X86::writeGotPlt. 432 write64le(buf, s.getPltVA(ctx) + 6); 433 } 434 435 void X86_64::writeIgotPlt(uint8_t *buf, const Symbol &s) const { 436 // An x86 entry is the address of the ifunc resolver function (for -z rel). 437 if (ctx.arg.writeAddends) 438 write64le(buf, s.getVA(ctx)); 439 } 440 441 void X86_64::writePltHeader(uint8_t *buf) const { 442 const uint8_t pltData[] = { 443 0xff, 0x35, 0, 0, 0, 0, // pushq GOTPLT+8(%rip) 444 0xff, 0x25, 0, 0, 0, 0, // jmp *GOTPLT+16(%rip) 445 0x0f, 0x1f, 0x40, 0x00, // nop 446 }; 447 memcpy(buf, pltData, sizeof(pltData)); 448 uint64_t gotPlt = ctx.in.gotPlt->getVA(); 449 uint64_t plt = ctx.in.ibtPlt ? ctx.in.ibtPlt->getVA() : ctx.in.plt->getVA(); 450 write32le(buf + 2, gotPlt - plt + 2); // GOTPLT+8 451 write32le(buf + 8, gotPlt - plt + 4); // GOTPLT+16 452 } 453 454 void X86_64::writePlt(uint8_t *buf, const Symbol &sym, 455 uint64_t pltEntryAddr) const { 456 const uint8_t inst[] = { 457 0xff, 0x25, 0, 0, 0, 0, // jmpq *got(%rip) 458 0x68, 0, 0, 0, 0, // pushq <relocation index> 459 0xe9, 0, 0, 0, 0, // jmpq plt[0] 460 }; 461 memcpy(buf, inst, sizeof(inst)); 462 463 write32le(buf + 2, sym.getGotPltVA(ctx) - pltEntryAddr - 6); 464 write32le(buf + 7, sym.getPltIdx(ctx)); 465 write32le(buf + 12, ctx.in.plt->getVA() - pltEntryAddr - 16); 466 } 467 468 RelType X86_64::getDynRel(RelType type) const { 469 if (type == R_X86_64_64 || type == R_X86_64_PC64 || type == R_X86_64_SIZE32 || 470 type == R_X86_64_SIZE64) 471 return type; 472 return R_X86_64_NONE; 473 } 474 475 void X86_64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, 476 uint64_t val) const { 477 if (rel.type == R_X86_64_TLSGD) { 478 // Convert 479 // .byte 0x66 480 // leaq x@tlsgd(%rip), %rdi 481 // .word 0x6666 482 // rex64 483 // call __tls_get_addr@plt 484 // to the following two instructions. 485 const uint8_t inst[] = { 486 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 487 0x00, 0x00, // mov %fs:0x0,%rax 488 0x48, 0x8d, 0x80, 0, 0, 0, 0, // lea x@tpoff,%rax 489 }; 490 memcpy(loc - 4, inst, sizeof(inst)); 491 492 // The original code used a pc relative relocation and so we have to 493 // compensate for the -4 in had in the addend. 494 write32le(loc + 8, val + 4); 495 } else if (rel.type == R_X86_64_GOTPC32_TLSDESC || 496 rel.type == R_X86_64_CODE_4_GOTPC32_TLSDESC) { 497 // Convert leaq x@tlsdesc(%rip), %REG to movq $x@tpoff, %REG. 498 if ((loc[-3] & 0xfb) != 0x48 || loc[-2] != 0x8d || 499 (loc[-1] & 0xc7) != 0x05) { 500 Err(ctx) << getErrorLoc(ctx, (rel.type == R_X86_64_GOTPC32_TLSDESC) 501 ? loc - 3 502 : loc - 4) 503 << "R_X86_64_GOTPC32_TLSDESC/R_X86_64_CODE_4_GOTPC32_TLSDESC " 504 "must be used in leaq x@tlsdesc(%rip), %REG"; 505 return; 506 } 507 if (rel.type == R_X86_64_GOTPC32_TLSDESC) { 508 loc[-3] = 0x48 | ((loc[-3] >> 2) & 1); 509 } else { 510 loc[-3] = (loc[-3] & ~0x44) | ((loc[-3] & 0x44) >> 2); 511 } 512 loc[-2] = 0xc7; 513 loc[-1] = 0xc0 | ((loc[-1] >> 3) & 7); 514 515 write32le(loc, val + 4); 516 } else { 517 // Convert call *x@tlsdesc(%REG) to xchg ax, ax. 518 assert(rel.type == R_X86_64_TLSDESC_CALL); 519 loc[0] = 0x66; 520 loc[1] = 0x90; 521 } 522 } 523 524 void X86_64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, 525 uint64_t val) const { 526 if (rel.type == R_X86_64_TLSGD) { 527 // Convert 528 // .byte 0x66 529 // leaq x@tlsgd(%rip), %rdi 530 // .word 0x6666 531 // rex64 532 // call __tls_get_addr@plt 533 // to the following two instructions. 534 const uint8_t inst[] = { 535 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 536 0x00, 0x00, // mov %fs:0x0,%rax 537 0x48, 0x03, 0x05, 0, 0, 0, 0, // addq x@gottpoff(%rip),%rax 538 }; 539 memcpy(loc - 4, inst, sizeof(inst)); 540 541 // Both code sequences are PC relatives, but since we are moving the 542 // constant forward by 8 bytes we have to subtract the value by 8. 543 write32le(loc + 8, val - 8); 544 } else if (rel.type == R_X86_64_GOTPC32_TLSDESC || 545 rel.type == R_X86_64_CODE_4_GOTPC32_TLSDESC) { 546 // Convert leaq x@tlsdesc(%rip), %REG to movq x@gottpoff(%rip), %REG. 547 if ((loc[-3] & 0xfb) != 0x48 || loc[-2] != 0x8d || 548 (loc[-1] & 0xc7) != 0x05) { 549 Err(ctx) << getErrorLoc(ctx, (rel.type == R_X86_64_GOTPC32_TLSDESC) 550 ? loc - 3 551 : loc - 4) 552 << "R_X86_64_GOTPC32_TLSDESC/R_X86_64_CODE_4_GOTPC32_TLSDESC " 553 "must be used in leaq x@tlsdesc(%rip), %REG"; 554 return; 555 } 556 loc[-2] = 0x8b; 557 write32le(loc, val); 558 } else { 559 // Convert call *x@tlsdesc(%rax) to xchg ax, ax. 560 assert(rel.type == R_X86_64_TLSDESC_CALL); 561 loc[0] = 0x66; 562 loc[1] = 0x90; 563 } 564 } 565 566 // In some conditions, 567 // R_X86_64_GOTTPOFF/R_X86_64_CODE_4_GOTTPOFF/R_X86_64_CODE_6_GOTTPOFF 568 // relocation can be optimized to R_X86_64_TPOFF32 so that it does not use GOT. 569 void X86_64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, 570 uint64_t val) const { 571 uint8_t *inst = loc - 3; 572 uint8_t reg = loc[-1] >> 3; 573 uint8_t *regSlot = loc - 1; 574 575 if (rel.type == R_X86_64_GOTTPOFF) { 576 // Note that ADD with RSP or R12 is converted to ADD instead of LEA 577 // because LEA with these registers needs 4 bytes to encode and thus 578 // wouldn't fit the space. 579 580 if (memcmp(inst, "\x48\x03\x25", 3) == 0) { 581 // "addq foo@gottpoff(%rip),%rsp" -> "addq $foo,%rsp" 582 memcpy(inst, "\x48\x81\xc4", 3); 583 } else if (memcmp(inst, "\x4c\x03\x25", 3) == 0) { 584 // "addq foo@gottpoff(%rip),%r12" -> "addq $foo,%r12" 585 memcpy(inst, "\x49\x81\xc4", 3); 586 } else if (memcmp(inst, "\x4c\x03", 2) == 0) { 587 // "addq foo@gottpoff(%rip),%r[8-15]" -> "leaq foo(%r[8-15]),%r[8-15]" 588 memcpy(inst, "\x4d\x8d", 2); 589 *regSlot = 0x80 | (reg << 3) | reg; 590 } else if (memcmp(inst, "\x48\x03", 2) == 0) { 591 // "addq foo@gottpoff(%rip),%reg -> "leaq foo(%reg),%reg" 592 memcpy(inst, "\x48\x8d", 2); 593 *regSlot = 0x80 | (reg << 3) | reg; 594 } else if (memcmp(inst, "\x4c\x8b", 2) == 0) { 595 // "movq foo@gottpoff(%rip),%r[8-15]" -> "movq $foo,%r[8-15]" 596 memcpy(inst, "\x49\xc7", 2); 597 *regSlot = 0xc0 | reg; 598 } else if (memcmp(inst, "\x48\x8b", 2) == 0) { 599 // "movq foo@gottpoff(%rip),%reg" -> "movq $foo,%reg" 600 memcpy(inst, "\x48\xc7", 2); 601 *regSlot = 0xc0 | reg; 602 } else { 603 Err(ctx) 604 << getErrorLoc(ctx, loc - 3) 605 << "R_X86_64_GOTTPOFF must be used in MOVQ or ADDQ instructions only"; 606 } 607 } else if (rel.type == R_X86_64_CODE_4_GOTTPOFF) { 608 if (loc[-4] != 0xd5) { 609 Err(ctx) << getErrorLoc(ctx, loc - 4) 610 << "invalid prefix with R_X86_64_CODE_4_GOTTPOFF!"; 611 return; 612 } 613 const uint8_t rex = loc[-3]; 614 loc[-3] = (rex & ~0x44) | (rex & 0x44) >> 2; 615 *regSlot = 0xc0 | reg; 616 617 if (loc[-2] == 0x8b) { 618 // "movq foo@gottpoff(%rip),%r[16-31]" -> "movq $foo,%r[16-31]" 619 loc[-2] = 0xc7; 620 } else if (loc[-2] == 0x03) { 621 // "addq foo@gottpoff(%rip),%r[16-31]" -> "addq $foo,%r[16-31]" 622 loc[-2] = 0x81; 623 } else { 624 Err(ctx) << getErrorLoc(ctx, loc - 4) 625 << "R_X86_64_CODE_4_GOTTPOFF must be used in MOVQ or ADDQ " 626 "instructions only"; 627 } 628 } else if (rel.type == R_X86_64_CODE_6_GOTTPOFF) { 629 if (loc[-6] != 0x62) { 630 Err(ctx) << getErrorLoc(ctx, loc - 6) 631 << "invalid prefix with R_X86_64_CODE_6_GOTTPOFF!"; 632 return; 633 } 634 // Check bits are satisfied: 635 // loc[-5]: X==1 (inverted polarity), (loc[-5] & 0x7) == 0x4 636 // loc[-4]: W==1, X2==1 (inverted polarity), pp==0b00(NP) 637 // loc[-3]: NF==1 or ND==1 638 // loc[-2]: opcode==0x1 or opcode==0x3 639 // loc[-1]: Mod==0b00, RM==0b101 640 if (((loc[-5] & 0x47) == 0x44) && ((loc[-4] & 0x87) == 0x84) && 641 ((loc[-3] & 0x14) != 0) && (loc[-2] == 0x1 || loc[-2] == 0x3) && 642 ((loc[-1] & 0xc7) == 0x5)) { 643 // "addq %reg1, foo@GOTTPOFF(%rip), %reg2" -> "addq $foo, %reg1, %reg2" 644 // "addq foo@GOTTPOFF(%rip), %reg1, %reg2" -> "addq $foo, %reg1, %reg2" 645 // "{nf} addq %reg1, foo@GOTTPOFF(%rip), %reg2" 646 // -> "{nf} addq $foo, %reg1, %reg2" 647 // "{nf} addq name@GOTTPOFF(%rip), %reg1, %reg2" 648 // -> "{nf} addq $foo, %reg1, %reg2" 649 // "{nf} addq name@GOTTPOFF(%rip), %reg" -> "{nf} addq $foo, %reg" 650 loc[-2] = 0x81; 651 // Move R bits to B bits in EVEX payloads and ModRM byte. 652 const uint8_t evexPayload0 = loc[-5]; 653 if ((evexPayload0 & (1 << 7)) == 0) 654 loc[-5] = (evexPayload0 | (1 << 7)) & ~(1 << 5); 655 if ((evexPayload0 & (1 << 4)) == 0) 656 loc[-5] = evexPayload0 | (1 << 4) | (1 << 3); 657 *regSlot = 0xc0 | reg; 658 } else { 659 Err(ctx) << getErrorLoc(ctx, loc - 6) 660 << "R_X86_64_CODE_6_GOTTPOFF must be used in ADDQ instructions " 661 "with NDD/NF/NDD+NF only"; 662 } 663 } else { 664 llvm_unreachable("Unsupported relocation type!"); 665 } 666 667 // The original code used a PC relative relocation. 668 // Need to compensate for the -4 it had in the addend. 669 write32le(loc, val + 4); 670 } 671 672 void X86_64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, 673 uint64_t val) const { 674 const uint8_t inst[] = { 675 0x66, 0x66, // .word 0x6666 676 0x66, // .byte 0x66 677 0x64, 0x48, 0x8b, 0x04, 0x25, 0x00, 0x00, 0x00, 0x00, // mov %fs:0,%rax 678 }; 679 680 if (loc[4] == 0xe8) { 681 // Convert 682 // leaq bar@tlsld(%rip), %rdi # 48 8d 3d <Loc> 683 // callq __tls_get_addr@PLT # e8 <disp32> 684 // leaq bar@dtpoff(%rax), %rcx 685 // to 686 // .word 0x6666 687 // .byte 0x66 688 // mov %fs:0,%rax 689 // leaq bar@tpoff(%rax), %rcx 690 memcpy(loc - 3, inst, sizeof(inst)); 691 return; 692 } 693 694 if (loc[4] == 0xff && loc[5] == 0x15) { 695 // Convert 696 // leaq x@tlsld(%rip),%rdi # 48 8d 3d <Loc> 697 // call *__tls_get_addr@GOTPCREL(%rip) # ff 15 <disp32> 698 // to 699 // .long 0x66666666 700 // movq %fs:0,%rax 701 // See "Table 11.9: LD -> LE Code Transition (LP64)" in 702 // https://raw.githubusercontent.com/wiki/hjl-tools/x86-psABI/x86-64-psABI-1.0.pdf 703 loc[-3] = 0x66; 704 memcpy(loc - 2, inst, sizeof(inst)); 705 return; 706 } 707 708 ErrAlways(ctx) 709 << getErrorLoc(ctx, loc - 3) 710 << "expected R_X86_64_PLT32 or R_X86_64_GOTPCRELX after R_X86_64_TLSLD"; 711 } 712 713 // A JumpInstrMod at a specific offset indicates that the jump instruction 714 // opcode at that offset must be modified. This is specifically used to relax 715 // jump instructions with basic block sections. This function looks at the 716 // JumpMod and effects the change. 717 void X86_64::applyJumpInstrMod(uint8_t *loc, JumpModType type, 718 unsigned size) const { 719 switch (type) { 720 case J_JMP_32: 721 if (size == 4) 722 *loc = 0xe9; 723 else 724 *loc = 0xeb; 725 break; 726 case J_JE_32: 727 if (size == 4) { 728 loc[-1] = 0x0f; 729 *loc = 0x84; 730 } else 731 *loc = 0x74; 732 break; 733 case J_JNE_32: 734 if (size == 4) { 735 loc[-1] = 0x0f; 736 *loc = 0x85; 737 } else 738 *loc = 0x75; 739 break; 740 case J_JG_32: 741 if (size == 4) { 742 loc[-1] = 0x0f; 743 *loc = 0x8f; 744 } else 745 *loc = 0x7f; 746 break; 747 case J_JGE_32: 748 if (size == 4) { 749 loc[-1] = 0x0f; 750 *loc = 0x8d; 751 } else 752 *loc = 0x7d; 753 break; 754 case J_JB_32: 755 if (size == 4) { 756 loc[-1] = 0x0f; 757 *loc = 0x82; 758 } else 759 *loc = 0x72; 760 break; 761 case J_JBE_32: 762 if (size == 4) { 763 loc[-1] = 0x0f; 764 *loc = 0x86; 765 } else 766 *loc = 0x76; 767 break; 768 case J_JL_32: 769 if (size == 4) { 770 loc[-1] = 0x0f; 771 *loc = 0x8c; 772 } else 773 *loc = 0x7c; 774 break; 775 case J_JLE_32: 776 if (size == 4) { 777 loc[-1] = 0x0f; 778 *loc = 0x8e; 779 } else 780 *loc = 0x7e; 781 break; 782 case J_JA_32: 783 if (size == 4) { 784 loc[-1] = 0x0f; 785 *loc = 0x87; 786 } else 787 *loc = 0x77; 788 break; 789 case J_JAE_32: 790 if (size == 4) { 791 loc[-1] = 0x0f; 792 *loc = 0x83; 793 } else 794 *loc = 0x73; 795 break; 796 case J_UNKNOWN: 797 llvm_unreachable("Unknown Jump Relocation"); 798 } 799 } 800 801 int64_t X86_64::getImplicitAddend(const uint8_t *buf, RelType type) const { 802 switch (type) { 803 case R_X86_64_8: 804 case R_X86_64_PC8: 805 return SignExtend64<8>(*buf); 806 case R_X86_64_16: 807 case R_X86_64_PC16: 808 return SignExtend64<16>(read16le(buf)); 809 case R_X86_64_32: 810 case R_X86_64_32S: 811 case R_X86_64_TPOFF32: 812 case R_X86_64_GOT32: 813 case R_X86_64_GOTPC32: 814 case R_X86_64_GOTPC32_TLSDESC: 815 case R_X86_64_GOTPCREL: 816 case R_X86_64_GOTPCRELX: 817 case R_X86_64_REX_GOTPCRELX: 818 case R_X86_64_CODE_4_GOTPCRELX: 819 case R_X86_64_PC32: 820 case R_X86_64_GOTTPOFF: 821 case R_X86_64_CODE_4_GOTTPOFF: 822 case R_X86_64_CODE_6_GOTTPOFF: 823 case R_X86_64_PLT32: 824 case R_X86_64_TLSGD: 825 case R_X86_64_TLSLD: 826 case R_X86_64_DTPOFF32: 827 case R_X86_64_SIZE32: 828 return SignExtend64<32>(read32le(buf)); 829 case R_X86_64_64: 830 case R_X86_64_TPOFF64: 831 case R_X86_64_DTPOFF64: 832 case R_X86_64_DTPMOD64: 833 case R_X86_64_PC64: 834 case R_X86_64_SIZE64: 835 case R_X86_64_GLOB_DAT: 836 case R_X86_64_GOT64: 837 case R_X86_64_GOTOFF64: 838 case R_X86_64_GOTPC64: 839 case R_X86_64_PLTOFF64: 840 case R_X86_64_IRELATIVE: 841 case R_X86_64_RELATIVE: 842 return read64le(buf); 843 case R_X86_64_TLSDESC: 844 return read64le(buf + 8); 845 case R_X86_64_JUMP_SLOT: 846 case R_X86_64_NONE: 847 // These relocations are defined as not having an implicit addend. 848 return 0; 849 default: 850 InternalErr(ctx, buf) << "cannot read addend for relocation " << type; 851 return 0; 852 } 853 } 854 855 static void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val); 856 857 void X86_64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const { 858 switch (rel.type) { 859 case R_X86_64_8: 860 checkIntUInt(ctx, loc, val, 8, rel); 861 *loc = val; 862 break; 863 case R_X86_64_PC8: 864 checkInt(ctx, loc, val, 8, rel); 865 *loc = val; 866 break; 867 case R_X86_64_16: 868 checkIntUInt(ctx, loc, val, 16, rel); 869 write16le(loc, val); 870 break; 871 case R_X86_64_PC16: 872 checkInt(ctx, loc, val, 16, rel); 873 write16le(loc, val); 874 break; 875 case R_X86_64_32: 876 checkUInt(ctx, loc, val, 32, rel); 877 write32le(loc, val); 878 break; 879 case R_X86_64_32S: 880 case R_X86_64_GOT32: 881 case R_X86_64_GOTPC32: 882 case R_X86_64_GOTPCREL: 883 case R_X86_64_PC32: 884 case R_X86_64_PLT32: 885 case R_X86_64_DTPOFF32: 886 case R_X86_64_SIZE32: 887 checkInt(ctx, loc, val, 32, rel); 888 write32le(loc, val); 889 break; 890 case R_X86_64_64: 891 case R_X86_64_TPOFF64: 892 case R_X86_64_DTPOFF64: 893 case R_X86_64_PC64: 894 case R_X86_64_SIZE64: 895 case R_X86_64_GOT64: 896 case R_X86_64_GOTOFF64: 897 case R_X86_64_GOTPC64: 898 case R_X86_64_PLTOFF64: 899 write64le(loc, val); 900 break; 901 case R_X86_64_GOTPCRELX: 902 case R_X86_64_REX_GOTPCRELX: 903 case R_X86_64_CODE_4_GOTPCRELX: 904 if (rel.expr != R_GOT_PC) { 905 relaxGot(loc, rel, val); 906 } else { 907 checkInt(ctx, loc, val, 32, rel); 908 write32le(loc, val); 909 } 910 break; 911 case R_X86_64_GOTPC32_TLSDESC: 912 case R_X86_64_CODE_4_GOTPC32_TLSDESC: 913 case R_X86_64_TLSDESC_CALL: 914 case R_X86_64_TLSGD: 915 if (rel.expr == R_RELAX_TLS_GD_TO_LE) { 916 relaxTlsGdToLe(loc, rel, val); 917 } else if (rel.expr == R_RELAX_TLS_GD_TO_IE) { 918 relaxTlsGdToIe(loc, rel, val); 919 } else { 920 checkInt(ctx, loc, val, 32, rel); 921 write32le(loc, val); 922 } 923 break; 924 case R_X86_64_TLSLD: 925 if (rel.expr == R_RELAX_TLS_LD_TO_LE) { 926 relaxTlsLdToLe(loc, rel, val); 927 } else { 928 checkInt(ctx, loc, val, 32, rel); 929 write32le(loc, val); 930 } 931 break; 932 case R_X86_64_GOTTPOFF: 933 case R_X86_64_CODE_4_GOTTPOFF: 934 case R_X86_64_CODE_6_GOTTPOFF: 935 if (rel.expr == R_RELAX_TLS_IE_TO_LE) { 936 relaxTlsIeToLe(loc, rel, val); 937 } else { 938 checkInt(ctx, loc, val, 32, rel); 939 write32le(loc, val); 940 } 941 break; 942 case R_X86_64_TPOFF32: 943 checkInt(ctx, loc, val, 32, rel); 944 write32le(loc, val); 945 break; 946 947 case R_X86_64_TLSDESC: 948 // The addend is stored in the second 64-bit word. 949 write64le(loc + 8, val); 950 break; 951 default: 952 llvm_unreachable("unknown relocation"); 953 } 954 } 955 956 RelExpr X86_64::adjustGotPcExpr(RelType type, int64_t addend, 957 const uint8_t *loc) const { 958 // Only R_X86_64_[REX_]|[CODE_4_]GOTPCRELX can be relaxed. GNU as may emit 959 // GOTPCRELX with addend != -4. Such an instruction does not load the full GOT 960 // entry, so we cannot relax the relocation. E.g. movl x@GOTPCREL+4(%rip), 961 // %rax (addend=0) loads the high 32 bits of the GOT entry. 962 if (!ctx.arg.relax || addend != -4 || 963 (type != R_X86_64_GOTPCRELX && type != R_X86_64_REX_GOTPCRELX && 964 type != R_X86_64_CODE_4_GOTPCRELX)) 965 return R_GOT_PC; 966 const uint8_t op = loc[-2]; 967 const uint8_t modRm = loc[-1]; 968 969 // FIXME: When PIC is disabled and foo is defined locally in the 970 // lower 32 bit address space, memory operand in mov can be converted into 971 // immediate operand. Otherwise, mov must be changed to lea. We support only 972 // latter relaxation at this moment. 973 if (op == 0x8b) 974 return R_RELAX_GOT_PC; 975 976 // Relax call and jmp. 977 if (op == 0xff && (modRm == 0x15 || modRm == 0x25)) 978 return R_RELAX_GOT_PC; 979 980 // We don't support test/binop instructions without a REX/REX2 prefix. 981 if (type == R_X86_64_GOTPCRELX) 982 return R_GOT_PC; 983 984 // Relaxation of test, adc, add, and, cmp, or, sbb, sub, xor. 985 // If PIC then no relaxation is available. 986 return ctx.arg.isPic ? R_GOT_PC : R_RELAX_GOT_PC_NOPIC; 987 } 988 989 // A subset of relaxations can only be applied for no-PIC. This method 990 // handles such relaxations. Instructions encoding information was taken from: 991 // "Intel 64 and IA-32 Architectures Software Developer's Manual V2" 992 // (http://www.intel.com/content/dam/www/public/us/en/documents/manuals/ 993 // 64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf) 994 static void relaxGotNoPic(uint8_t *loc, uint64_t val, uint8_t op, uint8_t modRm, 995 bool isRex2) { 996 const uint8_t rex = loc[-3]; 997 // Convert "test %reg, foo@GOTPCREL(%rip)" to "test $foo, %reg". 998 if (op == 0x85) { 999 // See "TEST-Logical Compare" (4-428 Vol. 2B), 1000 // TEST r/m64, r64 uses "full" ModR / M byte (no opcode extension). 1001 1002 // ModR/M byte has form XX YYY ZZZ, where 1003 // YYY is MODRM.reg(register 2), ZZZ is MODRM.rm(register 1). 1004 // XX has different meanings: 1005 // 00: The operand's memory address is in reg1. 1006 // 01: The operand's memory address is reg1 + a byte-sized displacement. 1007 // 10: The operand's memory address is reg1 + a word-sized displacement. 1008 // 11: The operand is reg1 itself. 1009 // If an instruction requires only one operand, the unused reg2 field 1010 // holds extra opcode bits rather than a register code 1011 // 0xC0 == 11 000 000 binary. 1012 // 0x38 == 00 111 000 binary. 1013 // We transfer reg2 to reg1 here as operand. 1014 // See "2.1.3 ModR/M and SIB Bytes" (Vol. 2A 2-3). 1015 loc[-1] = 0xc0 | (modRm & 0x38) >> 3; // ModR/M byte. 1016 1017 // Change opcode from TEST r/m64, r64 to TEST r/m64, imm32 1018 // See "TEST-Logical Compare" (4-428 Vol. 2B). 1019 loc[-2] = 0xf7; 1020 1021 // Move R bit to the B bit in REX/REX2 byte. 1022 // REX byte is encoded as 0100WRXB, where 1023 // 0100 is 4bit fixed pattern. 1024 // REX.W When 1, a 64-bit operand size is used. Otherwise, when 0, the 1025 // default operand size is used (which is 32-bit for most but not all 1026 // instructions). 1027 // REX.R This 1-bit value is an extension to the MODRM.reg field. 1028 // REX.X This 1-bit value is an extension to the SIB.index field. 1029 // REX.B This 1-bit value is an extension to the MODRM.rm field or the 1030 // SIB.base field. 1031 // See "2.2.1.2 More on REX Prefix Fields " (2-8 Vol. 2A). 1032 // 1033 // REX2 prefix is encoded as 0xd5|M|R2|X2|B2|WRXB, where 1034 // 0xd5 is 1byte fixed pattern. 1035 // REX2's [W,R,X,B] have the same meanings as REX's. 1036 // REX2.M encodes the map id. 1037 // R2/X2/B2 provides the fifth and most siginicant bits of the R/X/B 1038 // register identifiers, each of which can now address all 32 GPRs. 1039 if (isRex2) 1040 loc[-3] = (rex & ~0x44) | (rex & 0x44) >> 2; 1041 else 1042 loc[-3] = (rex & ~0x4) | (rex & 0x4) >> 2; 1043 write32le(loc, val); 1044 return; 1045 } 1046 1047 // If we are here then we need to relax the adc, add, and, cmp, or, sbb, sub 1048 // or xor operations. 1049 1050 // Convert "binop foo@GOTPCREL(%rip), %reg" to "binop $foo, %reg". 1051 // Logic is close to one for test instruction above, but we also 1052 // write opcode extension here, see below for details. 1053 loc[-1] = 0xc0 | (modRm & 0x38) >> 3 | (op & 0x3c); // ModR/M byte. 1054 1055 // Primary opcode is 0x81, opcode extension is one of: 1056 // 000b = ADD, 001b is OR, 010b is ADC, 011b is SBB, 1057 // 100b is AND, 101b is SUB, 110b is XOR, 111b is CMP. 1058 // This value was wrote to MODRM.reg in a line above. 1059 // See "3.2 INSTRUCTIONS (A-M)" (Vol. 2A 3-15), 1060 // "INSTRUCTION SET REFERENCE, N-Z" (Vol. 2B 4-1) for 1061 // descriptions about each operation. 1062 loc[-2] = 0x81; 1063 if (isRex2) 1064 loc[-3] = (rex & ~0x44) | (rex & 0x44) >> 2; 1065 else 1066 loc[-3] = (rex & ~0x4) | (rex & 0x4) >> 2; 1067 write32le(loc, val); 1068 } 1069 1070 static void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) { 1071 assert(isInt<32>(val) && 1072 "GOTPCRELX should not have been relaxed if it overflows"); 1073 const uint8_t op = loc[-2]; 1074 const uint8_t modRm = loc[-1]; 1075 1076 // Convert "mov foo@GOTPCREL(%rip),%reg" to "lea foo(%rip),%reg". 1077 if (op == 0x8b) { 1078 loc[-2] = 0x8d; 1079 write32le(loc, val); 1080 return; 1081 } 1082 1083 if (op != 0xff) { 1084 // We are relaxing a rip relative to an absolute, so compensate 1085 // for the old -4 addend. 1086 assert(!rel.sym->file->ctx.arg.isPic); 1087 relaxGotNoPic(loc, val + 4, op, modRm, 1088 rel.type == R_X86_64_CODE_4_GOTPCRELX); 1089 return; 1090 } 1091 1092 // Convert call/jmp instructions. 1093 if (modRm == 0x15) { 1094 // ABI says we can convert "call *foo@GOTPCREL(%rip)" to "nop; call foo". 1095 // Instead we convert to "addr32 call foo" where addr32 is an instruction 1096 // prefix. That makes result expression to be a single instruction. 1097 loc[-2] = 0x67; // addr32 prefix 1098 loc[-1] = 0xe8; // call 1099 write32le(loc, val); 1100 return; 1101 } 1102 1103 // Convert "jmp *foo@GOTPCREL(%rip)" to "jmp foo; nop". 1104 // jmp doesn't return, so it is fine to use nop here, it is just a stub. 1105 assert(modRm == 0x25); 1106 loc[-2] = 0xe9; // jmp 1107 loc[3] = 0x90; // nop 1108 write32le(loc - 1, val + 1); 1109 } 1110 1111 // A split-stack prologue starts by checking the amount of stack remaining 1112 // in one of two ways: 1113 // A) Comparing of the stack pointer to a field in the tcb. 1114 // B) Or a load of a stack pointer offset with an lea to r10 or r11. 1115 bool X86_64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end, 1116 uint8_t stOther) const { 1117 if (!ctx.arg.is64) { 1118 ErrAlways(ctx) << "target doesn't support split stacks"; 1119 return false; 1120 } 1121 1122 if (loc + 8 >= end) 1123 return false; 1124 1125 // Replace "cmp %fs:0x70,%rsp" and subsequent branch 1126 // with "stc, nopl 0x0(%rax,%rax,1)" 1127 if (memcmp(loc, "\x64\x48\x3b\x24\x25", 5) == 0) { 1128 memcpy(loc, "\xf9\x0f\x1f\x84\x00\x00\x00\x00", 8); 1129 return true; 1130 } 1131 1132 // Adjust "lea X(%rsp),%rYY" to lea "(X - 0x4000)(%rsp),%rYY" where rYY could 1133 // be r10 or r11. The lea instruction feeds a subsequent compare which checks 1134 // if there is X available stack space. Making X larger effectively reserves 1135 // that much additional space. The stack grows downward so subtract the value. 1136 if (memcmp(loc, "\x4c\x8d\x94\x24", 4) == 0 || 1137 memcmp(loc, "\x4c\x8d\x9c\x24", 4) == 0) { 1138 // The offset bytes are encoded four bytes after the start of the 1139 // instruction. 1140 write32le(loc + 4, read32le(loc + 4) - 0x4000); 1141 return true; 1142 } 1143 return false; 1144 } 1145 1146 void X86_64::relocateAlloc(InputSectionBase &sec, uint8_t *buf) const { 1147 uint64_t secAddr = sec.getOutputSection()->addr; 1148 if (auto *s = dyn_cast<InputSection>(&sec)) 1149 secAddr += s->outSecOff; 1150 else if (auto *ehIn = dyn_cast<EhInputSection>(&sec)) 1151 secAddr += ehIn->getParent()->outSecOff; 1152 for (const Relocation &rel : sec.relocs()) { 1153 if (rel.expr == R_NONE) // See deleteFallThruJmpInsn 1154 continue; 1155 uint8_t *loc = buf + rel.offset; 1156 const uint64_t val = sec.getRelocTargetVA(ctx, rel, secAddr + rel.offset); 1157 relocate(loc, rel, val); 1158 } 1159 if (sec.jumpInstrMod) { 1160 applyJumpInstrMod(buf + sec.jumpInstrMod->offset, 1161 sec.jumpInstrMod->original, sec.jumpInstrMod->size); 1162 } 1163 } 1164 1165 // If Intel Indirect Branch Tracking is enabled, we have to emit special PLT 1166 // entries containing endbr64 instructions. A PLT entry will be split into two 1167 // parts, one in .plt.sec (writePlt), and the other in .plt (writeIBTPlt). 1168 namespace { 1169 class IntelIBT : public X86_64 { 1170 public: 1171 IntelIBT(Ctx &ctx) : X86_64(ctx) { pltHeaderSize = 0; }; 1172 void writeGotPlt(uint8_t *buf, const Symbol &s) const override; 1173 void writePlt(uint8_t *buf, const Symbol &sym, 1174 uint64_t pltEntryAddr) const override; 1175 void writeIBTPlt(uint8_t *buf, size_t numEntries) const override; 1176 1177 static const unsigned IBTPltHeaderSize = 16; 1178 }; 1179 } // namespace 1180 1181 void IntelIBT::writeGotPlt(uint8_t *buf, const Symbol &s) const { 1182 uint64_t va = ctx.in.ibtPlt->getVA() + IBTPltHeaderSize + 1183 s.getPltIdx(ctx) * pltEntrySize; 1184 write64le(buf, va); 1185 } 1186 1187 void IntelIBT::writePlt(uint8_t *buf, const Symbol &sym, 1188 uint64_t pltEntryAddr) const { 1189 const uint8_t Inst[] = { 1190 0xf3, 0x0f, 0x1e, 0xfa, // endbr64 1191 0xff, 0x25, 0, 0, 0, 0, // jmpq *got(%rip) 1192 0x66, 0x0f, 0x1f, 0x44, 0, 0, // nop 1193 }; 1194 memcpy(buf, Inst, sizeof(Inst)); 1195 write32le(buf + 6, sym.getGotPltVA(ctx) - pltEntryAddr - 10); 1196 } 1197 1198 void IntelIBT::writeIBTPlt(uint8_t *buf, size_t numEntries) const { 1199 writePltHeader(buf); 1200 buf += IBTPltHeaderSize; 1201 1202 const uint8_t inst[] = { 1203 0xf3, 0x0f, 0x1e, 0xfa, // endbr64 1204 0x68, 0, 0, 0, 0, // pushq <relocation index> 1205 0xe9, 0, 0, 0, 0, // jmpq plt[0] 1206 0x66, 0x90, // nop 1207 }; 1208 1209 for (size_t i = 0; i < numEntries; ++i) { 1210 memcpy(buf, inst, sizeof(inst)); 1211 write32le(buf + 5, i); 1212 write32le(buf + 10, -pltHeaderSize - sizeof(inst) * i - 30); 1213 buf += sizeof(inst); 1214 } 1215 } 1216 1217 // These nonstandard PLT entries are to migtigate Spectre v2 security 1218 // vulnerability. In order to mitigate Spectre v2, we want to avoid indirect 1219 // branch instructions such as `jmp *GOTPLT(%rip)`. So, in the following PLT 1220 // entries, we use a CALL followed by MOV and RET to do the same thing as an 1221 // indirect jump. That instruction sequence is so-called "retpoline". 1222 // 1223 // We have two types of retpoline PLTs as a size optimization. If `-z now` 1224 // is specified, all dynamic symbols are resolved at load-time. Thus, when 1225 // that option is given, we can omit code for symbol lazy resolution. 1226 namespace { 1227 class Retpoline : public X86_64 { 1228 public: 1229 Retpoline(Ctx &); 1230 void writeGotPlt(uint8_t *buf, const Symbol &s) const override; 1231 void writePltHeader(uint8_t *buf) const override; 1232 void writePlt(uint8_t *buf, const Symbol &sym, 1233 uint64_t pltEntryAddr) const override; 1234 }; 1235 1236 class RetpolineZNow : public X86_64 { 1237 public: 1238 RetpolineZNow(Ctx &); 1239 void writeGotPlt(uint8_t *buf, const Symbol &s) const override {} 1240 void writePltHeader(uint8_t *buf) const override; 1241 void writePlt(uint8_t *buf, const Symbol &sym, 1242 uint64_t pltEntryAddr) const override; 1243 }; 1244 } // namespace 1245 1246 Retpoline::Retpoline(Ctx &ctx) : X86_64(ctx) { 1247 pltHeaderSize = 48; 1248 pltEntrySize = 32; 1249 ipltEntrySize = 32; 1250 } 1251 1252 void Retpoline::writeGotPlt(uint8_t *buf, const Symbol &s) const { 1253 write64le(buf, s.getPltVA(ctx) + 17); 1254 } 1255 1256 void Retpoline::writePltHeader(uint8_t *buf) const { 1257 const uint8_t insn[] = { 1258 0xff, 0x35, 0, 0, 0, 0, // 0: pushq GOTPLT+8(%rip) 1259 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, // 6: mov GOTPLT+16(%rip), %r11 1260 0xe8, 0x0e, 0x00, 0x00, 0x00, // d: callq next 1261 0xf3, 0x90, // 12: loop: pause 1262 0x0f, 0xae, 0xe8, // 14: lfence 1263 0xeb, 0xf9, // 17: jmp loop 1264 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 19: int3; .align 16 1265 0x4c, 0x89, 0x1c, 0x24, // 20: next: mov %r11, (%rsp) 1266 0xc3, // 24: ret 1267 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 25: int3; padding 1268 0xcc, 0xcc, 0xcc, 0xcc, // 2c: int3; padding 1269 }; 1270 memcpy(buf, insn, sizeof(insn)); 1271 1272 uint64_t gotPlt = ctx.in.gotPlt->getVA(); 1273 uint64_t plt = ctx.in.plt->getVA(); 1274 write32le(buf + 2, gotPlt - plt - 6 + 8); 1275 write32le(buf + 9, gotPlt - plt - 13 + 16); 1276 } 1277 1278 void Retpoline::writePlt(uint8_t *buf, const Symbol &sym, 1279 uint64_t pltEntryAddr) const { 1280 const uint8_t insn[] = { 1281 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, // 0: mov foo@GOTPLT(%rip), %r11 1282 0xe8, 0, 0, 0, 0, // 7: callq plt+0x20 1283 0xe9, 0, 0, 0, 0, // c: jmp plt+0x12 1284 0x68, 0, 0, 0, 0, // 11: pushq <relocation index> 1285 0xe9, 0, 0, 0, 0, // 16: jmp plt+0 1286 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 1b: int3; padding 1287 }; 1288 memcpy(buf, insn, sizeof(insn)); 1289 1290 uint64_t off = pltEntryAddr - ctx.in.plt->getVA(); 1291 1292 write32le(buf + 3, sym.getGotPltVA(ctx) - pltEntryAddr - 7); 1293 write32le(buf + 8, -off - 12 + 32); 1294 write32le(buf + 13, -off - 17 + 18); 1295 write32le(buf + 18, sym.getPltIdx(ctx)); 1296 write32le(buf + 23, -off - 27); 1297 } 1298 1299 RetpolineZNow::RetpolineZNow(Ctx &ctx) : X86_64(ctx) { 1300 pltHeaderSize = 32; 1301 pltEntrySize = 16; 1302 ipltEntrySize = 16; 1303 } 1304 1305 void RetpolineZNow::writePltHeader(uint8_t *buf) const { 1306 const uint8_t insn[] = { 1307 0xe8, 0x0b, 0x00, 0x00, 0x00, // 0: call next 1308 0xf3, 0x90, // 5: loop: pause 1309 0x0f, 0xae, 0xe8, // 7: lfence 1310 0xeb, 0xf9, // a: jmp loop 1311 0xcc, 0xcc, 0xcc, 0xcc, // c: int3; .align 16 1312 0x4c, 0x89, 0x1c, 0x24, // 10: next: mov %r11, (%rsp) 1313 0xc3, // 14: ret 1314 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 15: int3; padding 1315 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, // 1a: int3; padding 1316 0xcc, // 1f: int3; padding 1317 }; 1318 memcpy(buf, insn, sizeof(insn)); 1319 } 1320 1321 void RetpolineZNow::writePlt(uint8_t *buf, const Symbol &sym, 1322 uint64_t pltEntryAddr) const { 1323 const uint8_t insn[] = { 1324 0x4c, 0x8b, 0x1d, 0, 0, 0, 0, // mov foo@GOTPLT(%rip), %r11 1325 0xe9, 0, 0, 0, 0, // jmp plt+0 1326 0xcc, 0xcc, 0xcc, 0xcc, // int3; padding 1327 }; 1328 memcpy(buf, insn, sizeof(insn)); 1329 1330 write32le(buf + 3, sym.getGotPltVA(ctx) - pltEntryAddr - 7); 1331 write32le(buf + 8, ctx.in.plt->getVA() - pltEntryAddr - 12); 1332 } 1333 1334 void elf::setX86_64TargetInfo(Ctx &ctx) { 1335 if (ctx.arg.zRetpolineplt) { 1336 if (ctx.arg.zNow) 1337 ctx.target.reset(new RetpolineZNow(ctx)); 1338 else 1339 ctx.target.reset(new Retpoline(ctx)); 1340 return; 1341 } 1342 1343 if (ctx.arg.andFeatures & GNU_PROPERTY_X86_FEATURE_1_IBT) 1344 ctx.target.reset(new IntelIBT(ctx)); 1345 else 1346 ctx.target.reset(new X86_64(ctx)); 1347 } 1348