//===- X86_64.cpp ---------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "InputFiles.h"
#include "Symbols.h"
#include "SyntheticSections.h"
#include "Target.h"

#include "lld/Common/ErrorHandler.h"
#include "mach-o/compact_unwind_encoding.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/Support/Endian.h"

using namespace llvm::MachO;
using namespace llvm::support::endian;
using namespace lld;
using namespace lld::macho;

namespace {

struct X86_64 : TargetInfo {
  X86_64();

  int64_t getEmbeddedAddend(MemoryBufferRef, uint64_t offset,
                            const relocation_info) const override;
  void relocateOne(uint8_t *loc, const Reloc &, uint64_t va,
                   uint64_t relocVA) const override;

  void writeStub(uint8_t *buf, const Symbol &,
                 uint64_t pointerVA) const override;
  void writeStubHelperHeader(uint8_t *buf) const override;
  void writeStubHelperEntry(uint8_t *buf, const Symbol &,
                            uint64_t entryAddr) const override;

  void writeObjCMsgSendStub(uint8_t *buf, Symbol *sym, uint64_t stubsAddr,
                            uint64_t stubOffset, uint64_t selrefsVA,
                            uint64_t selectorIndex, uint64_t gotAddr,
                            uint64_t msgSendIndex) const override;

  void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
  uint64_t getPageSize() const override { return 4 * 1024; }

  void handleDtraceReloc(const Symbol *sym, const Reloc &r,
                         uint8_t *loc) const override;
};
} // namespace

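// Relocation attributes for each Mach-O x86-64 relocation type, indexed by
// r_type (X86_64_RELOC_UNSIGNED == 0 through X86_64_RELOC_TLV == 9), so the
// entries below must stay in enum order.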
static constexpr std::array<RelocAttrs, 10> relocAttrsArray{{
#define B(x) RelocAttrBits::x
    {"UNSIGNED",
     B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(BYTE4) | B(BYTE8)},
    {"SIGNED", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
    {"BRANCH", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
    {"GOT_LOAD", B(PCREL) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
    {"GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
    {"SUBTRACTOR", B(SUBTRAHEND) | B(EXTERN) | B(BYTE4) | B(BYTE8)},
    {"SIGNED_1", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
    {"SIGNED_2", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
    {"SIGNED_4", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
    {"TLV", B(PCREL) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
#undef B
}};

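// X86_64_RELOC_SIGNED_{1,2,4} are used when the 4-byte displacement is
// followed by that many bytes of instruction (e.g. an immediate operand, as in
// `movl $0x1, _foo(%rip)`), so the displacement is encoded relative to the end
// of the whole instruction rather than the end of the displacement field. This
// helper returns the number of those trailing bytes.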
static int pcrelOffset(uint8_t type) {
  switch (type) {
  case X86_64_RELOC_SIGNED_1:
    return 1;
  case X86_64_RELOC_SIGNED_2:
    return 2;
  case X86_64_RELOC_SIGNED_4:
    return 4;
  default:
    return 0;
  }
}

int64_t X86_64::getEmbeddedAddend(MemoryBufferRef mb, uint64_t offset,
                                  relocation_info rel) const {
  auto *buf = reinterpret_cast<const uint8_t *>(mb.getBufferStart());
  const uint8_t *loc = buf + offset + rel.r_address;

  switch (rel.r_length) {
  case 2:
    return static_cast<int32_t>(read32le(loc)) + pcrelOffset(rel.r_type);
  case 3:
    return read64le(loc) + pcrelOffset(rel.r_type);
  default:
    llvm_unreachable("invalid r_length");
  }
}

void X86_64::relocateOne(uint8_t *loc, const Reloc &r, uint64_t value,
                         uint64_t relocVA) const {
  if (r.pcrel) {
    uint64_t pc = relocVA + 4 + pcrelOffset(r.type);
    value -= pc;
  }

  switch (r.length) {
  case 2:
    if (r.type == X86_64_RELOC_UNSIGNED)
      checkUInt(loc, r, value, 32);
    else
      checkInt(loc, r, value, 32);
    write32le(loc, value);
    break;
  case 3:
    write64le(loc, value);
    break;
  default:
    llvm_unreachable("invalid r_length");
  }
}

// The following methods emit a number of assembly sequences with RIP-relative
// addressing. Note that RIP-relative addressing on X86-64 has the RIP pointing
// to the next instruction, not the current instruction, so we always have to
// account for the current instruction's size when calculating offsets.
// writeRipRelative helps with that.
//
// bufAddr: The virtual address corresponding to buf[0].
// bufOff: The offset within buf of the next instruction.
// destAddr: The destination address that the current instruction references.
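//
// For example, the 6-byte `jmpq *__la_symbol_ptr(%rip)` stub below is written
// with bufOff == 6, so the displacement stored in its last four bytes is
// destAddr - (bufAddr + 6), i.e. relative to the end of the instruction.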
static void writeRipRelative(SymbolDiagnostic d, uint8_t *buf, uint64_t bufAddr,
                             uint64_t bufOff, uint64_t destAddr) {
  uint64_t rip = bufAddr + bufOff;
  checkInt(buf, d, destAddr - rip, 32);
  // For the instructions we care about, the RIP-relative address is always
  // stored in the last 4 bytes of the instruction.
  write32le(buf + bufOff - 4, destAddr - rip);
}

static constexpr uint8_t stub[] = {
    0xff, 0x25, 0, 0, 0, 0, // jmpq *__la_symbol_ptr(%rip)
};

void X86_64::writeStub(uint8_t *buf, const Symbol &sym,
                       uint64_t pointerVA) const {
  memcpy(buf, stub, 2); // just copy the two nonzero bytes
  uint64_t stubAddr = in.stubs->addr + sym.stubsIndex * sizeof(stub);
  writeRipRelative({&sym, "stub"}, buf, stubAddr, sizeof(stub), pointerVA);
}

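// Lazy binding: until a symbol is first called, its __la_symbol_ptr entry
// points at the corresponding stub helper entry below. That entry pushes the
// symbol's lazy bind offset and jumps to this header, which in turn pushes a
// pointer to the ImageLoader cache and tail-calls dyld_stub_binder.
// dyld_stub_binder resolves the symbol and overwrites the __la_symbol_ptr
// slot, so later calls through the stub go straight to the target.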
static constexpr uint8_t stubHelperHeader[] = {
    0x4c, 0x8d, 0x1d, 0, 0, 0, 0, // 0x0: leaq ImageLoaderCache(%rip), %r11
    0x41, 0x53,                   // 0x7: pushq %r11
    0xff, 0x25, 0, 0, 0, 0,       // 0x9: jmpq *dyld_stub_binder@GOT(%rip)
    0x90,                         // 0xf: nop
};

void X86_64::writeStubHelperHeader(uint8_t *buf) const {
  memcpy(buf, stubHelperHeader, sizeof(stubHelperHeader));
  SymbolDiagnostic d = {nullptr, "stub helper header"};
  writeRipRelative(d, buf, in.stubHelper->addr, 7,
                   in.imageLoaderCache->getVA());
  writeRipRelative(d, buf, in.stubHelper->addr, 0xf,
                   in.got->addr +
                       in.stubHelper->stubBinder->gotIndex * LP64::wordSize);
}

static constexpr uint8_t stubHelperEntry[] = {
    0x68, 0, 0, 0, 0, // 0x0: pushq <bind offset>
    0xe9, 0, 0, 0, 0, // 0x5: jmp <__stub_helper>
};

void X86_64::writeStubHelperEntry(uint8_t *buf, const Symbol &sym,
                                  uint64_t entryAddr) const {
  memcpy(buf, stubHelperEntry, sizeof(stubHelperEntry));
  write32le(buf + 1, sym.lazyBindOffset);
  writeRipRelative({&sym, "stub helper"}, buf, entryAddr,
                   sizeof(stubHelperEntry), in.stubHelper->addr);
}

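// Stub body for a synthesized _objc_msgSend$<selector> symbol: load the
// selector from __objc_selrefs into %rsi (the second argument) and tail-call
// _objc_msgSend through its GOT slot.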
static constexpr uint8_t objcStubsFastCode[] = {
    0x48, 0x8b, 0x35, 0, 0, 0, 0, // 0x0: movq selrefs@selector(%rip), %rsi
    0xff, 0x25, 0, 0, 0, 0,       // 0x7: jmpq *_objc_msgSend@GOT(%rip)
};

void X86_64::writeObjCMsgSendStub(uint8_t *buf, Symbol *sym, uint64_t stubsAddr,
                                  uint64_t stubOffset, uint64_t selrefsVA,
                                  uint64_t selectorIndex, uint64_t gotAddr,
                                  uint64_t msgSendIndex) const {
  memcpy(buf, objcStubsFastCode, sizeof(objcStubsFastCode));
  SymbolDiagnostic d = {sym, sym->getName()};
  uint64_t stubAddr = stubsAddr + stubOffset;
  writeRipRelative(d, buf, stubAddr, 7,
                   selrefsVA + selectorIndex * LP64::wordSize);
  writeRipRelative(d, buf, stubAddr, 0xd,
                   gotAddr + msgSendIndex * LP64::wordSize);
}

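// When the target of a GOT_LOAD does not actually need a GOT entry (e.g. it is
// a defined, non-interposable symbol), the load through the GOT can be relaxed
// into a direct address computation: the MOVQ opcode (0x8b) two bytes before
// the fixup is rewritten to LEAQ (0x8d), turning
// `movq _foo@GOTPCREL(%rip), %reg` into `leaq _foo(%rip), %reg` while leaving
// the ModRM byte and displacement in place.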
void X86_64::relaxGotLoad(uint8_t *loc, uint8_t type) const {
  // Convert MOVQ to LEAQ
  if (loc[-2] != 0x8b)
    error(getRelocAttrs(type).name + " reloc requires MOVQ instruction");
  loc[-2] = 0x8d;
}

X86_64::X86_64() : TargetInfo(LP64()) {
  cpuType = CPU_TYPE_X86_64;
  cpuSubtype = CPU_SUBTYPE_X86_64_ALL;

  modeDwarfEncoding = UNWIND_X86_MODE_DWARF;
  subtractorRelocType = X86_64_RELOC_SUBTRACTOR;
  unsignedRelocType = X86_64_RELOC_UNSIGNED;

  stubSize = sizeof(stub);
  stubHelperHeaderSize = sizeof(stubHelperHeader);
  stubHelperEntrySize = sizeof(stubHelperEntry);

  objcStubsFastSize = sizeof(objcStubsFastCode);
  objcStubsAlignment = 1;

  relocAttrs = {relocAttrsArray.data(), relocAttrsArray.size()};
}

TargetInfo *macho::createX86_64TargetInfo() {
  static X86_64 t;
  return &t;
}

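// DTrace probe sites are emitted as ordinary 5-byte calls to symbols named
// ___dtrace_probe$... or ___dtrace_isenabled$.... In final images the linker
// removes the calls: a probe call becomes a NOP sled (0x90 followed by the
// 4-byte `nopl 0(%rax)`), and an is-enabled call becomes `xorl %eax, %eax`
// followed by NOPs, so the site stays inert unless dtrace(1) patches it at
// runtime. `loc` points at the call's 4-byte displacement, so loc[-1] is the
// call opcode byte.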
void X86_64::handleDtraceReloc(const Symbol *sym, const Reloc &r,
                               uint8_t *loc) const {
  assert(r.type == X86_64_RELOC_BRANCH);

  if (config->outputType == MH_OBJECT)
    return;

  if (sym->getName().startswith("___dtrace_probe")) {
    // change call site to a NOP
    loc[-1] = 0x90;
    write32le(loc, 0x00401F0F);
  } else if (sym->getName().startswith("___dtrace_isenabled")) {
    // change call site to a clear eax
    loc[-1] = 0x33;
    write32le(loc, 0x909090C0);
  } else {
    error("Unrecognized dtrace symbol prefix: " + toString(*sym));
  }
}