//===-- RuntimeDyldMachOAArch64.h -- MachO/AArch64 specific code. -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H
#define LLVM_LIB_EXECUTIONENGINE_RUNTIMEDYLD_TARGETS_RUNTIMEDYLDMACHOAARCH64_H

#include "../RuntimeDyldMachO.h"
#include "llvm/Support/Endian.h"

#define DEBUG_TYPE "dyld"

namespace llvm {

class RuntimeDyldMachOAArch64
    : public RuntimeDyldMachOCRTPBase<RuntimeDyldMachOAArch64> {
public:

  typedef uint64_t TargetPtrT;

  RuntimeDyldMachOAArch64(RTDyldMemoryManager *MM)
      : RuntimeDyldMachOCRTPBase(MM) {}

  unsigned getMaxStubSize() override { return 8; }

  unsigned getStubAlignment() override { return 8; }

  /// Extract the addend encoded in the instruction / memory location.
  int64_t decodeAddend(const RelocationEntry &RE) const {
    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    unsigned NumBytes = 1 << RE.Size;
    int64_t Addend = 0;
    // Verify that the relocation has the correct size and alignment.
    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RE.RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        Addend = *reinterpret_cast<support::ulittle32_t *>(LocalAddress);
      else
        Addend = *reinterpret_cast<support::ulittle64_t *>(LocalAddress);
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      // Verify that the relocation points to the expected branch instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Get the 26 bit addend encoded in the branch instruction and
      // sign-extend to 64 bit. The lower 2 bits are always zeros and are
      // therefore implicit (<< 2).
      Addend = (*p & 0x03FFFFFF) << 2;
      Addend = SignExtend64(Addend, 28);
      break;
    }
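    // Note on the adrp cases below (illustrative summary of the AArch64
    // encoding): adrp forms a PC-relative address at 4 KiB page granularity.
    // Its 21-bit immediate is split into immlo (bits 30:29) and immhi
    // (bits 23:5), and the materialized value is (immhi:immlo) << 12. For
    // example, "adrp x0, #+0x1000" (one page ahead) encodes as 0xB0000000:
    // immlo = 1, immhi = 0, so the decoded addend is 1 << 12 = 0x1000.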
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Get the 21 bit addend encoded in the adrp instruction and sign-extend
      // to 64 bit. The lower 12 bits (4096 byte page) are always zeros and are
      // therefore implicit (<< 12).
      Addend = ((*p & 0x60000000) >> 29) | ((*p & 0x01FFFFE0) >> 3);
      Addend = SignExtend64(Addend << 12, 33);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      (void)p;
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Get the 12 bit addend encoded in the instruction.
      Addend = (*p & 0x003FFC00) >> 10;

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        if (ImplicitShift == 0) {
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000)
            ImplicitShift = 4;
        }
      }
      // Compensate for implicit shift.
      Addend <<= ImplicitShift;
      break;
    }
    }
    return Addend;
  }
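  // Worked example for the PAGEOFF12 scaling above (illustrative): the
  // AArch64 unsigned-offset load/store forms scale their 12-bit immediate by
  // the access size. "ldr x1, [x0, #16]" is a 64-bit load (size bits
  // 31:30 == 0b11), so it encodes imm12 = 16 >> 3 = 2, and decodeAddend
  // recovers 2 << 3 = 16. A 128-bit vector access (the 0x04800000 check)
  // scales by 16 instead.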
  /// Write the addend back into the instruction / memory location.
  void encodeAddend(uint8_t *LocalAddress, unsigned NumBytes,
                    MachO::RelocationInfoType RelType, int64_t Addend) const {
    // Verify that the relocation has the correct alignment.
    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      assert((NumBytes == 4 || NumBytes == 8) && "Invalid relocation size.");
      break;
    case MachO::ARM64_RELOC_BRANCH26:
    case MachO::ARM64_RELOC_PAGE21:
    case MachO::ARM64_RELOC_PAGEOFF12:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
      assert(NumBytes == 4 && "Invalid relocation size.");
      assert((((uintptr_t)LocalAddress & 0x3) == 0) &&
             "Instruction address is not aligned to 4 bytes.");
      break;
    }

    switch (RelType) {
    default:
      llvm_unreachable("Unsupported relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED:
      // This could be an unaligned memory location.
      if (NumBytes == 4)
        *reinterpret_cast<support::ulittle32_t *>(LocalAddress) = Addend;
      else
        *reinterpret_cast<support::ulittle64_t *>(LocalAddress) = Addend;
      break;
    case MachO::ARM64_RELOC_BRANCH26: {
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      // Verify that the relocation points to the expected branch instruction.
      assert((*p & 0xFC000000) == 0x14000000 && "Expected branch instruction.");

      // Verify addend value.
      assert((Addend & 0x3) == 0 && "Branch target is not aligned");
      assert(isInt<28>(Addend) && "Branch target is out of range.");

      // Encode the addend as 26 bit immediate in the branch instruction.
      *p = (*p & 0xFC000000) | ((uint32_t)(Addend >> 2) & 0x03FFFFFF);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      // Verify that the relocation points to the expected adrp instruction.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x9F000000) == 0x90000000 && "Expected adrp instruction.");

      // Check that the addend fits into 21 bits (+ 12 lower bits).
      assert((Addend & 0xFFF) == 0 && "ADRP target is not page aligned.");
      assert(isInt<33>(Addend) && "Invalid page reloc value.");

      // Encode the addend into the instruction.
      uint32_t ImmLoValue = ((uint64_t)Addend << 17) & 0x60000000;
      uint32_t ImmHiValue = ((uint64_t)Addend >> 9) & 0x00FFFFE0;
      *p = (*p & 0x9F00001F) | ImmHiValue | ImmLoValue;
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((*p & 0x3B000000) == 0x39000000 &&
             "Only expected load / store instructions.");
      (void)p;
    } // fall-through
    case MachO::ARM64_RELOC_PAGEOFF12: {
      // Verify that the relocation points to one of the expected load / store
      // or add / sub instructions.
      auto *p = reinterpret_cast<support::aligned_ulittle32_t *>(LocalAddress);
      assert((((*p & 0x3B000000) == 0x39000000) ||
              ((*p & 0x11C00000) == 0x11000000)) &&
             "Expected load / store or add/sub instruction.");

      // Check which instruction we are decoding to obtain the implicit shift
      // factor of the instruction and verify alignment.
      int ImplicitShift = 0;
      if ((*p & 0x3B000000) == 0x39000000) { // << load / store
        // For load / store instructions the size is encoded in bits 31:30.
        ImplicitShift = ((*p >> 30) & 0x3);
        switch (ImplicitShift) {
        case 0:
          // Check if this is a vector op to get the correct shift value.
          if ((*p & 0x04800000) == 0x04800000) {
            ImplicitShift = 4;
            assert(((Addend & 0xF) == 0) &&
                   "128-bit LDR/STR not 16-byte aligned.");
          }
          break;
        case 1:
          assert(((Addend & 0x1) == 0) && "16-bit LDR/STR not 2-byte aligned.");
          break;
        case 2:
          assert(((Addend & 0x3) == 0) && "32-bit LDR/STR not 4-byte aligned.");
          break;
        case 3:
          assert(((Addend & 0x7) == 0) && "64-bit LDR/STR not 8-byte aligned.");
          break;
        }
      }
      // Compensate for implicit shift.
      Addend >>= ImplicitShift;
      assert(isUInt<12>(Addend) && "Addend cannot be encoded.");

      // Encode the addend into the instruction.
      *p = (*p & 0xFFC003FF) | ((uint32_t)(Addend << 10) & 0x003FFC00);
      break;
    }
    }
  }
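  // Background for the ARM64_RELOC_ADDEND handling below (illustrative): a
  // MachO/AArch64 object encodes "adrp x0, foo + 8" as a relocation pair: an
  // ARM64_RELOC_ADDEND whose symbol-number field carries the 24-bit addend
  // (here 8), immediately followed by the ARM64_RELOC_PAGE21 against foo.
  // processRelocationRef folds such a pair into a single RelocationEntry.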
  relocation_iterator
  processRelocationRef(unsigned SectionID, relocation_iterator RelI,
                       const ObjectFile &BaseObjT,
                       ObjSectionToIDMap &ObjSectionToID,
                       StubMap &Stubs) override {
    const MachOObjectFile &Obj =
        static_cast<const MachOObjectFile &>(BaseObjT);
    MachO::any_relocation_info RelInfo =
        Obj.getRelocation(RelI->getRawDataRefImpl());

    assert(!Obj.isRelocationScattered(RelInfo) &&
           "Scattered relocations not supported on AArch64.");

    // ARM64 has an ARM64_RELOC_ADDEND relocation type that carries an explicit
    // addend for the following relocation. If found: (1) store the associated
    // addend, (2) consume the next relocation, and (3) use the stored addend
    // to override the addend of the consumed relocation.
    int64_t ExplicitAddend = 0;
    if (Obj.getAnyRelocationType(RelInfo) == MachO::ARM64_RELOC_ADDEND) {
      assert(!Obj.getPlainRelocationExternal(RelInfo));
      assert(!Obj.getAnyRelocationPCRel(RelInfo));
      assert(Obj.getAnyRelocationLength(RelInfo) == 2);
      int64_t RawAddend = Obj.getPlainRelocationSymbolNum(RelInfo);
      // Sign-extend the 24-bit addend to 64 bits.
      ExplicitAddend = SignExtend64(RawAddend, 24);
      ++RelI;
      RelInfo = Obj.getRelocation(RelI->getRawDataRefImpl());
    }

    RelocationEntry RE(getRelocationEntry(SectionID, Obj, RelI));
    RE.Addend = decodeAddend(RE);
    RelocationValueRef Value(
        getRelocationValueRef(Obj, RelI, RE, ObjSectionToID));

    assert((ExplicitAddend == 0 || RE.Addend == 0) &&
           "Relocation has ARM64_RELOC_ADDEND and embedded addend in the "
           "instruction.");
    if (ExplicitAddend) {
      RE.Addend = ExplicitAddend;
      Value.Offset = ExplicitAddend;
    }

    bool IsExtern = Obj.getPlainRelocationExternal(RelInfo);
    if (!IsExtern && RE.IsPCRel)
      makeValueAddendPCRel(Value, Obj, RelI, 1 << RE.Size);

    RE.Addend = Value.Offset;

    if (RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGE21 ||
        RE.RelType == MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12)
      processGOTRelocation(RE, Value, Stubs);
    else {
      if (Value.SymbolName)
        addRelocationForSymbol(RE, Value.SymbolName);
      else
        addRelocationForSection(RE, Value.SectionID);
    }

    return ++RelI;
  }

  void resolveRelocation(const RelocationEntry &RE, uint64_t Value) override {
    DEBUG(dumpRelocationToResolve(RE, Value));

    const SectionEntry &Section = Sections[RE.SectionID];
    uint8_t *LocalAddress = Section.Address + RE.Offset;
    MachO::RelocationInfoType RelType =
        static_cast<MachO::RelocationInfoType>(RE.RelType);

    switch (RelType) {
    default:
      llvm_unreachable("Invalid relocation type!");
    case MachO::ARM64_RELOC_UNSIGNED: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_UNSIGNED not supported");
      // There is no alignment guarantee for the target address, so the value
      // is written through the unaligned little-endian helpers in
      // encodeAddend.
      if (RE.Size < 2)
        llvm_unreachable("Invalid size for ARM64_RELOC_UNSIGNED");

      encodeAddend(LocalAddress, 1 << RE.Size, RelType, Value + RE.Addend);
      break;
    }
    case MachO::ARM64_RELOC_BRANCH26: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_BRANCH26 not supported");
      // Compute the PC-relative branch displacement; encodeAddend verifies
      // that it is in range.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal = Value - FinalAddress + RE.Addend;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
    case MachO::ARM64_RELOC_GOT_LOAD_PAGE21:
    case MachO::ARM64_RELOC_PAGE21: {
      assert(RE.IsPCRel && "not PCRel and ARM64_RELOC_PAGE21 not supported");
      // Adjust for PC-relative relocation and offset.
      uint64_t FinalAddress = Section.LoadAddress + RE.Offset;
      int64_t PCRelVal =
          ((Value + RE.Addend) & (-4096)) - (FinalAddress & (-4096));
      encodeAddend(LocalAddress, /*Size=*/4, RelType, PCRelVal);
      break;
    }
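    // Page arithmetic example for the PAGE21 case above (illustrative
    // addresses): if Value + RE.Addend is 0x100003F80 and the fixup sits at
    // FinalAddress 0x100001004, the page delta is 0x100003000 - 0x100001000
    // = 0x2000, so the adrp is patched to span two pages. The low 12 bits
    // (0xF80) are supplied by the matching PAGEOFF12 fixup handled below.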
    case MachO::ARM64_RELOC_GOT_LOAD_PAGEOFF12:
    case MachO::ARM64_RELOC_PAGEOFF12: {
      assert(!RE.IsPCRel && "PCRel and ARM64_RELOC_PAGEOFF12 not supported");
      // Add the offset from the symbol.
      Value += RE.Addend;
      // Mask out the page address and only use the lower 12 bits.
      Value &= 0xFFF;
      encodeAddend(LocalAddress, /*Size=*/4, RelType, Value);
      break;
    }
    case MachO::ARM64_RELOC_SUBTRACTOR:
    case MachO::ARM64_RELOC_POINTER_TO_GOT:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGE21:
    case MachO::ARM64_RELOC_TLVP_LOAD_PAGEOFF12:
      llvm_unreachable("Relocation type not yet implemented!");
    case MachO::ARM64_RELOC_ADDEND:
      llvm_unreachable("ARM64_RELOC_ADDEND should have been handled by "
                       "processRelocationRef!");
    }
  }

  void finalizeSection(const ObjectFile &Obj, unsigned SectionID,
                       const SectionRef &Section) {}

private:
  void processGOTRelocation(const RelocationEntry &RE,
                            RelocationValueRef &Value, StubMap &Stubs) {
    assert(RE.Size == 2);
    SectionEntry &Section = Sections[RE.SectionID];
    StubMap::const_iterator i = Stubs.find(Value);
    int64_t Offset;
    if (i != Stubs.end())
      Offset = static_cast<int64_t>(i->second);
    else {
      // FIXME: There must be a better way to do this than to check and fix
      // the alignment every time!!!
      uintptr_t BaseAddress = uintptr_t(Section.Address);
      uintptr_t StubAlignment = getStubAlignment();
      uintptr_t StubAddress =
          (BaseAddress + Section.StubOffset + StubAlignment - 1) &
          -StubAlignment;
      unsigned StubOffset = StubAddress - BaseAddress;
      Stubs[Value] = StubOffset;
      assert(((StubAddress % getStubAlignment()) == 0) &&
             "GOT entry not aligned");
      RelocationEntry GOTRE(RE.SectionID, StubOffset,
                            MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
                            /*IsPCRel=*/false, /*Size=*/3);
      if (Value.SymbolName)
        addRelocationForSymbol(GOTRE, Value.SymbolName);
      else
        addRelocationForSection(GOTRE, Value.SectionID);
      Section.StubOffset = StubOffset + getMaxStubSize();
      Offset = static_cast<int64_t>(StubOffset);
    }
    RelocationEntry TargetRE(RE.SectionID, RE.Offset, RE.RelType, Offset,
                             RE.IsPCRel, RE.Size);
    addRelocationForSection(TargetRE, RE.SectionID);
  }
};
} // namespace llvm

#undef DEBUG_TYPE

#endif