//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"

#define DEBUG_TYPE "jitlink"

namespace llvm {
namespace jitlink {

JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;

static Error runAllocAction(JITLinkMemoryManager::AllocActionCall &C) {
  using DeallocFnTy = char *(*)(const void *, size_t);
  auto *Fn = jitTargetAddressToPointer<DeallocFnTy>(C.FnAddr);

  if (char *ErrMsg = Fn(jitTargetAddressToPointer<const void *>(C.CtxAddr),
                        static_cast<size_t>(C.CtxSize))) {
    auto E = make_error<StringError>(ErrMsg, inconvertibleErrorCode());
    free(ErrMsg);
    return E;
  }

  return Error::success();
}

// Align a JITTargetAddress to conform with block alignment requirements.
static JITTargetAddress alignToBlock(JITTargetAddress Addr, Block &B) {
  uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
  return Addr + Delta;
}

BasicLayout::BasicLayout(LinkGraph &G) : G(G) {

  for (auto &Sec : G.sections()) {
    // Skip empty sections.
    if (empty(Sec.blocks()))
      continue;

    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
    for (auto *B : Sec.blocks())
      if (LLVM_LIKELY(!B->isZeroFill()))
        Seg.ContentBlocks.push_back(B);
      else
        Seg.ZeroFillBlocks.push_back(B);
  }

  // Sort the blocks within each segment and compute the segment's content
  // size, zero-fill size, and alignment.
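  //
  // Offsets below are advanced with alignToBlock (defined above), which bumps
  // an offset so that Offset % Alignment == AlignmentOffset. For example
  // (illustrative values, not from the original source): a block with
  // alignment 0x10 and alignment-offset 0x4 at offset 0x1001 lands at 0x1004.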
  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
    // Sort by section, address and size
    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
    if (LHS->getAddress() != RHS->getAddress())
      return LHS->getAddress() < RHS->getAddress();
    return LHS->getSize() < RHS->getSize();
  };

  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    llvm::sort(Seg.ContentBlocks, CompareBlocks);
    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);

    for (auto *B : Seg.ContentBlocks) {
      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
      Seg.ContentSize += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }

    uint64_t SegEndOffset = Seg.ContentSize;
    for (auto *B : Seg.ZeroFillBlocks) {
      SegEndOffset = alignToBlock(SegEndOffset, *B);
      SegEndOffset += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }
    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;

    LLVM_DEBUG({
      dbgs() << "  Seg " << KV.first
             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
    });
  }
}

Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
  ContiguousPageBasedLayoutSizes SegsSizes;

  for (auto &KV : segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    if (Seg.Alignment > PageSize)
      return make_error<StringError>("Segment alignment greater than page size",
                                     inconvertibleErrorCode());

    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
    if (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
      SegsSizes.StandardSegs += SegSize;
    else
      SegsSizes.FinalizeSegs += SegSize;
  }

  return SegsSizes;
}

Error BasicLayout::apply() {
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
           "Empty section recorded?");

    for (auto *B : Seg.ContentBlocks) {
      // Align addr and working-mem-offset.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);

      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();

      // Copy content to working memory, then update content to point at
      // working memory.
      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
             B->getSize());
      B->setMutableContent(
          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
      Seg.NextWorkingMemOffset += B->getSize();
    }

    for (auto *B : Seg.ZeroFillBlocks) {
      // Align addr.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      // Update block addr.
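      // (Zero-fill blocks are only assigned addresses; no content is copied
      // for them.)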
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();
    }

    Seg.ContentBlocks.clear();
    Seg.ZeroFillBlocks.clear();
  }

  return Error::success();
}

JITLinkMemoryManager::AllocActions &BasicLayout::graphAllocActions() {
  return G.allocActions();
}

void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
                                const JITLinkDylib *JD, SegmentMap Segments,
                                OnCreatedFunction OnCreated) {

  static_assert(AllocGroup::NumGroups == 16,
                "AllocGroup has changed. Section names below must be updated");
  StringRef AGSectionNames[] = {
      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};

  auto G =
      std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
  AllocGroupSmallMap<Block *> ContentBlocks;

  JITTargetAddress NextAddr = 0x100000;
  for (auto &KV : Segments) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto AGSectionName =
        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
                       static_cast<bool>(AG.getMemDeallocPolicy()) << 3];

    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
    Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());

    if (Seg.ContentSize != 0) {
      NextAddr = alignTo(NextAddr, Seg.ContentAlign);
      auto &B =
          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
                                       NextAddr, Seg.ContentAlign.value(), 0);
      ContentBlocks[AG] = &B;
      NextAddr += Seg.ContentSize;
    }
  }

  // GRef declared separately since order-of-argument-eval isn't specified.
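  // (If *G were passed directly below, the G = std::move(G) capture could be
  // evaluated before the LinkGraph argument, leaving a moved-from unique_ptr.)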
  auto &GRef = *G;
  MemMgr.allocate(JD, GRef,
                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
                   OnCreated = std::move(OnCreated)](
                      JITLinkMemoryManager::AllocResult Alloc) mutable {
                    if (!Alloc)
                      OnCreated(Alloc.takeError());
                    else
                      OnCreated(SimpleSegmentAlloc(std::move(G),
                                                   std::move(ContentBlocks),
                                                   std::move(*Alloc)));
                  });
}

Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                           SegmentMap Segments) {
  std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
  auto AllocF = AllocP.get_future();
  Create(MemMgr, JD, std::move(Segments),
         [&](Expected<SimpleSegmentAlloc> Result) {
           AllocP.set_value(std::move(Result));
         });
  return AllocF.get();
}

SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() {}

SimpleSegmentAlloc::SegmentInfo SimpleSegmentAlloc::getSegInfo(AllocGroup AG) {
  auto I = ContentBlocks.find(AG);
  if (I != ContentBlocks.end()) {
    auto &B = *I->second;
    return {B.getAddress(), B.getAlreadyMutableContent()};
  }
  return {};
}

SimpleSegmentAlloc::SimpleSegmentAlloc(
    std::unique_ptr<LinkGraph> G, AllocGroupSmallMap<Block *> ContentBlocks,
    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
      Alloc(std::move(Alloc)) {}

class InProcessMemoryManager::IPInFlightAlloc
    : public JITLinkMemoryManager::InFlightAlloc {
public:
  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
                  sys::MemoryBlock StandardSegments,
                  sys::MemoryBlock FinalizationSegments)
      : MemMgr(MemMgr), G(G), BL(std::move(BL)),
        StandardSegments(std::move(StandardSegments)),
        FinalizationSegments(std::move(FinalizationSegments)) {}

  void finalize(OnFinalizedFunction OnFinalized) override {

    // Apply memory protections to all segments.
    if (auto Err = applyProtections()) {
      OnFinalized(std::move(Err));
      return;
    }

    // Run finalization actions.
    // FIXME: Roll back previous successful actions on failure.
    std::vector<AllocActionCall> DeallocActions;
    DeallocActions.reserve(G.allocActions().size());
    for (auto &ActPair : G.allocActions()) {
      if (ActPair.Finalize.FnAddr)
        if (auto Err = runAllocAction(ActPair.Finalize)) {
          OnFinalized(std::move(Err));
          return;
        }
      if (ActPair.Dealloc.FnAddr)
        DeallocActions.push_back(ActPair.Dealloc);
    }
    G.allocActions().clear();

    // Release the finalize segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
      OnFinalized(errorCodeToError(EC));
      return;
    }

    // Continue with finalized allocation.
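    // Ownership of the standard segments and the collected dealloc actions
    // moves to the FinalizedAlloc handed to the caller; deallocate() will run
    // the actions and release the segments later.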
    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
                                            std::move(DeallocActions)));
  }

  void abandon(OnAbandonedFunction OnAbandoned) override {
    Error Err = Error::success();
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    OnAbandoned(std::move(Err));
  }

private:
  Error applyProtections() {
    for (auto &KV : BL.segments()) {
      const auto &AG = KV.first;
      auto &Seg = KV.second;

      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());

      uint64_t SegSize =
          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
        return errorCodeToError(EC);
      if (Prot & sys::Memory::MF_EXEC)
        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
    }
    return Error::success();
  }

  InProcessMemoryManager &MemMgr;
  LinkGraph &G;
  BasicLayout BL;
  sys::MemoryBlock StandardSegments;
  sys::MemoryBlock FinalizationSegments;
};

Expected<std::unique_ptr<InProcessMemoryManager>>
InProcessMemoryManager::Create() {
  if (auto PageSize = sys::Process::getPageSize())
    return std::make_unique<InProcessMemoryManager>(*PageSize);
  else
    return PageSize.takeError();
}

void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
                                      OnAllocatedFunction OnAllocated) {

  // FIXME: Just check this once on startup.
  if (!isPowerOf2_64((uint64_t)PageSize)) {
    OnAllocated(make_error<StringError>("Page size is not a power of 2",
                                        inconvertibleErrorCode()));
    return;
  }

  BasicLayout BL(G);

  // Scan the request and calculate the group and total sizes.
  // Check that each segment's alignment is no greater than the page size.
  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!SegsSizes) {
    OnAllocated(SegsSizes.takeError());
    return;
  }

  // Allocate one slab for the whole thing (to make sure everything is
  // in-range), then partition into standard and finalization blocks.
  //
  // FIXME: Make two separate allocations in the future to reduce
  // fragmentation: finalization segments will usually be a single page, and
  // standard segments are likely to be more than one page. Where multiple
  // allocations are in-flight at once (likely) the current approach will leave
  // a lot of single-page holes.
  sys::MemoryBlock Slab;
  sys::MemoryBlock StandardSegsMem;
  sys::MemoryBlock FinalizeSegsMem;
  {
    const sys::Memory::ProtectionFlags ReadWrite =
        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
                                                  sys::Memory::MF_WRITE);

    std::error_code EC;
    Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
                                             ReadWrite, EC);

    if (EC) {
      OnAllocated(errorCodeToError(EC));
      return;
    }

    // Zero-fill the whole slab up-front.
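    // (BasicLayout::apply copies only content blocks into working memory, so
    // zero-fill blocks rely on the slab being pre-zeroed.)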
    memset(Slab.base(), 0, Slab.allocatedSize());

    StandardSegsMem = {Slab.base(), SegsSizes->StandardSegs};
    FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
                       SegsSizes->FinalizeSegs};
  }

  auto NextStandardSegAddr = pointerToJITTargetAddress(StandardSegsMem.base());
  auto NextFinalizeSegAddr = pointerToJITTargetAddress(FinalizeSegsMem.base());

  LLVM_DEBUG({
    dbgs() << "InProcessMemoryManager allocated:\n";
    if (SegsSizes->StandardSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
                        NextStandardSegAddr + StandardSegsMem.allocatedSize())
             << " to standard segs\n";
    else
      dbgs() << "  no standard segs\n";
    if (SegsSizes->FinalizeSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
                        NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
             << " to finalize segs\n";
    else
      dbgs() << "  no finalize segs\n";
  });

  // Assign addresses and working memory to the segments.
  for (auto &KV : BL.segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto &SegAddr = (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
                        ? NextStandardSegAddr
                        : NextFinalizeSegAddr;

    Seg.WorkingMem = jitTargetAddressToPointer<char *>(SegAddr);
    Seg.Addr = SegAddr;

    SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
  }

  if (auto Err = BL.apply()) {
    OnAllocated(std::move(Err));
    return;
  }

  OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
                                                std::move(StandardSegsMem),
                                                std::move(FinalizeSegsMem)));
}

void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
                                        OnDeallocatedFunction OnDeallocated) {
  std::vector<sys::MemoryBlock> StandardSegmentsList;
  std::vector<std::vector<AllocActionCall>> DeallocActionsList;

  {
    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
    for (auto &Alloc : Allocs) {
      auto *FA =
          jitTargetAddressToPointer<FinalizedAllocInfo *>(Alloc.release());
      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
      if (!FA->DeallocActions.empty())
        DeallocActionsList.push_back(std::move(FA->DeallocActions));
      FA->~FinalizedAllocInfo();
      FinalizedAllocInfos.Deallocate(FA);
    }
  }

  Error DeallocErr = Error::success();

  while (!DeallocActionsList.empty()) {
    auto &DeallocActions = DeallocActionsList.back();
    auto &StandardSegments = StandardSegmentsList.back();

    // Run any deallocate calls.
    while (!DeallocActions.empty()) {
      if (auto Err = runAllocAction(DeallocActions.back()))
        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
      DeallocActions.pop_back();
    }

    // Release the standard segments slab.
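    // (The dealloc actions were run first above; their context may point into
    // these segments.)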
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));

    DeallocActionsList.pop_back();
    StandardSegmentsList.pop_back();
  }

  OnDeallocated(std::move(DeallocErr));
}

JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
    sys::MemoryBlock StandardSegments,
    std::vector<AllocActionCall> DeallocActions) {
  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
  new (FA) FinalizedAllocInfo(
      {std::move(StandardSegments), std::move(DeallocActions)});
  return FinalizedAlloc(pointerToJITTargetAddress(FA));
}

} // end namespace jitlink
} // end namespace llvm