//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"

#define DEBUG_TYPE "jitlink"

using namespace llvm;

namespace {

// FIXME: Remove this copy of CWrapperFunctionResult as soon as JITLink can
// depend on shared utils from Orc.

// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
union CWrapperFunctionResultDataUnion {
  char *ValuePtr;
  char Value[sizeof(ValuePtr)];
};

// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
typedef struct {
  CWrapperFunctionResultDataUnion Data;
  size_t Size;
} CWrapperFunctionResult;

Error toError(CWrapperFunctionResult R) {
  bool HasError = false;
  std::string ErrMsg;
  if (R.Size) {
    bool Large = R.Size > sizeof(CWrapperFunctionResultDataUnion);
    char *Content = Large ? R.Data.ValuePtr : R.Data.Value;
    if (Content[0]) {
      HasError = true;
      constexpr unsigned StrStart = 1 + sizeof(uint64_t);
      ErrMsg.resize(R.Size - StrStart);
      memcpy(&ErrMsg[0], Content + StrStart, R.Size - StrStart);
    }
    if (Large)
      free(R.Data.ValuePtr);
  } else if (R.Data.ValuePtr) {
    HasError = true;
    ErrMsg = R.Data.ValuePtr;
    free(R.Data.ValuePtr);
  }
  if (HasError)
    return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
  return Error::success();
}

} // namespace

namespace llvm {
namespace jitlink {

JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;

static Error runAllocAction(AllocActionCall &C) {
  using WrapperFnTy = CWrapperFunctionResult (*)(const void *, size_t);
  auto *Fn = jitTargetAddressToPointer<WrapperFnTy>(C.FnAddr);

  return toError(Fn(jitTargetAddressToPointer<const void *>(C.CtxAddr),
                    static_cast<size_t>(C.CtxSize)));
}
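// BasicLayout groups the graph's blocks into segments keyed by AllocGroup
// (memory protections + deallocation policy), and computes the content size,
// zero-fill size, and alignment required for each segment.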
BasicLayout::BasicLayout(LinkGraph &G) : G(G) {

  for (auto &Sec : G.sections()) {
    // Skip empty sections.
    if (empty(Sec.blocks()))
      continue;

    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
    for (auto *B : Sec.blocks())
      if (LLVM_LIKELY(!B->isZeroFill()))
        Seg.ContentBlocks.push_back(B);
      else
        Seg.ZeroFillBlocks.push_back(B);
  }

  // Build Segments map.
  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
    // Sort by section, address and size
    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
    if (LHS->getAddress() != RHS->getAddress())
      return LHS->getAddress() < RHS->getAddress();
    return LHS->getSize() < RHS->getSize();
  };

  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    llvm::sort(Seg.ContentBlocks, CompareBlocks);
    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);

    for (auto *B : Seg.ContentBlocks) {
      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
      Seg.ContentSize += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }

    uint64_t SegEndOffset = Seg.ContentSize;
    for (auto *B : Seg.ZeroFillBlocks) {
      SegEndOffset = alignToBlock(SegEndOffset, *B);
      SegEndOffset += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }
    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;

    LLVM_DEBUG({
      dbgs() << "  Seg " << KV.first
             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
    });
  }
}

Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
  ContiguousPageBasedLayoutSizes SegsSizes;

  for (auto &KV : segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    if (Seg.Alignment > PageSize)
      return make_error<StringError>("Segment alignment greater than page size",
                                     inconvertibleErrorCode());

    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
    if (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
      SegsSizes.StandardSegs += SegSize;
    else
      SegsSizes.FinalizeSegs += SegSize;
  }

  return SegsSizes;
}

Error BasicLayout::apply() {
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
           "Empty section recorded?");

    for (auto *B : Seg.ContentBlocks) {
      // Align addr and working-mem-offset.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);

      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();

      // Copy content to working memory, then update content to point at
      // working memory.
      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
             B->getSize());
      B->setMutableContent(
          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
      Seg.NextWorkingMemOffset += B->getSize();
    }

    for (auto *B : Seg.ZeroFillBlocks) {
      // Align addr.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();
    }

    Seg.ContentBlocks.clear();
    Seg.ZeroFillBlocks.clear();
  }

  return Error::success();
}

AllocActions &BasicLayout::graphAllocActions() { return G.allocActions(); }
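// SimpleSegmentAlloc::Create builds a temporary LinkGraph containing one
// section (and, for segments with content, one mutable-content block) per
// requested AllocGroup, then passes that graph to the given memory manager.
// This lets clients obtain memory through the JITLinkMemoryManager interface
// without constructing a real LinkGraph themselves.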
void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
                                const JITLinkDylib *JD, SegmentMap Segments,
                                OnCreatedFunction OnCreated) {

  static_assert(AllocGroup::NumGroups == 16,
                "AllocGroup has changed. Section names below must be updated");

  StringRef AGSectionNames[] = {
      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};

  auto G =
      std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
  AllocGroupSmallMap<Block *> ContentBlocks;

  JITTargetAddress NextAddr = 0x100000;
  for (auto &KV : Segments) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto AGSectionName =
        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
                       static_cast<bool>(AG.getMemDeallocPolicy()) << 3];

    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
    Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());

    if (Seg.ContentSize != 0) {
      NextAddr = alignTo(NextAddr, Seg.ContentAlign);
      auto &B =
          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
                                       NextAddr, Seg.ContentAlign.value(), 0);
      ContentBlocks[AG] = &B;
      NextAddr += Seg.ContentSize;
    }
  }

  // GRef declared separately since order-of-argument-eval isn't specified.
  auto &GRef = *G;
  MemMgr.allocate(JD, GRef,
                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
                   OnCreated = std::move(OnCreated)](
                      JITLinkMemoryManager::AllocResult Alloc) mutable {
                    if (!Alloc)
                      OnCreated(Alloc.takeError());
                    else
                      OnCreated(SimpleSegmentAlloc(std::move(G),
                                                   std::move(ContentBlocks),
                                                   std::move(*Alloc)));
                  });
}

Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                           SegmentMap Segments) {
  std::promise<Expected<SimpleSegmentAlloc>> AllocP;
  auto AllocF = AllocP.get_future();
  Create(MemMgr, JD, std::move(Segments),
         [&](Expected<SimpleSegmentAlloc> Result) {
           AllocP.set_value(std::move(Result));
         });
  return AllocF.get();
}

SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() {}

SimpleSegmentAlloc::SegmentInfo SimpleSegmentAlloc::getSegInfo(AllocGroup AG) {
  auto I = ContentBlocks.find(AG);
  if (I != ContentBlocks.end()) {
    auto &B = *I->second;
    return {B.getAddress(), B.getAlreadyMutableContent()};
  }
  return {};
}

SimpleSegmentAlloc::SimpleSegmentAlloc(
    std::unique_ptr<LinkGraph> G, AllocGroupSmallMap<Block *> ContentBlocks,
    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
      Alloc(std::move(Alloc)) {}
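// In-flight allocation for InProcessMemoryManager. It owns the standard and
// finalization memory slabs while the graph is being linked: finalize()
// applies memory protections, runs the graph's finalize actions, releases the
// finalization slab, and returns a FinalizedAlloc that owns the standard
// slab; abandon() releases both slabs without running any actions.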
class InProcessMemoryManager::IPInFlightAlloc
    : public JITLinkMemoryManager::InFlightAlloc {
public:
  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
                  sys::MemoryBlock StandardSegments,
                  sys::MemoryBlock FinalizationSegments)
      : MemMgr(MemMgr), G(G), BL(std::move(BL)),
        StandardSegments(std::move(StandardSegments)),
        FinalizationSegments(std::move(FinalizationSegments)) {}

  void finalize(OnFinalizedFunction OnFinalized) override {

    // Apply memory protections to all segments.
    if (auto Err = applyProtections()) {
      OnFinalized(std::move(Err));
      return;
    }

    // Run finalization actions.
    // FIXME: Roll back previous successful actions on failure.
    std::vector<AllocActionCall> DeallocActions;
    DeallocActions.reserve(G.allocActions().size());
    for (auto &ActPair : G.allocActions()) {
      if (ActPair.Finalize.FnAddr)
        if (auto Err = runAllocAction(ActPair.Finalize)) {
          OnFinalized(std::move(Err));
          return;
        }
      if (ActPair.Dealloc.FnAddr)
        DeallocActions.push_back(ActPair.Dealloc);
    }
    G.allocActions().clear();

    // Release the finalize segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
      OnFinalized(errorCodeToError(EC));
      return;
    }

    // Continue with finalized allocation.
    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
                                            std::move(DeallocActions)));
  }

  void abandon(OnAbandonedFunction OnAbandoned) override {
    Error Err = Error::success();
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    OnAbandoned(std::move(Err));
  }

private:
  Error applyProtections() {
    for (auto &KV : BL.segments()) {
      const auto &AG = KV.first;
      auto &Seg = KV.second;

      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());

      uint64_t SegSize =
          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
        return errorCodeToError(EC);
      if (Prot & sys::Memory::MF_EXEC)
        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
    }
    return Error::success();
  }

  InProcessMemoryManager &MemMgr;
  LinkGraph &G;
  BasicLayout BL;
  sys::MemoryBlock StandardSegments;
  sys::MemoryBlock FinalizationSegments;
};

Expected<std::unique_ptr<InProcessMemoryManager>>
InProcessMemoryManager::Create() {
  if (auto PageSize = sys::Process::getPageSize())
    return std::make_unique<InProcessMemoryManager>(*PageSize);
  else
    return PageSize.takeError();
}
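// allocate() reserves a single read/write slab large enough for all segments,
// partitions it into standard and finalize regions, assigns page-aligned
// addresses to each segment, and copies block content into place via
// BasicLayout::apply(). Memory protections are only applied later, at
// finalization time, by IPInFlightAlloc::applyProtections().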
void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
                                      OnAllocatedFunction OnAllocated) {

  // FIXME: Just check this once on startup.
  if (!isPowerOf2_64((uint64_t)PageSize)) {
    OnAllocated(make_error<StringError>("Page size is not a power of 2",
                                        inconvertibleErrorCode()));
    return;
  }

  BasicLayout BL(G);

  /// Scan the request and calculate the group and total sizes.
  /// Check that segment size is no larger than a page.
  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!SegsSizes) {
    OnAllocated(SegsSizes.takeError());
    return;
  }

  /// Check that the total size requested (including zero fill) is not larger
  /// than a size_t.
  if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
    OnAllocated(make_error<JITLinkError>(
        "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
        " for graph " + G.getName() + " exceeds address space"));
    return;
  }

  // Allocate one slab for the whole thing (to make sure everything is
  // in-range), then partition into standard and finalization blocks.
  //
  // FIXME: Make two separate allocations in the future to reduce
  // fragmentation: finalization segments will usually be a single page, and
  // standard segments are likely to be more than one page. Where multiple
  // allocations are in-flight at once (likely) the current approach will leave
  // a lot of single-page holes.
  sys::MemoryBlock Slab;
  sys::MemoryBlock StandardSegsMem;
  sys::MemoryBlock FinalizeSegsMem;
  {
    const sys::Memory::ProtectionFlags ReadWrite =
        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
                                                  sys::Memory::MF_WRITE);

    std::error_code EC;
    Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
                                             ReadWrite, EC);

    if (EC) {
      OnAllocated(errorCodeToError(EC));
      return;
    }

    // Zero-fill the whole slab up-front.
    memset(Slab.base(), 0, Slab.allocatedSize());

    StandardSegsMem = {Slab.base(),
                       static_cast<size_t>(SegsSizes->StandardSegs)};
    FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
                       static_cast<size_t>(SegsSizes->FinalizeSegs)};
  }

  auto NextStandardSegAddr = pointerToJITTargetAddress(StandardSegsMem.base());
  auto NextFinalizeSegAddr = pointerToJITTargetAddress(FinalizeSegsMem.base());

  LLVM_DEBUG({
    dbgs() << "InProcessMemoryManager allocated:\n";
    if (SegsSizes->StandardSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
                        NextStandardSegAddr + StandardSegsMem.allocatedSize())
             << " to standard segs\n";
    else
      dbgs() << "  no standard segs\n";
    if (SegsSizes->FinalizeSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
                        NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
             << " to finalize segs\n";
    else
      dbgs() << "  no finalize segs\n";
  });

  // Build ProtMap, assign addresses.
  for (auto &KV : BL.segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto &SegAddr = (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
                        ? NextStandardSegAddr
                        : NextFinalizeSegAddr;

    Seg.WorkingMem = jitTargetAddressToPointer<char *>(SegAddr);
    Seg.Addr = SegAddr;

    SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
  }

  if (auto Err = BL.apply()) {
    OnAllocated(std::move(Err));
    return;
  }

  OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
                                                std::move(StandardSegsMem),
                                                std::move(FinalizeSegsMem)));
}

void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
                                        OnDeallocatedFunction OnDeallocated) {
  std::vector<sys::MemoryBlock> StandardSegmentsList;
  std::vector<std::vector<AllocActionCall>> DeallocActionsList;

  {
    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
    for (auto &Alloc : Allocs) {
      auto *FA =
          jitTargetAddressToPointer<FinalizedAllocInfo *>(Alloc.release());
      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
      if (!FA->DeallocActions.empty())
        DeallocActionsList.push_back(std::move(FA->DeallocActions));
      FA->~FinalizedAllocInfo();
      FinalizedAllocInfos.Deallocate(FA);
    }
  }

  Error DeallocErr = Error::success();

  while (!DeallocActionsList.empty()) {
    auto &DeallocActions = DeallocActionsList.back();
    auto &StandardSegments = StandardSegmentsList.back();

    /// Run any deallocate calls.
    while (!DeallocActions.empty()) {
      if (auto Err = runAllocAction(DeallocActions.back()))
        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
      DeallocActions.pop_back();
    }

    /// Release the standard segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));

    DeallocActionsList.pop_back();
    StandardSegmentsList.pop_back();
  }

  OnDeallocated(std::move(DeallocErr));
}

JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
    sys::MemoryBlock StandardSegments,
    std::vector<AllocActionCall> DeallocActions) {
  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
  new (FA) FinalizedAllocInfo(
      {std::move(StandardSegments), std::move(DeallocActions)});
  return FinalizedAlloc(pointerToJITTargetAddress(FA));
}

} // end namespace jitlink
} // end namespace llvm