xref: /llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp (revision f55ba3525eb19baed7d3f23638cbbd880246a370)
1 //===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
9 #include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
10 #include "llvm/ExecutionEngine/JITLink/JITLink.h"
11 #include "llvm/Support/FormatVariadic.h"
12 #include "llvm/Support/Process.h"
13 
14 #define DEBUG_TYPE "jitlink"
15 
16 using namespace llvm;
17 
18 namespace {
19 
20 // FIXME: Remove this copy of CWrapperFunctionResult as soon as JITLink can
21 // depend on shared utils from Orc.
22 
// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
// Result payload storage: results that fit in a pointer are stored inline in
// Value; larger results are heap-allocated and referenced via ValuePtr (the
// size check in toError below distinguishes the two cases).
union CWrapperFunctionResultDataUnion {
  char *ValuePtr;
  char Value[sizeof(ValuePtr)];
};

// Must be kept in-sync with compiler-rt/lib/orc/c-api.h.
// C-ABI wrapper-function result: a data union plus the payload size in bytes.
typedef struct {
  CWrapperFunctionResultDataUnion Data;
  size_t Size;
} CWrapperFunctionResult;
34 
// Convert a CWrapperFunctionResult into an llvm::Error. Takes ownership of
// the result: any heap-allocated payload (out-of-line buffer or error string)
// is freed here.
Error toError(CWrapperFunctionResult R) {
  bool HasError = false;
  std::string ErrMsg;
  if (R.Size) {
    // Payloads larger than the inline union storage live out-of-line behind
    // ValuePtr and must be freed below.
    bool Large = R.Size > sizeof(CWrapperFunctionResultDataUnion);
    char *Content = Large ? R.Data.ValuePtr : R.Data.Value;
    if (Content[0]) {
      // A non-zero first byte marks an error payload: the message text starts
      // after a one-byte tag and a uint64_t length field.
      HasError = true;
      constexpr unsigned StrStart = 1 + sizeof(uint64_t);
      ErrMsg.resize(R.Size - StrStart);
      memcpy(&ErrMsg[0], Content + StrStart, R.Size - StrStart);
    }
    if (Large)
      free(R.Data.ValuePtr);
  } else if (R.Data.ValuePtr) {
    // Size == 0 with a non-null pointer denotes an out-of-band error whose
    // message is a heap-allocated NUL-terminated C string.
    HasError = true;
    ErrMsg = R.Data.ValuePtr;
    free(R.Data.ValuePtr);
  }

  if (HasError)
    return make_error<StringError>(std::move(ErrMsg), inconvertibleErrorCode());
  return Error::success();
}
59 } // namespace
60 
61 namespace llvm {
62 namespace jitlink {
63 
// Out-of-line destructor definitions for the abstract memory-manager
// interfaces declared in the header.
JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;
66 
// Run a single allocation action in-process: cast the action's target address
// to a wrapper-function pointer, invoke it on the (context, size) pair, and
// convert the returned CWrapperFunctionResult into an llvm::Error.
static Error runAllocAction(JITLinkMemoryManager::AllocActionCall &C) {
  using WrapperFnTy = CWrapperFunctionResult (*)(const void *, size_t);
  // In-process, target addresses are directly callable host pointers.
  auto *Fn = jitTargetAddressToPointer<WrapperFnTy>(C.FnAddr);

  return toError(Fn(jitTargetAddressToPointer<const void *>(C.CtxAddr),
                    static_cast<size_t>(C.CtxSize)));
}
74 
// Align a JITTargetAddress to conform with block alignment requirements:
// returns the smallest address >= Addr that is congruent to
// B.getAlignmentOffset() modulo B.getAlignment().
static JITTargetAddress alignToBlock(JITTargetAddress Addr, Block &B) {
  // Unsigned subtraction wraps, so the modulo yields the forward distance to
  // the next address satisfying the (alignment, offset) constraint.
  uint64_t Delta = (B.getAlignmentOffset() - Addr) % B.getAlignment();
  return Addr + Delta;
}
80 
// Build a BasicLayout for G: bucket every block by its section's
// (MemProt, MemDeallocPolicy) pair, then compute per-segment content size,
// zero-fill size and alignment.
BasicLayout::BasicLayout(LinkGraph &G) : G(G) {

  // Build the Segments map, separating content blocks from zero-fill blocks.
  for (auto &Sec : G.sections()) {
    // Skip empty sections.
    if (empty(Sec.blocks()))
      continue;

    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
    for (auto *B : Sec.blocks())
      if (LLVM_LIKELY(!B->isZeroFill()))
        Seg.ContentBlocks.push_back(B);
      else
        Seg.ZeroFillBlocks.push_back(B);
  }

  // Comparator giving each segment a deterministic block order.
  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
    // Sort by section, address and size
    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
    if (LHS->getAddress() != RHS->getAddress())
      return LHS->getAddress() < RHS->getAddress();
    return LHS->getSize() < RHS->getSize();
  };

  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    llvm::sort(Seg.ContentBlocks, CompareBlocks);
    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);

    // Accumulate the content size, honoring each block's alignment, and track
    // the segment's maximum alignment.
    for (auto *B : Seg.ContentBlocks) {
      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
      Seg.ContentSize += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }

    // Zero-fill blocks are laid out after the content; the zero-fill size is
    // the distance from the end of the content to the end of the last
    // zero-fill block, including alignment padding.
    uint64_t SegEndOffset = Seg.ContentSize;
    for (auto *B : Seg.ZeroFillBlocks) {
      SegEndOffset = alignToBlock(SegEndOffset, *B);
      SegEndOffset += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }
    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;

    LLVM_DEBUG({
      dbgs() << "  Seg " << KV.first
             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
    });
  }
}
135 
136 Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
137 BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
138   ContiguousPageBasedLayoutSizes SegsSizes;
139 
140   for (auto &KV : segments()) {
141     auto &AG = KV.first;
142     auto &Seg = KV.second;
143 
144     if (Seg.Alignment > PageSize)
145       return make_error<StringError>("Segment alignment greater than page size",
146                                      inconvertibleErrorCode());
147 
148     uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
149     if (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
150       SegsSizes.StandardSegs += SegSize;
151     else
152       SegsSizes.FinalizeSegs += SegSize;
153   }
154 
155   return SegsSizes;
156 }
157 
// Apply the layout: assign final addresses to all blocks and copy content
// into working memory. The caller must have set each segment's Addr and
// WorkingMem before calling this.
Error BasicLayout::apply() {
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
           "Empty section recorded?");

    for (auto *B : Seg.ContentBlocks) {
      // Align addr and working-mem-offset.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);

      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();

      // Copy content to working memory, then update content to point at working
      // memory. The copy must happen before setMutableContent redirects the
      // block's content pointer.
      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
             B->getSize());
      B->setMutableContent(
          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
      Seg.NextWorkingMemOffset += B->getSize();
    }

    // Zero-fill blocks only need addresses; no bytes are written here.
    // NOTE(review): this assumes the backing memory is already zeroed — the
    // in-process allocator memsets its slab up-front; other callers must
    // provide the same guarantee.
    for (auto *B : Seg.ZeroFillBlocks) {
      // Align addr.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();
    }

    // The block lists are only needed to apply the layout once.
    Seg.ContentBlocks.clear();
    Seg.ZeroFillBlocks.clear();
  }

  return Error::success();
}
197 
// Expose the underlying graph's allocation-action list so memory managers can
// inspect, run, or transfer the actions at finalization time.
JITLinkMemoryManager::AllocActions &BasicLayout::graphAllocActions() {
  return G.allocActions();
}
201 
// Asynchronously create a SimpleSegmentAlloc: synthesize a throwaway
// LinkGraph with one section/content-block per requested segment, then hand
// it to MemMgr.allocate. OnCreated receives either the SimpleSegmentAlloc or
// the allocation error.
void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
                                const JITLinkDylib *JD, SegmentMap Segments,
                                OnCreatedFunction OnCreated) {

  // One synthetic section name per AllocGroup. The table is indexed by
  // (prot-bits | dealloc-policy-bit << 3) — see the index expression below.
  static_assert(AllocGroup::NumGroups == 16,
                "AllocGroup has changed. Section names below must be updated");
  StringRef AGSectionNames[] = {
      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};

  auto G =
      std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
  AllocGroupSmallMap<Block *> ContentBlocks;

  // Placeholder base address; real addresses are assigned by the allocator.
  JITTargetAddress NextAddr = 0x100000;
  for (auto &KV : Segments) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto AGSectionName =
        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
                       static_cast<bool>(AG.getMemDeallocPolicy()) << 3];

    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
    Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());

    // Only non-empty content requests get a block (and a ContentBlocks entry).
    if (Seg.ContentSize != 0) {
      NextAddr = alignTo(NextAddr, Seg.ContentAlign);
      auto &B =
          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
                                       NextAddr, Seg.ContentAlign.value(), 0);
      ContentBlocks[AG] = &B;
      NextAddr += Seg.ContentSize;
    }
  }

  // GRef declared separately since order-of-argument-eval isn't specified.
  // (The lambda's init-capture moves G, so passing *G inline would race the
  // move.)
  auto &GRef = *G;
  MemMgr.allocate(JD, GRef,
                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
                   OnCreated = std::move(OnCreated)](
                      JITLinkMemoryManager::AllocResult Alloc) mutable {
                    if (!Alloc)
                      OnCreated(Alloc.takeError());
                    else
                      OnCreated(SimpleSegmentAlloc(std::move(G),
                                                   std::move(ContentBlocks),
                                                   std::move(*Alloc)));
                  });
}
254 
// Blocking variant of Create: drives the asynchronous overload with a
// promise/future pair and waits for the result.
Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                           SegmentMap Segments) {
  // MSVCPExpected is presumably a workaround for MSVC's std::promise
  // requirements on the stored type — TODO confirm against its declaration.
  std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
  auto AllocF = AllocP.get_future();
  Create(MemMgr, JD, std::move(Segments),
         [&](Expected<SimpleSegmentAlloc> Result) {
           AllocP.set_value(std::move(Result));
         });
  return AllocF.get();
}
266 
// Out-of-line defaulted move operations and destructor (the members are all
// movable, so the defaults suffice).
SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() {}
271 
272 SimpleSegmentAlloc::SegmentInfo SimpleSegmentAlloc::getSegInfo(AllocGroup AG) {
273   auto I = ContentBlocks.find(AG);
274   if (I != ContentBlocks.end()) {
275     auto &B = *I->second;
276     return {B.getAddress(), B.getAlreadyMutableContent()};
277   }
278   return {};
279 }
280 
// Constructor used by Create: takes ownership of the synthesized graph, the
// per-group content-block map, and the in-flight allocation.
SimpleSegmentAlloc::SimpleSegmentAlloc(
    std::unique_ptr<LinkGraph> G, AllocGroupSmallMap<Block *> ContentBlocks,
    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
      Alloc(std::move(Alloc)) {}
286 
// In-flight allocation state for InProcessMemoryManager: holds the layout and
// the two memory slabs (standard- and finalize-lifetime) until the allocation
// is either finalized or abandoned.
class InProcessMemoryManager::IPInFlightAlloc
    : public JITLinkMemoryManager::InFlightAlloc {
public:
  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
                  sys::MemoryBlock StandardSegments,
                  sys::MemoryBlock FinalizationSegments)
      : MemMgr(MemMgr), G(G), BL(std::move(BL)),
        StandardSegments(std::move(StandardSegments)),
        FinalizationSegments(std::move(FinalizationSegments)) {}

  // Apply protections, run finalize actions, release the finalize-lifetime
  // slab, and hand the surviving standard-lifetime allocation to OnFinalized.
  void finalize(OnFinalizedFunction OnFinalized) override {

    // Apply memory protections to all segments.
    if (auto Err = applyProtections()) {
      OnFinalized(std::move(Err));
      return;
    }

    // Run finalization actions.
    // FIXME: Roll back previous successful actions on failure.
    std::vector<AllocActionCall> DeallocActions;
    DeallocActions.reserve(G.allocActions().size());
    for (auto &ActPair : G.allocActions()) {
      if (ActPair.Finalize.FnAddr)
        if (auto Err = runAllocAction(ActPair.Finalize)) {
          OnFinalized(std::move(Err));
          return;
        }
      // Collect dealloc actions; they run when the allocation is destroyed.
      if (ActPair.Dealloc.FnAddr)
        DeallocActions.push_back(ActPair.Dealloc);
    }
    G.allocActions().clear();

    // Release the finalize segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
      OnFinalized(errorCodeToError(EC));
      return;
    }

    // Continue with finalized allocation.
    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
                                            std::move(DeallocActions)));
  }

  // Release both slabs without running any actions, joining any release
  // errors into a single Error for the callback.
  void abandon(OnAbandonedFunction OnAbandoned) override {
    Error Err = Error::success();
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    OnAbandoned(std::move(Err));
  }

private:
  // Set each segment's final memory protections, and invalidate the
  // instruction cache for executable segments.
  Error applyProtections() {
    for (auto &KV : BL.segments()) {
      const auto &AG = KV.first;
      auto &Seg = KV.second;

      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());

      // Protections apply to the whole page-aligned extent of the segment.
      uint64_t SegSize =
          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
        return errorCodeToError(EC);
      if (Prot & sys::Memory::MF_EXEC)
        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
    }
    return Error::success();
  }

  InProcessMemoryManager &MemMgr;
  LinkGraph &G;
  BasicLayout BL;
  sys::MemoryBlock StandardSegments;
  sys::MemoryBlock FinalizationSegments;
};
365 
366 Expected<std::unique_ptr<InProcessMemoryManager>>
367 InProcessMemoryManager::Create() {
368   if (auto PageSize = sys::Process::getPageSize())
369     return std::make_unique<InProcessMemoryManager>(*PageSize);
370   else
371     return PageSize.takeError();
372 }
373 
374 void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
375                                       OnAllocatedFunction OnAllocated) {
376 
377   // FIXME: Just check this once on startup.
378   if (!isPowerOf2_64((uint64_t)PageSize)) {
379     OnAllocated(make_error<StringError>("Page size is not a power of 2",
380                                         inconvertibleErrorCode()));
381     return;
382   }
383 
384   BasicLayout BL(G);
385 
386   /// Scan the request and calculate the group and total sizes.
387   /// Check that segment size is no larger than a page.
388   auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
389   if (!SegsSizes) {
390     OnAllocated(SegsSizes.takeError());
391     return;
392   }
393 
394   /// Check that the total size requested (including zero fill) is not larger
395   /// than a size_t.
396   if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
397     OnAllocated(make_error<JITLinkError>(
398         "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
399         " for graph " + G.getName() + " exceeds address space"));
400     return;
401   }
402 
403   // Allocate one slab for the whole thing (to make sure everything is
404   // in-range), then partition into standard and finalization blocks.
405   //
406   // FIXME: Make two separate allocations in the future to reduce
407   // fragmentation: finalization segments will usually be a single page, and
408   // standard segments are likely to be more than one page. Where multiple
409   // allocations are in-flight at once (likely) the current approach will leave
410   // a lot of single-page holes.
411   sys::MemoryBlock Slab;
412   sys::MemoryBlock StandardSegsMem;
413   sys::MemoryBlock FinalizeSegsMem;
414   {
415     const sys::Memory::ProtectionFlags ReadWrite =
416         static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
417                                                   sys::Memory::MF_WRITE);
418 
419     std::error_code EC;
420     Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
421                                              ReadWrite, EC);
422 
423     if (EC) {
424       OnAllocated(errorCodeToError(EC));
425       return;
426     }
427 
428     // Zero-fill the whole slab up-front.
429     memset(Slab.base(), 0, Slab.allocatedSize());
430 
431     StandardSegsMem = {Slab.base(),
432                        static_cast<size_t>(SegsSizes->StandardSegs)};
433     FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
434                        static_cast<size_t>(SegsSizes->FinalizeSegs)};
435   }
436 
437   auto NextStandardSegAddr = pointerToJITTargetAddress(StandardSegsMem.base());
438   auto NextFinalizeSegAddr = pointerToJITTargetAddress(FinalizeSegsMem.base());
439 
440   LLVM_DEBUG({
441     dbgs() << "InProcessMemoryManager allocated:\n";
442     if (SegsSizes->StandardSegs)
443       dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
444                         NextStandardSegAddr + StandardSegsMem.allocatedSize())
445              << " to stardard segs\n";
446     else
447       dbgs() << "  no standard segs\n";
448     if (SegsSizes->FinalizeSegs)
449       dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
450                         NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
451              << " to finalize segs\n";
452     else
453       dbgs() << "  no finalize segs\n";
454   });
455 
456   // Build ProtMap, assign addresses.
457   for (auto &KV : BL.segments()) {
458     auto &AG = KV.first;
459     auto &Seg = KV.second;
460 
461     auto &SegAddr = (AG.getMemDeallocPolicy() == MemDeallocPolicy::Standard)
462                         ? NextStandardSegAddr
463                         : NextFinalizeSegAddr;
464 
465     Seg.WorkingMem = jitTargetAddressToPointer<char *>(SegAddr);
466     Seg.Addr = SegAddr;
467 
468     SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
469   }
470 
471   if (auto Err = BL.apply()) {
472     OnAllocated(std::move(Err));
473     return;
474   }
475 
476   OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
477                                                 std::move(StandardSegsMem),
478                                                 std::move(FinalizeSegsMem)));
479 }
480 
// Tear down a batch of finalized allocations: run each allocation's dealloc
// actions and release its standard-lifetime memory, joining all errors into
// the single Error passed to OnDeallocated.
void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
                                        OnDeallocatedFunction OnDeallocated) {
  std::vector<sys::MemoryBlock> StandardSegmentsList;
  std::vector<std::vector<AllocActionCall>> DeallocActionsList;

  {
    // Reclaim the FinalizedAllocInfo records under the lock, but defer the
    // (potentially slow) dealloc actions and memory release until after the
    // lock is dropped.
    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
    for (auto &Alloc : Allocs) {
      // Each FinalizedAlloc handle encodes the address of a
      // FinalizedAllocInfo record (see createFinalizedAlloc).
      auto *FA =
          jitTargetAddressToPointer<FinalizedAllocInfo *>(Alloc.release());
      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
      if (!FA->DeallocActions.empty())
        DeallocActionsList.push_back(std::move(FA->DeallocActions));
      FA->~FinalizedAllocInfo();
      FinalizedAllocInfos.Deallocate(FA);
    }
  }

  Error DeallocErr = Error::success();

  while (!DeallocActionsList.empty()) {
    auto &DeallocActions = DeallocActionsList.back();
    auto &StandardSegments = StandardSegmentsList.back();

    // Run any deallocate calls. Errors are joined rather than aborting so
    // that every allocation still gets released.
    while (!DeallocActions.empty()) {
      if (auto Err = runAllocAction(DeallocActions.back()))
        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
      DeallocActions.pop_back();
    }

    // Release the standard segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));

    DeallocActionsList.pop_back();
    StandardSegmentsList.pop_back();
  }

  OnDeallocated(std::move(DeallocErr));
}
522 
// Record a finalized allocation: placement-construct a FinalizedAllocInfo in
// storage obtained from the FinalizedAllocInfos allocator (guarded by
// FinalizedAllocsMutex) and encode its address as the opaque FinalizedAlloc
// handle. The matching teardown lives in deallocate above.
JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
    sys::MemoryBlock StandardSegments,
    std::vector<AllocActionCall> DeallocActions) {
  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
  new (FA) FinalizedAllocInfo(
      {std::move(StandardSegments), std::move(DeallocActions)});
  return FinalizedAlloc(pointerToJITTargetAddress(FA));
}
533 
534 } // end namespace jitlink
535 } // end namespace llvm
536