xref: /llvm-project/llvm/lib/ExecutionEngine/JITLink/JITLinkMemoryManager.cpp (revision d42c2352aa3a77fb8bf217191868c3eb6c53a5a2)
//===--- JITLinkMemoryManager.cpp - JITLinkMemoryManager implementation ---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/JITLink/JITLinkMemoryManager.h"
#include "llvm/ExecutionEngine/JITLink/JITLink.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/Process.h"

#define DEBUG_TYPE "jitlink"

using namespace llvm;

namespace llvm {
namespace jitlink {

JITLinkMemoryManager::~JITLinkMemoryManager() = default;
JITLinkMemoryManager::InFlightAlloc::~InFlightAlloc() = default;

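// BasicLayout collects the graph's blocks into segments keyed by
// (MemProt, MemDeallocPolicy) AllocGroup, separating content blocks from
// zero-fill blocks and computing each segment's content size, zero-fill size,
// and maximum alignment.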
BasicLayout::BasicLayout(LinkGraph &G) : G(G) {

  for (auto &Sec : G.sections()) {
    // Skip empty sections.
    if (Sec.blocks().empty())
      continue;

    auto &Seg = Segments[{Sec.getMemProt(), Sec.getMemDeallocPolicy()}];
    for (auto *B : Sec.blocks())
      if (LLVM_LIKELY(!B->isZeroFill()))
        Seg.ContentBlocks.push_back(B);
      else
        Seg.ZeroFillBlocks.push_back(B);
  }

  // Comparator for ordering blocks within each segment.
  auto CompareBlocks = [](const Block *LHS, const Block *RHS) {
    // Sort by section, address and size.
    if (LHS->getSection().getOrdinal() != RHS->getSection().getOrdinal())
      return LHS->getSection().getOrdinal() < RHS->getSection().getOrdinal();
    if (LHS->getAddress() != RHS->getAddress())
      return LHS->getAddress() < RHS->getAddress();
    return LHS->getSize() < RHS->getSize();
  };

  LLVM_DEBUG(dbgs() << "Generated BasicLayout for " << G.getName() << ":\n");
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    llvm::sort(Seg.ContentBlocks, CompareBlocks);
    llvm::sort(Seg.ZeroFillBlocks, CompareBlocks);

    for (auto *B : Seg.ContentBlocks) {
      Seg.ContentSize = alignToBlock(Seg.ContentSize, *B);
      Seg.ContentSize += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }

    uint64_t SegEndOffset = Seg.ContentSize;
    for (auto *B : Seg.ZeroFillBlocks) {
      SegEndOffset = alignToBlock(SegEndOffset, *B);
      SegEndOffset += B->getSize();
      Seg.Alignment = std::max(Seg.Alignment, Align(B->getAlignment()));
    }
    Seg.ZeroFillSize = SegEndOffset - Seg.ContentSize;

    LLVM_DEBUG({
      dbgs() << "  Seg " << KV.first
             << ": content-size=" << formatv("{0:x}", Seg.ContentSize)
             << ", zero-fill-size=" << formatv("{0:x}", Seg.ZeroFillSize)
             << ", align=" << formatv("{0:x}", Seg.Alignment.value()) << "\n";
    });
  }
}

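// Compute the total memory required for a contiguous, page-aligned layout of
// the graph, split into standard-lifetime and finalize-lifetime totals. Fails
// if any segment's alignment exceeds the page size.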
Expected<BasicLayout::ContiguousPageBasedLayoutSizes>
BasicLayout::getContiguousPageBasedLayoutSizes(uint64_t PageSize) {
  ContiguousPageBasedLayoutSizes SegsSizes;

  for (auto &KV : segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    if (Seg.Alignment > PageSize)
      return make_error<StringError>("Segment alignment greater than page size",
                                     inconvertibleErrorCode());

    uint64_t SegSize = alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
    if (AG.getMemDeallocPolicy() == orc::MemDeallocPolicy::Standard)
      SegsSizes.StandardSegs += SegSize;
    else
      SegsSizes.FinalizeSegs += SegSize;
  }

  return SegsSizes;
}

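// Apply the layout: assign each block its target address, copy block content
// into the segment's working memory, and repoint block content at that
// working memory so that fixups can be applied in place.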
Error BasicLayout::apply() {
  for (auto &KV : Segments) {
    auto &Seg = KV.second;

    assert(!(Seg.ContentBlocks.empty() && Seg.ZeroFillBlocks.empty()) &&
           "Empty section recorded?");

    for (auto *B : Seg.ContentBlocks) {
      // Align addr and working-mem-offset.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      Seg.NextWorkingMemOffset = alignToBlock(Seg.NextWorkingMemOffset, *B);

      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();

      // Copy content to working memory, then update content to point at working
      // memory.
      memcpy(Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getContent().data(),
             B->getSize());
      B->setMutableContent(
          {Seg.WorkingMem + Seg.NextWorkingMemOffset, B->getSize()});
      Seg.NextWorkingMemOffset += B->getSize();
    }

    for (auto *B : Seg.ZeroFillBlocks) {
      // Align addr.
      Seg.Addr = alignToBlock(Seg.Addr, *B);
      // Update block addr.
      B->setAddress(Seg.Addr);
      Seg.Addr += B->getSize();
    }

    Seg.ContentBlocks.clear();
    Seg.ZeroFillBlocks.clear();
  }

  return Error::success();
}

orc::shared::AllocActions &BasicLayout::graphAllocActions() {
  return G.allocActions();
}

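// Build a throwaway LinkGraph with one section per requested AllocGroup (and,
// where content is requested, one mutable content block), then hand it to the
// memory manager's allocate method. This lets clients obtain raw segments
// through the ordinary JITLinkMemoryManager interface.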
void SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr,
                                const JITLinkDylib *JD, SegmentMap Segments,
                                OnCreatedFunction OnCreated) {

  static_assert(orc::AllocGroup::NumGroups == 16,
                "AllocGroup has changed. Section names below must be updated");
  StringRef AGSectionNames[] = {
      "__---.standard", "__R--.standard", "__-W-.standard", "__RW-.standard",
      "__--X.standard", "__R-X.standard", "__-WX.standard", "__RWX.standard",
      "__---.finalize", "__R--.finalize", "__-W-.finalize", "__RW-.finalize",
      "__--X.finalize", "__R-X.finalize", "__-WX.finalize", "__RWX.finalize"};

  auto G =
      std::make_unique<LinkGraph>("", Triple(), 0, support::native, nullptr);
  orc::AllocGroupSmallMap<Block *> ContentBlocks;

  orc::ExecutorAddr NextAddr(0x100000);
  for (auto &KV : Segments) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto AGSectionName =
        AGSectionNames[static_cast<unsigned>(AG.getMemProt()) |
                       static_cast<bool>(AG.getMemDeallocPolicy()) << 3];

    auto &Sec = G->createSection(AGSectionName, AG.getMemProt());
    Sec.setMemDeallocPolicy(AG.getMemDeallocPolicy());

    if (Seg.ContentSize != 0) {
      NextAddr =
          orc::ExecutorAddr(alignTo(NextAddr.getValue(), Seg.ContentAlign));
      auto &B =
          G->createMutableContentBlock(Sec, G->allocateBuffer(Seg.ContentSize),
                                       NextAddr, Seg.ContentAlign.value(), 0);
      ContentBlocks[AG] = &B;
      NextAddr += Seg.ContentSize;
    }
  }

  // GRef declared separately since order-of-argument-eval isn't specified.
  auto &GRef = *G;
  MemMgr.allocate(JD, GRef,
                  [G = std::move(G), ContentBlocks = std::move(ContentBlocks),
                   OnCreated = std::move(OnCreated)](
                      JITLinkMemoryManager::AllocResult Alloc) mutable {
                    if (!Alloc)
                      OnCreated(Alloc.takeError());
                    else
                      OnCreated(SimpleSegmentAlloc(std::move(G),
                                                   std::move(ContentBlocks),
                                                   std::move(*Alloc)));
                  });
}

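// Blocking convenience wrapper: forwards to the asynchronous Create overload
// and waits on a promise/future pair for the result.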
Expected<SimpleSegmentAlloc>
SimpleSegmentAlloc::Create(JITLinkMemoryManager &MemMgr, const JITLinkDylib *JD,
                           SegmentMap Segments) {
  std::promise<MSVCPExpected<SimpleSegmentAlloc>> AllocP;
  auto AllocF = AllocP.get_future();
  Create(MemMgr, JD, std::move(Segments),
         [&](Expected<SimpleSegmentAlloc> Result) {
           AllocP.set_value(std::move(Result));
         });
  return AllocF.get();
}

SimpleSegmentAlloc::SimpleSegmentAlloc(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc &
SimpleSegmentAlloc::operator=(SimpleSegmentAlloc &&) = default;
SimpleSegmentAlloc::~SimpleSegmentAlloc() = default;

SimpleSegmentAlloc::SegmentInfo
SimpleSegmentAlloc::getSegInfo(orc::AllocGroup AG) {
  auto I = ContentBlocks.find(AG);
  if (I != ContentBlocks.end()) {
    auto &B = *I->second;
    return {B.getAddress(), B.getAlreadyMutableContent()};
  }
  return {};
}

SimpleSegmentAlloc::SimpleSegmentAlloc(
    std::unique_ptr<LinkGraph> G,
    orc::AllocGroupSmallMap<Block *> ContentBlocks,
    std::unique_ptr<JITLinkMemoryManager::InFlightAlloc> Alloc)
    : G(std::move(G)), ContentBlocks(std::move(ContentBlocks)),
      Alloc(std::move(Alloc)) {}

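// In-flight allocation for InProcessMemoryManager. It holds the standard and
// finalize slabs until finalize() or abandon() is called: finalize() applies
// memory protections, runs the graph's finalize actions, and releases the
// finalize slab; abandon() releases both slabs.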
class InProcessMemoryManager::IPInFlightAlloc
    : public JITLinkMemoryManager::InFlightAlloc {
public:
  IPInFlightAlloc(InProcessMemoryManager &MemMgr, LinkGraph &G, BasicLayout BL,
                  sys::MemoryBlock StandardSegments,
                  sys::MemoryBlock FinalizationSegments)
      : MemMgr(MemMgr), G(&G), BL(std::move(BL)),
        StandardSegments(std::move(StandardSegments)),
        FinalizationSegments(std::move(FinalizationSegments)) {}

  ~IPInFlightAlloc() {
    assert(!G && "InFlight alloc neither abandoned nor finalized");
  }

  void finalize(OnFinalizedFunction OnFinalized) override {

    // Apply memory protections to all segments.
    if (auto Err = applyProtections()) {
      OnFinalized(std::move(Err));
      return;
    }

    // Run finalization actions.
    auto DeallocActions = runFinalizeActions(G->allocActions());
    if (!DeallocActions) {
      OnFinalized(DeallocActions.takeError());
      return;
    }

    // Release the finalize segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments)) {
      OnFinalized(errorCodeToError(EC));
      return;
    }

#ifndef NDEBUG
    // Set 'G' to null to flag that we've been successfully finalized.
    // This allows us to assert at destruction time that a call has been made
    // to either finalize or abandon.
    G = nullptr;
#endif

    // Continue with finalized allocation.
    OnFinalized(MemMgr.createFinalizedAlloc(std::move(StandardSegments),
                                            std::move(*DeallocActions)));
  }

  void abandon(OnAbandonedFunction OnAbandoned) override {
    Error Err = Error::success();
    if (auto EC = sys::Memory::releaseMappedMemory(FinalizationSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      Err = joinErrors(std::move(Err), errorCodeToError(EC));

#ifndef NDEBUG
    // Set 'G' to null to flag that this allocation has been abandoned.
    // This allows us to assert at destruction time that a call has been made
    // to either finalize or abandon.
    G = nullptr;
#endif

    OnAbandoned(std::move(Err));
  }

private:
  Error applyProtections() {
    for (auto &KV : BL.segments()) {
      const auto &AG = KV.first;
      auto &Seg = KV.second;

      auto Prot = toSysMemoryProtectionFlags(AG.getMemProt());

      uint64_t SegSize =
          alignTo(Seg.ContentSize + Seg.ZeroFillSize, MemMgr.PageSize);
      sys::MemoryBlock MB(Seg.WorkingMem, SegSize);
      if (auto EC = sys::Memory::protectMappedMemory(MB, Prot))
        return errorCodeToError(EC);
      if (Prot & sys::Memory::MF_EXEC)
        sys::Memory::InvalidateInstructionCache(MB.base(), MB.allocatedSize());
    }
    return Error::success();
  }

  InProcessMemoryManager &MemMgr;
  LinkGraph *G;
  BasicLayout BL;
  sys::MemoryBlock StandardSegments;
  sys::MemoryBlock FinalizationSegments;
};

Expected<std::unique_ptr<InProcessMemoryManager>>
InProcessMemoryManager::Create() {
  if (auto PageSize = sys::Process::getPageSize())
    return std::make_unique<InProcessMemoryManager>(*PageSize);
  else
    return PageSize.takeError();
}

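// Allocate memory for the graph: compute a contiguous page-based layout,
// reserve a single read/write slab sized for all segments, carve it into a
// standard part and a finalize part, assign segment addresses, and return an
// IPInFlightAlloc wrapping the result.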
void InProcessMemoryManager::allocate(const JITLinkDylib *JD, LinkGraph &G,
                                      OnAllocatedFunction OnAllocated) {

  // FIXME: Just check this once on startup.
  if (!isPowerOf2_64((uint64_t)PageSize)) {
    OnAllocated(make_error<StringError>("Page size is not a power of 2",
                                        inconvertibleErrorCode()));
    return;
  }

  BasicLayout BL(G);

  // Scan the request and calculate the group and total sizes.
  // Check that no segment's alignment is greater than the page size.
  auto SegsSizes = BL.getContiguousPageBasedLayoutSizes(PageSize);
  if (!SegsSizes) {
    OnAllocated(SegsSizes.takeError());
    return;
  }

  // Check that the total size requested (including zero fill) is not larger
  // than a size_t.
  if (SegsSizes->total() > std::numeric_limits<size_t>::max()) {
    OnAllocated(make_error<JITLinkError>(
        "Total requested size " + formatv("{0:x}", SegsSizes->total()) +
        " for graph " + G.getName() + " exceeds address space"));
    return;
  }

  // Allocate one slab for the whole thing (to make sure everything is
  // in-range), then partition into standard and finalization blocks.
  //
  // FIXME: Make two separate allocations in the future to reduce
  // fragmentation: finalization segments will usually be a single page, and
  // standard segments are likely to be more than one page. Where multiple
  // allocations are in-flight at once (likely) the current approach will leave
  // a lot of single-page holes.
  sys::MemoryBlock Slab;
  sys::MemoryBlock StandardSegsMem;
  sys::MemoryBlock FinalizeSegsMem;
  {
    const sys::Memory::ProtectionFlags ReadWrite =
        static_cast<sys::Memory::ProtectionFlags>(sys::Memory::MF_READ |
                                                  sys::Memory::MF_WRITE);

    std::error_code EC;
    Slab = sys::Memory::allocateMappedMemory(SegsSizes->total(), nullptr,
                                             ReadWrite, EC);

    if (EC) {
      OnAllocated(errorCodeToError(EC));
      return;
    }

    // Zero-fill the whole slab up-front.
    memset(Slab.base(), 0, Slab.allocatedSize());

    StandardSegsMem = {Slab.base(),
                       static_cast<size_t>(SegsSizes->StandardSegs)};
    FinalizeSegsMem = {(void *)((char *)Slab.base() + SegsSizes->StandardSegs),
                       static_cast<size_t>(SegsSizes->FinalizeSegs)};
  }

  auto NextStandardSegAddr = orc::ExecutorAddr::fromPtr(StandardSegsMem.base());
  auto NextFinalizeSegAddr = orc::ExecutorAddr::fromPtr(FinalizeSegsMem.base());

  LLVM_DEBUG({
    dbgs() << "InProcessMemoryManager allocated:\n";
    if (SegsSizes->StandardSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextStandardSegAddr,
                        NextStandardSegAddr + StandardSegsMem.allocatedSize())
             << " to standard segs\n";
    else
      dbgs() << "  no standard segs\n";
    if (SegsSizes->FinalizeSegs)
      dbgs() << formatv("  [ {0:x16} -- {1:x16} ]", NextFinalizeSegAddr,
                        NextFinalizeSegAddr + FinalizeSegsMem.allocatedSize())
             << " to finalize segs\n";
    else
      dbgs() << "  no finalize segs\n";
  });

  // Assign target addresses and working memory to each segment.
  for (auto &KV : BL.segments()) {
    auto &AG = KV.first;
    auto &Seg = KV.second;

    auto &SegAddr =
        (AG.getMemDeallocPolicy() == orc::MemDeallocPolicy::Standard)
            ? NextStandardSegAddr
            : NextFinalizeSegAddr;

    Seg.WorkingMem = SegAddr.toPtr<char *>();
    Seg.Addr = SegAddr;

    SegAddr += alignTo(Seg.ContentSize + Seg.ZeroFillSize, PageSize);
  }

  if (auto Err = BL.apply()) {
    OnAllocated(std::move(Err));
    return;
  }

  OnAllocated(std::make_unique<IPInFlightAlloc>(*this, G, std::move(BL),
                                                std::move(StandardSegsMem),
                                                std::move(FinalizeSegsMem)));
}

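// Deallocate finalized allocations: under the lock, reclaim each
// FinalizedAllocInfo record, then (outside the lock) run any dealloc actions
// and release the corresponding standard-segment slabs, merging any errors.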
void InProcessMemoryManager::deallocate(std::vector<FinalizedAlloc> Allocs,
                                        OnDeallocatedFunction OnDeallocated) {
  std::vector<sys::MemoryBlock> StandardSegmentsList;
  std::vector<std::vector<orc::shared::WrapperFunctionCall>> DeallocActionsList;

  {
    std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
    for (auto &Alloc : Allocs) {
      auto *FA = Alloc.release().toPtr<FinalizedAllocInfo *>();
      StandardSegmentsList.push_back(std::move(FA->StandardSegments));
      if (!FA->DeallocActions.empty())
        DeallocActionsList.push_back(std::move(FA->DeallocActions));
      FA->~FinalizedAllocInfo();
      FinalizedAllocInfos.Deallocate(FA);
    }
  }

  Error DeallocErr = Error::success();

  while (!DeallocActionsList.empty()) {
    auto &DeallocActions = DeallocActionsList.back();
    auto &StandardSegments = StandardSegmentsList.back();

    // Run any deallocate calls.
    while (!DeallocActions.empty()) {
      if (auto Err = DeallocActions.back().runWithSPSRetErrorMerged())
        DeallocErr = joinErrors(std::move(DeallocErr), std::move(Err));
      DeallocActions.pop_back();
    }

    // Release the standard segments slab.
    if (auto EC = sys::Memory::releaseMappedMemory(StandardSegments))
      DeallocErr = joinErrors(std::move(DeallocErr), errorCodeToError(EC));

    DeallocActionsList.pop_back();
    StandardSegmentsList.pop_back();
  }

  OnDeallocated(std::move(DeallocErr));
}

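// Record a finalized allocation: placement-new a FinalizedAllocInfo in the
// manager's allocator and return its address as an opaque FinalizedAlloc
// handle, which deallocate() later unpacks.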
JITLinkMemoryManager::FinalizedAlloc
InProcessMemoryManager::createFinalizedAlloc(
    sys::MemoryBlock StandardSegments,
    std::vector<orc::shared::WrapperFunctionCall> DeallocActions) {
  std::lock_guard<std::mutex> Lock(FinalizedAllocsMutex);
  auto *FA = FinalizedAllocInfos.Allocate<FinalizedAllocInfo>();
  new (FA) FinalizedAllocInfo(
      {std::move(StandardSegments), std::move(DeallocActions)});
  return FinalizedAlloc(orc::ExecutorAddr::fromPtr(FA));
}

} // end namespace jitlink
} // end namespace llvm