xref: /llvm-project/llvm/lib/ExecutionEngine/Orc/MemoryMapper.cpp (revision 1f4d91ecb8529678a3d3919d7523743bd21942ca)
//===- MemoryMapper.cpp - Cross-process memory mapper ------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"

#include "llvm/Config/llvm-config.h" // for LLVM_ON_UNIX
#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
#include "llvm/Support/WindowsError.h"

#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)
#include <fcntl.h>
#include <sys/mman.h>
#if defined(__MVS__)
#include "llvm/Support/BLAKE3.h"
#include <sys/shm.h>
#endif
#include <unistd.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace llvm {
namespace orc {

MemoryMapper::~MemoryMapper() {}

InProcessMemoryMapper::InProcessMemoryMapper(size_t PageSize)
    : PageSize(PageSize) {}

Expected<std::unique_ptr<InProcessMemoryMapper>>
InProcessMemoryMapper::Create() {
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();
  return std::make_unique<InProcessMemoryMapper>(*PageSize);
}

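// Reserve a block of read/write memory in the current process and record it
// as a reservation for later allocations.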
void InProcessMemoryMapper::reserve(size_t NumBytes,
                                    OnReservedFunction OnReserved) {
  std::error_code EC;
  auto MB = sys::Memory::allocateMappedMemory(
      NumBytes, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);

  if (EC)
    return OnReserved(errorCodeToError(EC));

  {
    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations[MB.base()].Size = MB.allocatedSize();
  }

  OnReserved(
      ExecutorAddrRange(ExecutorAddr::fromPtr(MB.base()), MB.allocatedSize()));
}

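// Allocations are in-process, so content can be written directly at the
// target address.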
char *InProcessMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  return Addr.toPtr<char *>();
}

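// Zero-fill each segment, apply the requested memory protections, invalidate
// the instruction cache for executable segments, and run finalize actions.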
void InProcessMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                       OnInitializedFunction OnInitialized) {
  ExecutorAddr MinAddr(~0ULL);
  ExecutorAddr MaxAddr(0);

  // FIXME: Release finalize lifetime segments.
  for (auto &Segment : AI.Segments) {
    auto Base = AI.MappingBase + Segment.Offset;
    auto Size = Segment.ContentSize + Segment.ZeroFillSize;

    if (Base < MinAddr)
      MinAddr = Base;

    if (Base + Size > MaxAddr)
      MaxAddr = Base + Size;

    std::memset((Base + Segment.ContentSize).toPtr<void *>(), 0,
                Segment.ZeroFillSize);

    if (auto EC = sys::Memory::protectMappedMemory(
            {Base.toPtr<void *>(), Size},
            toSysMemoryProtectionFlags(Segment.AG.getMemProt()))) {
      return OnInitialized(errorCodeToError(EC));
    }
    if ((Segment.AG.getMemProt() & MemProt::Exec) == MemProt::Exec)
      sys::Memory::InvalidateInstructionCache(Base.toPtr<void *>(), Size);
  }

  auto DeinitializeActions = shared::runFinalizeActions(AI.Actions);
  if (!DeinitializeActions)
    return OnInitialized(DeinitializeActions.takeError());

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    // This is the maximum range whose permissions may have been modified.
    Allocations[MinAddr].Size = MaxAddr - MinAddr;
    Allocations[MinAddr].DeinitializationActions =
        std::move(*DeinitializeActions);
    Reservations[AI.MappingBase.toPtr<void *>()].Allocations.push_back(MinAddr);
  }

  OnInitialized(MinAddr);
}

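// Run deallocation actions for each allocation (in reverse order) and return
// the affected ranges to a read/write state so they can be reused.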
void InProcessMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Bases,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  Error AllErr = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : llvm::reverse(Bases)) {

      if (Error Err = shared::runDeallocActions(
              Allocations[Base].DeinitializationActions)) {
        AllErr = joinErrors(std::move(AllErr), std::move(Err));
      }

      // Reset protections to read/write so the area can be reused.
      if (auto EC = sys::Memory::protectMappedMemory(
              {Base.toPtr<void *>(), Allocations[Base].Size},
              sys::Memory::ProtectionFlags::MF_READ |
                  sys::Memory::ProtectionFlags::MF_WRITE)) {
        AllErr = joinErrors(std::move(AllErr), errorCodeToError(EC));
      }

      Allocations.erase(Base);
    }
  }

  OnDeinitialized(std::move(AllErr));
}

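// Deinitialize any remaining allocations within each reservation, then unmap
// the reservation itself and drop it from the bookkeeping.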
void InProcessMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                    OnReleasedFunction OnReleased) {
  Error Err = Error::success();

  for (auto Base : Bases) {
    std::vector<ExecutorAddr> AllocAddrs;
    size_t Size;
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      auto &R = Reservations[Base.toPtr<void *>()];
      Size = R.Size;
      AllocAddrs.swap(R.Allocations);
    }

    // Deinitialize sub-allocations.
    std::promise<MSVCPError> P;
    auto F = P.get_future();
    deinitialize(AllocAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
    if (Error E = F.get()) {
      Err = joinErrors(std::move(Err), std::move(E));
    }

    // Free the memory.
    auto MB = sys::MemoryBlock(Base.toPtr<void *>(), Size);

    auto EC = sys::Memory::releaseMappedMemory(MB);
    if (EC) {
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    }

    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations.erase(Base.toPtr<void *>());
  }

  OnReleased(std::move(Err));
}

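// Release all outstanding reservations (and their allocations) before the
// mapper goes away.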
InProcessMemoryMapper::~InProcessMemoryMapper() {
  std::vector<ExecutorAddr> ReservationAddrs;
  {
    std::lock_guard<std::mutex> Lock(Mutex);

    ReservationAddrs.reserve(Reservations.size());
    for (const auto &R : Reservations) {
      ReservationAddrs.push_back(ExecutorAddr::fromPtr(R.getFirst()));
    }
  }

  std::promise<MSVCPError> P;
  auto F = P.get_future();
  release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
  cantFail(F.get());
}

// SharedMemoryMapper

SharedMemoryMapper::SharedMemoryMapper(ExecutorProcessControl &EPC,
                                       SymbolAddrs SAs, size_t PageSize)
    : EPC(EPC), SAs(SAs), PageSize(PageSize) {
#if (!defined(LLVM_ON_UNIX) || defined(__ANDROID__)) && !defined(_WIN32)
  llvm_unreachable("SharedMemoryMapper is not supported on this platform yet");
#endif
}

Expected<std::unique_ptr<SharedMemoryMapper>>
SharedMemoryMapper::Create(ExecutorProcessControl &EPC, SymbolAddrs SAs) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
  auto PageSize = sys::Process::getPageSize();
  if (!PageSize)
    return PageSize.takeError();

  return std::make_unique<SharedMemoryMapper>(EPC, SAs, *PageSize);
#else
  return make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode());
#endif
}

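// Ask the executor-side memory mapper service to create a named shared memory
// region, then map the same region into this process so content can be
// written locally and read by the executor.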
void SharedMemoryMapper::reserve(size_t NumBytes,
                                 OnReservedFunction OnReserved) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>(
      SAs.Reserve,
      [this, NumBytes, OnReserved = std::move(OnReserved)](
          Error SerializationErr,
          Expected<std::pair<ExecutorAddr, std::string>> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnReserved(std::move(SerializationErr));
        }

        if (!Result)
          return OnReserved(Result.takeError());

        ExecutorAddr RemoteAddr;
        std::string SharedMemoryName;
        std::tie(RemoteAddr, SharedMemoryName) = std::move(*Result);

        void *LocalAddr = nullptr;

#if defined(LLVM_ON_UNIX)

#if defined(__MVS__)
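        // System V shared memory: derive an IPC key by hashing the name
        // returned by the executor, then attach the segment with shmat.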
        ArrayRef<uint8_t> Data(
            reinterpret_cast<const uint8_t *>(SharedMemoryName.c_str()),
            SharedMemoryName.size());
        auto HashedName = BLAKE3::hash<sizeof(key_t)>(Data);
        key_t Key = *reinterpret_cast<key_t *>(HashedName.data());
        int SharedMemoryId =
            shmget(Key, NumBytes, IPC_CREAT | __IPC_SHAREAS | 0700);
        if (SharedMemoryId < 0) {
          return OnReserved(errorCodeToError(
              std::error_code(errno, std::generic_category())));
        }
        LocalAddr = shmat(SharedMemoryId, nullptr, 0);
        if (LocalAddr == reinterpret_cast<void *>(-1)) {
          return OnReserved(errorCodeToError(
              std::error_code(errno, std::generic_category())));
        }
#else
        int SharedMemoryFile = shm_open(SharedMemoryName.c_str(), O_RDWR, 0700);
        if (SharedMemoryFile < 0) {
          return OnReserved(errorCodeToError(errnoAsErrorCode()));
        }

        // This prevents other processes from accessing it by name.
        shm_unlink(SharedMemoryName.c_str());

        LocalAddr = mmap(nullptr, NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
                         SharedMemoryFile, 0);
        if (LocalAddr == MAP_FAILED) {
          return OnReserved(errorCodeToError(errnoAsErrorCode()));
        }

        close(SharedMemoryFile);
#endif

#elif defined(_WIN32)

        std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
                                          SharedMemoryName.end());
        HANDLE SharedMemoryFile = OpenFileMappingW(
            FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
        if (!SharedMemoryFile)
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));

        LocalAddr =
            MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
        if (!LocalAddr) {
          CloseHandle(SharedMemoryFile);
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
        }

        CloseHandle(SharedMemoryFile);

#endif
        {
          std::lock_guard<std::mutex> Lock(Mutex);
          Reservations.insert({RemoteAddr, {LocalAddr, NumBytes}});
        }

        OnReserved(ExecutorAddrRange(RemoteAddr, NumBytes));
      },
      SAs.Instance, static_cast<uint64_t>(NumBytes));

#else
  OnReserved(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

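// Translate an executor address within a reserved range to the corresponding
// address in the local mapping of that range.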
char *SharedMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  auto R = Reservations.upper_bound(Addr);
  assert(R != Reservations.begin() && "Attempt to prepare unreserved range");
  R--;

  ExecutorAddrDiff Offset = Addr - R->first;

  return static_cast<char *>(R->second.LocalAddr) + Offset;
}

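// Content has already been written through the local mapping via prepare().
// Zero-fill locally, then send a finalize request so the executor-side
// service applies memory protections and runs the finalize actions.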
void SharedMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                    OnInitializedFunction OnInitialized) {
  auto Reservation = Reservations.upper_bound(AI.MappingBase);
  assert(Reservation != Reservations.begin() &&
         "Attempt to initialize unreserved range");
  Reservation--;

  auto AllocationOffset = AI.MappingBase - Reservation->first;

  tpctypes::SharedMemoryFinalizeRequest FR;

  AI.Actions.swap(FR.Actions);

  FR.Segments.reserve(AI.Segments.size());

  for (auto Segment : AI.Segments) {
    char *Base = static_cast<char *>(Reservation->second.LocalAddr) +
                 AllocationOffset + Segment.Offset;
    std::memset(Base + Segment.ContentSize, 0, Segment.ZeroFillSize);

    tpctypes::SharedMemorySegFinalizeRequest SegReq;
    SegReq.RAG = {Segment.AG.getMemProt(),
                  Segment.AG.getMemLifetime() == MemLifetime::Finalize};
    SegReq.Addr = AI.MappingBase + Segment.Offset;
    SegReq.Size = Segment.ContentSize + Segment.ZeroFillSize;

    FR.Segments.push_back(SegReq);
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>(
      SAs.Initialize,
      [OnInitialized = std::move(OnInitialized)](
          Error SerializationErr, Expected<ExecutorAddr> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnInitialized(std::move(SerializationErr));
        }

        OnInitialized(std::move(Result));
      },
      SAs.Instance, Reservation->first, std::move(FR));
}

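// Forward deinitialization to the executor-side service, which runs the
// recorded deallocation actions for the given allocations.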
void SharedMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Allocations,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>(
      SAs.Deinitialize,
      [OnDeinitialized = std::move(OnDeinitialized)](Error SerializationErr,
                                                     Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnDeinitialized(std::move(SerializationErr));
        }

        OnDeinitialized(std::move(Result));
      },
      SAs.Instance, Allocations);
}

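// Unmap the local views of the given reservations, then ask the executor-side
// service to release the corresponding remote mappings.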
void SharedMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                 OnReleasedFunction OnReleased) {
#if (defined(LLVM_ON_UNIX) && !defined(__ANDROID__)) || defined(_WIN32)
  Error Err = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : Bases) {

#if defined(LLVM_ON_UNIX)

#if defined(__MVS__)
      if (shmdt(Reservations[Base].LocalAddr) < 0)
        Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
#else
      if (munmap(Reservations[Base].LocalAddr, Reservations[Base].Size) != 0)
        Err = joinErrors(std::move(Err), errorCodeToError(errnoAsErrorCode()));
#endif

#elif defined(_WIN32)

      if (!UnmapViewOfFile(Reservations[Base].LocalAddr))
        Err = joinErrors(std::move(Err),
                         errorCodeToError(mapWindowsError(GetLastError())));

#endif

      Reservations.erase(Base);
    }
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>(
      SAs.Release,
      [OnReleased = std::move(OnReleased),
       Err = std::move(Err)](Error SerializationErr, Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnReleased(
              joinErrors(std::move(Err), std::move(SerializationErr)));
        }

        return OnReleased(joinErrors(std::move(Err), std::move(Result)));
      },
      SAs.Instance, Bases);
#else
  OnReleased(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

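// Best-effort cleanup: unmap any local views that are still mapped. This does
// not notify the executor-side service.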
SharedMemoryMapper::~SharedMemoryMapper() {
  std::lock_guard<std::mutex> Lock(Mutex);
  for (const auto &R : Reservations) {

#if defined(LLVM_ON_UNIX) && !defined(__ANDROID__)

#if defined(__MVS__)
    shmdt(R.second.LocalAddr);
#else
    munmap(R.second.LocalAddr, R.second.Size);
#endif

#elif defined(_WIN32)

    UnmapViewOfFile(R.second.LocalAddr);

#else

    (void)R;

#endif
  }
}

} // namespace orc

} // namespace llvm