//===- MemoryMapper.cpp - Cross-process memory mapper -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/Orc/MemoryMapper.h"

#include "llvm/ExecutionEngine/Orc/Shared/OrcRTBridge.h"
#include "llvm/Support/WindowsError.h"

#include <future>

#if defined(LLVM_ON_UNIX)
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>
#elif defined(_WIN32)
#include <windows.h>
#endif

namespace llvm {
namespace orc {

MemoryMapper::~MemoryMapper() {}

void InProcessMemoryMapper::reserve(size_t NumBytes,
                                    OnReservedFunction OnReserved) {
  std::error_code EC;
  auto MB = sys::Memory::allocateMappedMemory(
      NumBytes, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);

  if (EC)
    return OnReserved(errorCodeToError(EC));

  {
    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations[MB.base()].Size = MB.allocatedSize();
  }

  OnReserved(
      ExecutorAddrRange(ExecutorAddr::fromPtr(MB.base()), MB.allocatedSize()));
}

char *InProcessMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  return Addr.toPtr<char *>();
}

void InProcessMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                       OnInitializedFunction OnInitialized) {
  ExecutorAddr MinAddr(~0ULL);

  for (auto &Segment : AI.Segments) {
    auto Base = AI.MappingBase + Segment.Offset;
    auto Size = Segment.ContentSize + Segment.ZeroFillSize;

    if (Base < MinAddr)
      MinAddr = Base;

    std::memset((Base + Segment.ContentSize).toPtr<void *>(), 0,
                Segment.ZeroFillSize);

    if (auto EC = sys::Memory::protectMappedMemory({Base.toPtr<void *>(), Size},
                                                   Segment.Prot)) {
      return OnInitialized(errorCodeToError(EC));
    }
    if (Segment.Prot & sys::Memory::MF_EXEC)
      sys::Memory::InvalidateInstructionCache(Base.toPtr<void *>(), Size);
  }

  auto DeinitializeActions = shared::runFinalizeActions(AI.Actions);
  if (!DeinitializeActions)
    return OnInitialized(DeinitializeActions.takeError());

  {
    std::lock_guard<std::mutex> Lock(Mutex);
    Allocations[MinAddr].DeinitializationActions =
        std::move(*DeinitializeActions);
    Reservations[AI.MappingBase.toPtr<void *>()].Allocations.push_back(MinAddr);
  }

  OnInitialized(MinAddr);
}

void InProcessMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Bases,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  Error AllErr = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : Bases) {

      if (Error Err = shared::runDeallocActions(
              Allocations[Base].DeinitializationActions)) {
        AllErr = joinErrors(std::move(AllErr), std::move(Err));
      }

      Allocations.erase(Base);
    }
  }

  OnDeinitialized(std::move(AllErr));
}

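// Release a set of reservations: deinitialize any allocations still live
// within each reservation, unmap the backing memory, and drop the
// bookkeeping entry.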
void InProcessMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                    OnReleasedFunction OnReleased) {
  Error Err = Error::success();

  for (auto Base : Bases) {
    std::vector<ExecutorAddr> AllocAddrs;
    size_t Size;
    {
      std::lock_guard<std::mutex> Lock(Mutex);
      auto &R = Reservations[Base.toPtr<void *>()];
      Size = R.Size;
      AllocAddrs.swap(R.Allocations);
    }

    // Deinitialize sub-allocations.
    std::promise<MSVCPError> P;
    auto F = P.get_future();
    deinitialize(AllocAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
    if (Error E = F.get()) {
      Err = joinErrors(std::move(Err), std::move(E));
    }

    // Free the memory.
    auto MB = sys::MemoryBlock(Base.toPtr<void *>(), Size);

    auto EC = sys::Memory::releaseMappedMemory(MB);
    if (EC) {
      Err = joinErrors(std::move(Err), errorCodeToError(EC));
    }

    std::lock_guard<std::mutex> Lock(Mutex);
    Reservations.erase(Base.toPtr<void *>());
  }

  OnReleased(std::move(Err));
}

InProcessMemoryMapper::~InProcessMemoryMapper() {
  std::vector<ExecutorAddr> ReservationAddrs;
  {
    std::lock_guard<std::mutex> Lock(Mutex);

    ReservationAddrs.reserve(Reservations.size());
    for (const auto &R : Reservations) {
      ReservationAddrs.push_back(ExecutorAddr::fromPtr(R.getFirst()));
    }
  }

  std::promise<MSVCPError> P;
  auto F = P.get_future();
  release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
  cantFail(F.get());
}

// SharedMemoryMapper

void SharedMemoryMapper::reserve(size_t NumBytes,
                                 OnReservedFunction OnReserved) {
#if defined(LLVM_ON_UNIX) || defined(_WIN32)

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReserveSignature>(
      SAs.Reserve,
      [this, NumBytes, OnReserved = std::move(OnReserved)](
          Error SerializationErr,
          Expected<std::pair<ExecutorAddr, std::string>> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnReserved(std::move(SerializationErr));
        }

        if (!Result)
          return OnReserved(Result.takeError());

        ExecutorAddr RemoteAddr;
        std::string SharedMemoryName;
        std::tie(RemoteAddr, SharedMemoryName) = std::move(*Result);

        void *LocalAddr = nullptr;

#if defined(LLVM_ON_UNIX)

        int SharedMemoryFile =
            shm_open(SharedMemoryName.c_str(), O_RDWR, 0700);
        if (SharedMemoryFile < 0) {
          return OnReserved(errorCodeToError(
              std::error_code(errno, std::generic_category())));
        }

        // This prevents other processes from accessing the shared memory by
        // name.
        shm_unlink(SharedMemoryName.c_str());

        LocalAddr = mmap(nullptr, NumBytes, PROT_READ | PROT_WRITE, MAP_SHARED,
                         SharedMemoryFile, 0);
        if (LocalAddr == MAP_FAILED) {
          auto EC = std::error_code(errno, std::generic_category());
          // Avoid leaking the file descriptor on the failure path.
          close(SharedMemoryFile);
          return OnReserved(errorCodeToError(EC));
        }

        close(SharedMemoryFile);

#elif defined(_WIN32)

        std::wstring WideSharedMemoryName(SharedMemoryName.begin(),
                                          SharedMemoryName.end());
        HANDLE SharedMemoryFile = OpenFileMappingW(
            FILE_MAP_ALL_ACCESS, FALSE, WideSharedMemoryName.c_str());
        if (!SharedMemoryFile)
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));

        LocalAddr =
            MapViewOfFile(SharedMemoryFile, FILE_MAP_ALL_ACCESS, 0, 0, 0);
        if (!LocalAddr) {
          CloseHandle(SharedMemoryFile);
          return OnReserved(errorCodeToError(mapWindowsError(GetLastError())));
        }

        CloseHandle(SharedMemoryFile);

#endif
        {
          std::lock_guard<std::mutex> Lock(Mutex);
          Reservations.insert({RemoteAddr, {LocalAddr, NumBytes}});
        }

        OnReserved(ExecutorAddrRange(RemoteAddr, NumBytes));
      },
      SAs.Instance, static_cast<uint64_t>(NumBytes));

#else
  OnReserved(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

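// Translate an executor-process address back into a pointer within the local
// shared-memory view of the containing reservation, so content can be written
// in place.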
char *SharedMemoryMapper::prepare(ExecutorAddr Addr, size_t ContentSize) {
  auto R = Reservations.upper_bound(Addr);
  assert(R != Reservations.begin() && "Attempt to prepare unknown range");
  R--;

  ExecutorAddrDiff Offset = Addr - R->first;

  return static_cast<char *>(R->second.LocalAddr) + Offset;
}

void SharedMemoryMapper::initialize(MemoryMapper::AllocInfo &AI,
                                    OnInitializedFunction OnInitialized) {
  auto Reservation = Reservations.find(AI.MappingBase);
  assert(Reservation != Reservations.end() &&
         "Attempt to initialize unreserved range");

  tpctypes::FinalizeRequest FR;

  AI.Actions.swap(FR.Actions);

  FR.Segments.reserve(AI.Segments.size());

  for (auto Segment : AI.Segments) {
    char *Base =
        static_cast<char *>(Reservation->second.LocalAddr) + Segment.Offset;
    std::memset(Base + Segment.ContentSize, 0, Segment.ZeroFillSize);

    tpctypes::SegFinalizeRequest SegReq;
    SegReq.Prot = tpctypes::toWireProtectionFlags(
        static_cast<sys::Memory::ProtectionFlags>(Segment.Prot));
    SegReq.Addr = AI.MappingBase + Segment.Offset;
    SegReq.Size = Segment.ContentSize + Segment.ZeroFillSize;

    FR.Segments.push_back(SegReq);
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceInitializeSignature>(
      SAs.Initialize,
      [OnInitialized = std::move(OnInitialized)](
          Error SerializationErr, Expected<ExecutorAddr> Result) mutable {
        if (SerializationErr) {
          cantFail(Result.takeError());
          return OnInitialized(std::move(SerializationErr));
        }

        OnInitialized(std::move(Result));
      },
      SAs.Instance, AI.MappingBase, std::move(FR));
}

void SharedMemoryMapper::deinitialize(
    ArrayRef<ExecutorAddr> Allocations,
    MemoryMapper::OnDeinitializedFunction OnDeinitialized) {
  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceDeinitializeSignature>(
      SAs.Deinitialize,
      [OnDeinitialized = std::move(OnDeinitialized)](Error SerializationErr,
                                                     Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnDeinitialized(std::move(SerializationErr));
        }

        OnDeinitialized(std::move(Result));
      },
      SAs.Instance, Allocations);
}

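// Unmap the local views of the given reservations, then ask the executor-side
// mapper service to release the remote reservations. Local and remote errors
// are joined into the single Error passed to the callback.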
void SharedMemoryMapper::release(ArrayRef<ExecutorAddr> Bases,
                                 OnReleasedFunction OnReleased) {
#if defined(LLVM_ON_UNIX) || defined(_WIN32)
  Error Err = Error::success();

  {
    std::lock_guard<std::mutex> Lock(Mutex);

    for (auto Base : Bases) {

#if defined(LLVM_ON_UNIX)

      if (munmap(Reservations[Base].LocalAddr, Reservations[Base].Size) != 0)
        Err = joinErrors(std::move(Err), errorCodeToError(std::error_code(
                                             errno, std::generic_category())));

#elif defined(_WIN32)

      if (!UnmapViewOfFile(Reservations[Base].LocalAddr))
        Err = joinErrors(std::move(Err),
                         errorCodeToError(mapWindowsError(GetLastError())));

#endif

      Reservations.erase(Base);
    }
  }

  EPC.callSPSWrapperAsync<
      rt::SPSExecutorSharedMemoryMapperServiceReleaseSignature>(
      SAs.Release,
      [OnReleased = std::move(OnReleased),
       Err = std::move(Err)](Error SerializationErr, Error Result) mutable {
        if (SerializationErr) {
          cantFail(std::move(Result));
          return OnReleased(
              joinErrors(std::move(Err), std::move(SerializationErr)));
        }

        return OnReleased(joinErrors(std::move(Err), std::move(Result)));
      },
      SAs.Instance, Bases);
#else
  OnReleased(make_error<StringError>(
      "SharedMemoryMapper is not supported on this platform yet",
      inconvertibleErrorCode()));
#endif
}

SharedMemoryMapper::~SharedMemoryMapper() {
  std::vector<ExecutorAddr> ReservationAddrs;
  if (!Reservations.empty()) {
    std::lock_guard<std::mutex> Lock(Mutex);
    ReservationAddrs.reserve(Reservations.size());
    for (const auto &R : Reservations) {
      ReservationAddrs.push_back(R.first);
    }
  }

  std::promise<MSVCPError> P;
  auto F = P.get_future();
  release(ReservationAddrs, [&](Error Err) { P.set_value(std::move(Err)); });
  // FIXME: Release can actually fail. The error should be propagated.
  // Meanwhile, a better option is to explicitly call release().
  cantFail(F.get());
}

} // namespace orc

} // namespace llvm