//===------- ObjectLinkingLayer.cpp - JITLink backed ORC ObjectLayer ------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/ExecutionEngine/Orc/ObjectLinkingLayer.h"

#include "llvm/ADT/Optional.h"
#include "llvm/ExecutionEngine/JITLink/EHFrameSupport.h"

#include <vector>

#define DEBUG_TYPE "orc"

using namespace llvm;
using namespace llvm::jitlink;
using namespace llvm::orc;

namespace llvm {
namespace orc {

class ObjectLinkingLayerJITLinkContext final : public JITLinkContext {
public:
  ObjectLinkingLayerJITLinkContext(ObjectLinkingLayer &Layer,
                                   MaterializationResponsibility MR,
                                   std::unique_ptr<MemoryBuffer> ObjBuffer)
      : Layer(Layer), MR(std::move(MR)), ObjBuffer(std::move(ObjBuffer)) {}

  JITLinkMemoryManager &getMemoryManager() override { return Layer.MemMgr; }

  MemoryBufferRef getObjectBuffer() const override {
    return ObjBuffer->getMemBufferRef();
  }

  void notifyFailed(Error Err) override {
    Layer.getExecutionSession().reportError(std::move(Err));
    MR.failMaterialization();
  }

  void lookup(const DenseSet<StringRef> &Symbols,
              JITLinkAsyncLookupContinuation LookupContinuation) override {

    JITDylibSearchList SearchOrder;
    MR.getTargetJITDylib().withSearchOrderDo(
        [&](const JITDylibSearchList &JDs) { SearchOrder = JDs; });

    auto &ES = Layer.getExecutionSession();

    SymbolNameSet InternedSymbols;
    for (auto &S : Symbols)
      InternedSymbols.insert(ES.intern(S));

    // OnResolve -- De-intern the symbols and pass the result to the linker.
    // FIXME: Capture LookupContinuation by move once we have c++14.
    auto SharedLookupContinuation =
        std::make_shared<JITLinkAsyncLookupContinuation>(
            std::move(LookupContinuation));
    auto OnResolve = [SharedLookupContinuation](Expected<SymbolMap> Result) {
      if (!Result)
        (*SharedLookupContinuation)(Result.takeError());
      else {
        AsyncLookupResult LR;
        for (auto &KV : *Result)
          LR[*KV.first] = KV.second;
        (*SharedLookupContinuation)(std::move(LR));
      }
    };

    ES.lookup(SearchOrder, std::move(InternedSymbols), SymbolState::Resolved,
              std::move(OnResolve), [this](const SymbolDependenceMap &Deps) {
                registerDependencies(Deps);
              });
  }

  void notifyResolved(AtomGraph &G) override {
    auto &ES = Layer.getExecutionSession();

    SymbolFlagsMap ExtraSymbolsToClaim;
    bool AutoClaim = Layer.AutoClaimObjectSymbols;

    SymbolMap InternedResult;
    for (auto *DA : G.defined_atoms())
      if (DA->hasName() && DA->isGlobal()) {
        auto InternedName = ES.intern(DA->getName());
        JITSymbolFlags Flags;

        if (DA->isExported())
          Flags |= JITSymbolFlags::Exported;
        if (DA->isWeak())
          Flags |= JITSymbolFlags::Weak;
        if (DA->isCallable())
          Flags |= JITSymbolFlags::Callable;
        if (DA->isCommon())
          Flags |= JITSymbolFlags::Common;

        InternedResult[InternedName] =
            JITEvaluatedSymbol(DA->getAddress(), Flags);
        if (AutoClaim && !MR.getSymbols().count(InternedName)) {
          assert(!ExtraSymbolsToClaim.count(InternedName) &&
                 "Duplicate symbol to claim?");
          ExtraSymbolsToClaim[InternedName] = Flags;
        }
      }

    for (auto *A : G.absolute_atoms())
      if (A->hasName()) {
        auto InternedName = ES.intern(A->getName());
        JITSymbolFlags Flags;
        Flags |= JITSymbolFlags::Absolute;
        if (A->isWeak())
          Flags |= JITSymbolFlags::Weak;
        if (A->isCallable())
          Flags |= JITSymbolFlags::Callable;
        InternedResult[InternedName] =
            JITEvaluatedSymbol(A->getAddress(), Flags);
        if (AutoClaim && !MR.getSymbols().count(InternedName)) {
          assert(!ExtraSymbolsToClaim.count(InternedName) &&
                 "Duplicate symbol to claim?");
          ExtraSymbolsToClaim[InternedName] = Flags;
        }
      }

    if (!ExtraSymbolsToClaim.empty())
      if (auto Err = MR.defineMaterializing(ExtraSymbolsToClaim))
        return notifyFailed(std::move(Err));
    if (auto Err = MR.notifyResolved(InternedResult)) {
      Layer.getExecutionSession().reportError(std::move(Err));
      MR.failMaterialization();
      return;
    }
    Layer.notifyLoaded(MR);
  }

  void notifyFinalized(
      std::unique_ptr<JITLinkMemoryManager::Allocation> A) override {
    if (auto Err = Layer.notifyEmitted(MR, std::move(A))) {
      Layer.getExecutionSession().reportError(std::move(Err));
      MR.failMaterialization();
      return;
    }
    if (auto Err = MR.notifyEmitted()) {
      Layer.getExecutionSession().reportError(std::move(Err));
      MR.failMaterialization();
    }
  }

  AtomGraphPassFunction getMarkLivePass(const Triple &TT) const override {
    return [this](AtomGraph &G) { return markResponsibilitySymbolsLive(G); };
  }

  Error modifyPassConfig(const Triple &TT, PassConfiguration &Config) override {
    // Add passes to mark duplicate defs as should-discard, and to walk the
    // atom graph to build the symbol dependence graph.
    Config.PrePrunePasses.push_back(
        [this](AtomGraph &G) { return markSymbolsToDiscard(G); });
    Config.PostPrunePasses.push_back(
        [this](AtomGraph &G) { return computeNamedSymbolDependencies(G); });

    Layer.modifyPassConfig(MR, TT, Config);

    return Error::success();
  }

private:
  using AnonAtomNamedDependenciesMap =
      DenseMap<const DefinedAtom *, SymbolNameSet>;

  Error markSymbolsToDiscard(AtomGraph &G) {
    auto &ES = Layer.getExecutionSession();
    for (auto *DA : G.defined_atoms())
      if (DA->isWeak() && DA->hasName()) {
        auto S = ES.intern(DA->getName());
        auto I = MR.getSymbols().find(S);
        if (I == MR.getSymbols().end())
          DA->setShouldDiscard(true);
      }

    for (auto *A : G.absolute_atoms())
      if (A->isWeak() && A->hasName()) {
        auto S = ES.intern(A->getName());
        auto I = MR.getSymbols().find(S);
        if (I == MR.getSymbols().end())
          A->setShouldDiscard(true);
      }

    return Error::success();
  }

  Error markResponsibilitySymbolsLive(AtomGraph &G) const {
    auto &ES = Layer.getExecutionSession();
    for (auto *DA : G.defined_atoms())
      if (DA->hasName() && MR.getSymbols().count(ES.intern(DA->getName())))
        DA->setLive(true);
    return Error::success();
  }

  Error computeNamedSymbolDependencies(AtomGraph &G) {
    auto &ES = MR.getTargetJITDylib().getExecutionSession();
    auto AnonDeps = computeAnonDeps(G);

    for (auto *DA : G.defined_atoms()) {

      // Skip anonymous and non-global atoms: we do not need dependencies for
      // these.
      if (!DA->hasName() || !DA->isGlobal())
        continue;

      auto DAName = ES.intern(DA->getName());
      SymbolNameSet &DADeps = NamedSymbolDeps[DAName];

      for (auto &E : DA->edges()) {
        auto &TA = E.getTarget();

        if (TA.hasName())
          DADeps.insert(ES.intern(TA.getName()));
        else {
          assert(TA.isDefined() && "Anonymous atoms must be defined");
          auto &DTA = static_cast<DefinedAtom &>(TA);
          auto I = AnonDeps.find(&DTA);
          if (I != AnonDeps.end())
            for (auto &S : I->second)
              DADeps.insert(S);
        }
      }
    }

    return Error::success();
  }

  AnonAtomNamedDependenciesMap computeAnonDeps(AtomGraph &G) {

    auto &ES = MR.getTargetJITDylib().getExecutionSession();
    AnonAtomNamedDependenciesMap DepMap;

    // For all anonymous atoms:
    // (1) Add their named dependencies.
    // (2) Add them to the worklist for further iteration if they depend on
    //     any other anonymous atoms.
    struct WorklistEntry {
      WorklistEntry(DefinedAtom *DA, DenseSet<DefinedAtom *> DAAnonDeps)
          : DA(DA), DAAnonDeps(std::move(DAAnonDeps)) {}

      DefinedAtom *DA = nullptr;
      DenseSet<DefinedAtom *> DAAnonDeps;
    };
    std::vector<WorklistEntry> Worklist;
    for (auto *DA : G.defined_atoms())
      if (!DA->hasName()) {
        auto &DANamedDeps = DepMap[DA];
        DenseSet<DefinedAtom *> DAAnonDeps;

        for (auto &E : DA->edges()) {
          auto &TA = E.getTarget();
          if (TA.hasName())
            DANamedDeps.insert(ES.intern(TA.getName()));
          else {
            assert(TA.isDefined() && "Anonymous atoms must be defined");
            DAAnonDeps.insert(static_cast<DefinedAtom *>(&TA));
          }
        }

        if (!DAAnonDeps.empty())
          Worklist.push_back(WorklistEntry(DA, std::move(DAAnonDeps)));
      }

    // Loop over all anonymous atoms with anonymous dependencies, propagating
    // their respective *named* dependencies. Iterate until we hit a stable
    // state.
    bool Changed;
    do {
      Changed = false;
      for (auto &WLEntry : Worklist) {
        auto *DA = WLEntry.DA;
        auto &DANamedDeps = DepMap[DA];
        auto &DAAnonDeps = WLEntry.DAAnonDeps;

        for (auto *TA : DAAnonDeps) {
          auto I = DepMap.find(TA);
          if (I != DepMap.end())
            for (const auto &S : I->second)
              Changed |= DANamedDeps.insert(S).second;
        }
      }
    } while (Changed);

    return DepMap;
  }

  void registerDependencies(const SymbolDependenceMap &QueryDeps) {
    for (auto &NamedDepsEntry : NamedSymbolDeps) {
      auto &Name = NamedDepsEntry.first;
      auto &NameDeps = NamedDepsEntry.second;
      SymbolDependenceMap SymbolDeps;

      for (const auto &QueryDepsEntry : QueryDeps) {
        JITDylib &SourceJD = *QueryDepsEntry.first;
        const SymbolNameSet &Symbols = QueryDepsEntry.second;
        auto &DepsForJD = SymbolDeps[&SourceJD];

        for (const auto &S : Symbols)
          if (NameDeps.count(S))
            DepsForJD.insert(S);

        if (DepsForJD.empty())
          SymbolDeps.erase(&SourceJD);
      }

      MR.addDependencies(Name, SymbolDeps);
    }
  }

  ObjectLinkingLayer &Layer;
  MaterializationResponsibility MR;
  std::unique_ptr<MemoryBuffer> ObjBuffer;
  DenseMap<SymbolStringPtr, SymbolNameSet> NamedSymbolDeps;
};

ObjectLinkingLayer::Plugin::~Plugin() {}

ObjectLinkingLayer::ObjectLinkingLayer(ExecutionSession &ES,
                                       JITLinkMemoryManager &MemMgr)
    : ObjectLayer(ES), MemMgr(MemMgr) {}

ObjectLinkingLayer::~ObjectLinkingLayer() {
  if (auto Err = removeAllModules())
    getExecutionSession().reportError(std::move(Err));
}

void ObjectLinkingLayer::emit(MaterializationResponsibility R,
                              std::unique_ptr<MemoryBuffer> O) {
  assert(O && "Object must not be null");
  jitLink(std::make_unique<ObjectLinkingLayerJITLinkContext>(
      *this, std::move(R), std::move(O)));
}

void ObjectLinkingLayer::modifyPassConfig(MaterializationResponsibility &MR,
                                          const Triple &TT,
                                          PassConfiguration &PassConfig) {
  for (auto &P : Plugins)
    P->modifyPassConfig(MR, TT, PassConfig);
}

void ObjectLinkingLayer::notifyLoaded(MaterializationResponsibility &MR) {
  for (auto &P : Plugins)
    P->notifyLoaded(MR);
}

Error ObjectLinkingLayer::notifyEmitted(MaterializationResponsibility &MR,
                                        AllocPtr Alloc) {
  Error Err = Error::success();
  for (auto &P : Plugins)
    Err = joinErrors(std::move(Err), P->notifyEmitted(MR));

  if (Err)
    return Err;

  {
    std::lock_guard<std::mutex> Lock(LayerMutex);
    // Track the allocation by VModuleKey where one is available so that
    // removeModule can find and deallocate it later.
    if (auto K = MR.getVModuleKey())
      TrackedAllocs[K] = std::move(Alloc);
    else
      UntrackedAllocs.push_back(std::move(Alloc));
  }

  return Error::success();
}

Error ObjectLinkingLayer::removeModule(VModuleKey K) {
  Error Err = Error::success();

  for (auto &P : Plugins)
    Err = joinErrors(std::move(Err), P->notifyRemovingModule(K));

  AllocPtr Alloc;

  {
    std::lock_guard<std::mutex> Lock(LayerMutex);
    auto AllocItr = TrackedAllocs.find(K);
    Alloc = std::move(AllocItr->second);
    TrackedAllocs.erase(AllocItr);
  }

  assert(Alloc && "No allocation for key K");

  return joinErrors(std::move(Err), Alloc->deallocate());
}

Error ObjectLinkingLayer::removeAllModules() {

  Error Err = Error::success();

  for (auto &P : Plugins)
    Err = joinErrors(std::move(Err), P->notifyRemovingAllModules());

  std::vector<AllocPtr> Allocs;
  {
    std::lock_guard<std::mutex> Lock(LayerMutex);
    Allocs = std::move(UntrackedAllocs);

    for (auto &KV : TrackedAllocs)
      Allocs.push_back(std::move(KV.second));

    TrackedAllocs.clear();
  }

  while (!Allocs.empty()) {
    Err = joinErrors(std::move(Err), Allocs.back()->deallocate());
    Allocs.pop_back();
  }

  return Err;
}

EHFrameRegistrationPlugin::EHFrameRegistrationPlugin(
    jitlink::EHFrameRegistrar &Registrar)
    : Registrar(Registrar) {}

void EHFrameRegistrationPlugin::modifyPassConfig(
    MaterializationResponsibility &MR, const Triple &TT,
    PassConfiguration &PassConfig) {
  assert(!InProcessLinks.count(&MR) && "Link for MR already being tracked?");

  PassConfig.PostFixupPasses.push_back(
      createEHFrameRecorderPass(TT, [this, &MR](JITTargetAddress Addr,
                                                size_t Size) {
        if (Addr)
          InProcessLinks[&MR] = { Addr, Size };
      }));
}

Error EHFrameRegistrationPlugin::notifyEmitted(
    MaterializationResponsibility &MR) {

  auto EHFrameRangeItr = InProcessLinks.find(&MR);
  if (EHFrameRangeItr == InProcessLinks.end())
    return Error::success();

  auto EHFrameRange = EHFrameRangeItr->second;
  assert(EHFrameRange.Addr && "eh-frame addr to register can not be null");

  InProcessLinks.erase(EHFrameRangeItr);
  if (auto Key = MR.getVModuleKey())
    TrackedEHFrameRanges[Key] = EHFrameRange;
  else
    UntrackedEHFrameRanges.push_back(EHFrameRange);

  return Registrar.registerEHFrames(EHFrameRange.Addr, EHFrameRange.Size);
}

Error EHFrameRegistrationPlugin::notifyRemovingModule(VModuleKey K) {
  auto EHFrameRangeItr = TrackedEHFrameRanges.find(K);
  if (EHFrameRangeItr == TrackedEHFrameRanges.end())
    return Error::success();

  auto EHFrameRange = EHFrameRangeItr->second;
  assert(EHFrameRange.Addr && "Tracked eh-frame range must not be null");

  TrackedEHFrameRanges.erase(EHFrameRangeItr);

  return Registrar.deregisterEHFrames(EHFrameRange.Addr, EHFrameRange.Size);
}

Error EHFrameRegistrationPlugin::notifyRemovingAllModules() {

  std::vector<EHFrameRange> EHFrameRanges = std::move(UntrackedEHFrameRanges);
  EHFrameRanges.reserve(EHFrameRanges.size() + TrackedEHFrameRanges.size());

  for (auto &KV : TrackedEHFrameRanges)
    EHFrameRanges.push_back(KV.second);

  TrackedEHFrameRanges.clear();

  Error Err = Error::success();

  while (!EHFrameRanges.empty()) {
    auto EHFrameRange = EHFrameRanges.back();
    assert(EHFrameRange.Addr && "Untracked eh-frame range must not be null");
    EHFrameRanges.pop_back();
    Err = joinErrors(std::move(Err),
                     Registrar.deregisterEHFrames(EHFrameRange.Addr,
                                                  EHFrameRange.Size));
  }

  return Err;
}

} // End namespace orc.
} // End namespace llvm.