//===- LoopLoadElimination.cpp - Loop Load Elimination Pass ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a loop-aware load elimination pass.
//
// It uses LoopAccessAnalysis to identify loop-carried dependences with a
// distance of one between stores and loads. These form the candidates for the
// transformation. The source value of each store is then propagated to the
// users of the corresponding load. This makes the load dead.
//
// The pass can also version the loop and add memchecks in order to prove that
// may-aliasing stores can't change the value in memory before it's read by the
// load.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Transforms/Utils/LoopVersioning.h"
#include <algorithm>
#include <forward_list>
#include <functional>
#include <set>
#include <tuple>

#define LLE_OPTION "loop-load-elim"
#define DEBUG_TYPE LLE_OPTION

using namespace llvm;

static cl::opt<unsigned> CheckPerElim(
    "runtime-check-per-loop-load-elim", cl::Hidden,
    cl::desc("Max number of memchecks allowed per eliminated load on average"),
    cl::init(1));

static cl::opt<unsigned> LoadElimSCEVCheckThreshold(
    "loop-load-elimination-scev-check-threshold", cl::init(8), cl::Hidden,
    cl::desc("The maximum number of SCEV checks allowed for Loop "
             "Load Elimination"));

STATISTIC(NumLoopLoadEliminted, "Number of loads eliminated by LLE");

namespace {

/// \brief Represent a store-to-load forwarding candidate.
struct StoreToLoadForwardingCandidate {
  LoadInst *Load;
  StoreInst *Store;

  StoreToLoadForwardingCandidate(LoadInst *Load, StoreInst *Store)
      : Load(Load), Store(Store) {}

  /// \brief Return true if the dependence from the store to the load has a
  /// distance of one. E.g. A[i+1] = A[i]
  bool isDependenceDistanceOfOne(PredicatedScalarEvolution &PSE,
                                 Loop *L) const {
    Value *LoadPtr = Load->getPointerOperand();
    Value *StorePtr = Store->getPointerOperand();
    Type *LoadPtrType = LoadPtr->getType();
    Type *LoadType = LoadPtrType->getPointerElementType();

    assert(LoadPtrType->getPointerAddressSpace() ==
               StorePtr->getType()->getPointerAddressSpace() &&
           LoadType == StorePtr->getType()->getPointerElementType() &&
           "Should be a known dependence");

    // Currently we only support accesses with unit stride. FIXME: we should
    // be able to handle non-unit stride as well, as long as the stride is
    // equal to the dependence distance.
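    // (For illustration of the FIXME above: a loop like
    //   for (i = 0; i < n; i += 2)
    //     A[i + 2] = f(A[i]);
    // has a stride of two elements and a dependence distance of two elements,
    // so the store still forwards across exactly one iteration just like the
    // unit-stride case; we nevertheless reject it for now.)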
    if (isStridedPtr(PSE, LoadPtr, L) != 1 ||
        isStridedPtr(PSE, StorePtr, L) != 1)
      return false;

    auto &DL = Load->getParent()->getModule()->getDataLayout();
    unsigned TypeByteSize = DL.getTypeAllocSize(const_cast<Type *>(LoadType));

    auto *LoadPtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(LoadPtr));
    auto *StorePtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(StorePtr));

    // We don't need to check non-wrapping here because forward/backward
    // dependence wouldn't be valid if these weren't monotonic accesses.
    auto *Dist = cast<SCEVConstant>(
        PSE.getSE()->getMinusSCEV(StorePtrSCEV, LoadPtrSCEV));
    const APInt &Val = Dist->getAPInt();
    return Val == TypeByteSize;
  }

  Value *getLoadPtr() const { return Load->getPointerOperand(); }

#ifndef NDEBUG
  friend raw_ostream &operator<<(raw_ostream &OS,
                                 const StoreToLoadForwardingCandidate &Cand) {
    OS << *Cand.Store << " -->\n";
    OS.indent(2) << *Cand.Load << "\n";
    return OS;
  }
#endif
};

/// \brief Check if the store dominates all latches, so that, as long as there
/// is no intervening store, this value will be loaded in the next iteration.
bool doesStoreDominatesAllLatches(BasicBlock *StoreBlock, Loop *L,
                                  DominatorTree *DT) {
  SmallVector<BasicBlock *, 8> Latches;
  L->getLoopLatches(Latches);
  return std::all_of(Latches.begin(), Latches.end(),
                     [&](const BasicBlock *Latch) {
                       return DT->dominates(StoreBlock, Latch);
                     });
}

/// \brief The per-loop class that does most of the work.
class LoadEliminationForLoop {
public:
  LoadEliminationForLoop(Loop *L, LoopInfo *LI, const LoopAccessInfo &LAI,
                         DominatorTree *DT)
      : L(L), LI(LI), LAI(LAI), DT(DT), PSE(LAI.PSE) {}

  /// \brief Look through the loop-carried and loop-independent dependences in
  /// this loop and find store->load dependences.
  ///
  /// Note that no candidate is returned if LAA has failed to analyze the loop
  /// (e.g. if it's not bottom-tested, contains volatile memops, etc.)
  std::forward_list<StoreToLoadForwardingCandidate>
  findStoreToLoadDependences(const LoopAccessInfo &LAI) {
    std::forward_list<StoreToLoadForwardingCandidate> Candidates;

    const auto *Deps = LAI.getDepChecker().getDependences();
    if (!Deps)
      return Candidates;

    // Find store->load dependences (consequently true dep). Both lexically
    // forward and backward dependences qualify. Disqualify loads that have
    // other unknown dependences.

    SmallSet<Instruction *, 4> LoadsWithUnknownDepedence;

    for (const auto &Dep : *Deps) {
      Instruction *Source = Dep.getSource(LAI);
      Instruction *Destination = Dep.getDestination(LAI);

      if (Dep.Type == MemoryDepChecker::Dependence::Unknown) {
        if (isa<LoadInst>(Source))
          LoadsWithUnknownDepedence.insert(Source);
        if (isa<LoadInst>(Destination))
          LoadsWithUnknownDepedence.insert(Destination);
        continue;
      }

      if (Dep.isBackward())
        // Note that the designations source and destination follow the
        // program order, i.e. source is always first. (The direction is given
        // by the DepType.)
        std::swap(Source, Destination);
      else
        assert(Dep.isForward() && "Needs to be a forward dependence");

      auto *Store = dyn_cast<StoreInst>(Source);
      if (!Store)
        continue;
      auto *Load = dyn_cast<LoadInst>(Destination);
      if (!Load)
        continue;
      Candidates.emplace_front(Load, Store);
    }

    if (!LoadsWithUnknownDepedence.empty())
      Candidates.remove_if([&](const StoreToLoadForwardingCandidate &C) {
        return LoadsWithUnknownDepedence.count(C.Load);
      });

    return Candidates;
  }

  /// \brief Return the index of the instruction according to program order.
  unsigned getInstrIndex(Instruction *Inst) {
    auto I = InstOrder.find(Inst);
    assert(I != InstOrder.end() && "No index for instruction");
    return I->second;
  }

  /// \brief If a load has multiple candidates associated (i.e. different
  /// stores), it means that it could be forwarding from multiple stores
  /// depending on control flow. Remove these candidates.
  ///
  /// Here, we rely on LAA to include the relevant loop-independent
  /// dependences. LAA is known to omit these in the very simple case when the
  /// read and the write within an alias set always take place using the
  /// *same* pointer.
  ///
  /// However, we know that this is not the case here, i.e. we can rely on LAA
  /// to provide us with loop-independent dependences for the cases we're
  /// interested in. Consider the case for example where a loop-independent
  /// dependence S1->S2 invalidates the forwarding S3->S2.
  ///
  ///   A[i]   = ...   (S1)
  ///   ...    = A[i]  (S2)
  ///   A[i+1] = ...   (S3)
  ///
  /// LAA will perform dependence analysis here because there are two
  /// *different* pointers involved in the same alias set (&A[i] and &A[i+1]).
  void removeDependencesFromMultipleStores(
      std::forward_list<StoreToLoadForwardingCandidate> &Candidates) {
    // If Store is nullptr it means that we have multiple stores forwarding to
    // this load.
    typedef DenseMap<LoadInst *, const StoreToLoadForwardingCandidate *>
        LoadToSingleCandT;
    LoadToSingleCandT LoadToSingleCand;

    for (const auto &Cand : Candidates) {
      bool NewElt;
      LoadToSingleCandT::iterator Iter;

      std::tie(Iter, NewElt) =
          LoadToSingleCand.insert(std::make_pair(Cand.Load, &Cand));
      if (!NewElt) {
        const StoreToLoadForwardingCandidate *&OtherCand = Iter->second;
        // Already multiple stores forward to this load.
        if (OtherCand == nullptr)
          continue;

        // Handle the very basic case when the two stores are in the same
        // block so deciding which one forwards is easy. The later one
        // forwards as long as they both have a dependence distance of one to
        // the load.
        if (Cand.Store->getParent() == OtherCand->Store->getParent() &&
            Cand.isDependenceDistanceOfOne(PSE, L) &&
            OtherCand->isDependenceDistanceOfOne(PSE, L)) {
          // They are in the same block, the later one will forward to the
          // load.
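          // (The load in the next iteration reads whatever value is left in
          // memory at the end of this iteration, i.e. the value written by
          // the store that comes later in program order.)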
          if (getInstrIndex(OtherCand->Store) < getInstrIndex(Cand.Store))
            OtherCand = &Cand;
        } else
          OtherCand = nullptr;
      }
    }

    Candidates.remove_if([&](const StoreToLoadForwardingCandidate &Cand) {
      if (LoadToSingleCand[Cand.Load] != &Cand) {
        DEBUG(dbgs() << "Removing from candidates: \n" << Cand
                     << " The load may have multiple stores forwarding to "
                     << "it\n");
        return true;
      }
      return false;
    });
  }

  /// \brief Given two pointer operations by their RuntimePointerChecking
  /// indices, return true if they require an alias check.
  ///
  /// We need a check if one is a pointer for a candidate load and the other is
  /// a pointer for a possibly intervening store.
  bool needsChecking(unsigned PtrIdx1, unsigned PtrIdx2,
                     const SmallSet<Value *, 4> &PtrsWrittenOnFwdingPath,
                     const std::set<Value *> &CandLoadPtrs) {
    Value *Ptr1 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx1).PointerValue;
    Value *Ptr2 =
        LAI.getRuntimePointerChecking()->getPointerInfo(PtrIdx2).PointerValue;
    return ((PtrsWrittenOnFwdingPath.count(Ptr1) && CandLoadPtrs.count(Ptr2)) ||
            (PtrsWrittenOnFwdingPath.count(Ptr2) && CandLoadPtrs.count(Ptr1)));
  }

  /// \brief Return pointers that are possibly written to on the path from a
  /// forwarding store to a load.
  ///
  /// These pointers need to be alias-checked against the forwarding
  /// candidates.
  SmallSet<Value *, 4> findPointersWrittenOnForwardingPath(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {
    // From FirstStore to LastLoad neither of the elimination candidate loads
    // should overlap with any of the stores.
    //
    // E.g.:
    //
    // st1 C[i]
    // ld1 B[i] <-------,
    // ld0 A[i] <----,  |              * LastLoad
    // ...           |  |
    // st2 E[i]      |  |
    // st3 B[i+1] -- | -'              * FirstStore
    // st0 A[i+1] ---'
    // st4 D[i]
    //
    // st0 forwards to ld0 if the accesses in st4 and st1 don't overlap with
    // ld0.

    LoadInst *LastLoad =
        std::max_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Load) < getInstrIndex(B.Load);
                         })
            ->Load;
    StoreInst *FirstStore =
        std::min_element(Candidates.begin(), Candidates.end(),
                         [&](const StoreToLoadForwardingCandidate &A,
                             const StoreToLoadForwardingCandidate &B) {
                           return getInstrIndex(A.Store) <
                                  getInstrIndex(B.Store);
                         })
            ->Store;

    // We're looking for stores after the first forwarding store until the end
    // of the loop, then from the beginning of the loop until the last
    // forwarded-to load. Collect the pointers for these stores.
    SmallSet<Value *, 4> PtrsWrittenOnFwdingPath;

    auto InsertStorePtr = [&](Instruction *I) {
      if (auto *S = dyn_cast<StoreInst>(I))
        PtrsWrittenOnFwdingPath.insert(S->getPointerOperand());
    };
    const auto &MemInstrs = LAI.getDepChecker().getMemoryInstructions();
    std::for_each(MemInstrs.begin() + getInstrIndex(FirstStore) + 1,
                  MemInstrs.end(), InsertStorePtr);
    std::for_each(MemInstrs.begin(), &MemInstrs[getInstrIndex(LastLoad)],
                  InsertStorePtr);

    return PtrsWrittenOnFwdingPath;
  }

  /// \brief Determine the pointer alias checks to prove that there are no
  /// intervening stores.
  SmallVector<RuntimePointerChecking::PointerCheck, 4> collectMemchecks(
      const SmallVectorImpl<StoreToLoadForwardingCandidate> &Candidates) {

    SmallSet<Value *, 4> PtrsWrittenOnFwdingPath =
        findPointersWrittenOnForwardingPath(Candidates);

    // Collect the pointers of the candidate loads.
    // FIXME: SmallSet does not work with std::inserter.
    std::set<Value *> CandLoadPtrs;
    std::transform(Candidates.begin(), Candidates.end(),
                   std::inserter(CandLoadPtrs, CandLoadPtrs.begin()),
                   std::mem_fn(&StoreToLoadForwardingCandidate::getLoadPtr));

    const auto &AllChecks = LAI.getRuntimePointerChecking()->getChecks();
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks;

    std::copy_if(AllChecks.begin(), AllChecks.end(), std::back_inserter(Checks),
                 [&](const RuntimePointerChecking::PointerCheck &Check) {
                   for (auto PtrIdx1 : Check.first->Members)
                     for (auto PtrIdx2 : Check.second->Members)
                       if (needsChecking(PtrIdx1, PtrIdx2,
                                         PtrsWrittenOnFwdingPath, CandLoadPtrs))
                         return true;
                   return false;
                 });

    DEBUG(dbgs() << "\nPointer Checks (count: " << Checks.size() << "):\n");
    DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));

    return Checks;
  }

  /// \brief Perform the transformation for a candidate.
  void
  propagateStoredValueToLoadUsers(const StoreToLoadForwardingCandidate &Cand,
                                  SCEVExpander &SEE) {
    //
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    Value *Ptr = Cand.Load->getPointerOperand();
    auto *PtrSCEV = cast<SCEVAddRecExpr>(PSE.getSCEV(Ptr));
    auto *PH = L->getLoopPreheader();
    Value *InitialPtr = SEE.expandCodeFor(PtrSCEV->getStart(), Ptr->getType(),
                                          PH->getTerminator());
    Value *Initial =
        new LoadInst(InitialPtr, "load_initial", PH->getTerminator());
    PHINode *PHI = PHINode::Create(Initial->getType(), 2, "store_forwarded",
                                   &L->getHeader()->front());
    PHI->addIncoming(Initial, PH);
    PHI->addIncoming(Cand.Store->getOperand(0), L->getLoopLatch());

    Cand.Load->replaceAllUsesWith(PHI);
  }

  /// \brief Top-level driver for each loop: find store->load forwarding
  /// candidates, add run-time checks and perform transformation.
  bool processLoop() {
    DEBUG(dbgs() << "\nIn \"" << L->getHeader()->getParent()->getName()
                 << "\" checking " << *L << "\n");
    // Look for store-to-load forwarding cases across the
    // backedge. E.g.:
    //
    // loop:
    //      %x = load %gep_i
    //         = ... %x
    //      store %y, %gep_i_plus_1
    //
    // =>
    //
    // ph:
    //      %x.initial = load %gep_0
    // loop:
    //      %x.storeforward = phi [%x.initial, %ph] [%y, %loop]
    //      %x = load %gep_i            <---- now dead
    //         = ... %x.storeforward
    //      store %y, %gep_i_plus_1

    // First start with store->load dependences.
    auto StoreToLoadDependences = findStoreToLoadDependences(LAI);
    if (StoreToLoadDependences.empty())
      return false;

    // Generate an index for each load and store according to the original
    // program order. This will be used later.
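    // (Specifically, the indices are used to pick the forwarding store when a
    // load has multiple same-block candidates and to bound the range of
    // instructions scanned in findPointersWrittenOnForwardingPath.)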
    InstOrder = LAI.getDepChecker().generateInstructionOrderMap();

    // To keep things simple for now, remove those where the load is
    // potentially fed by multiple stores.
    removeDependencesFromMultipleStores(StoreToLoadDependences);
    if (StoreToLoadDependences.empty())
      return false;

    // Filter the candidates further.
    SmallVector<StoreToLoadForwardingCandidate, 4> Candidates;
    unsigned NumForwarding = 0;
    for (const StoreToLoadForwardingCandidate &Cand : StoreToLoadDependences) {
      DEBUG(dbgs() << "Candidate " << Cand);
      // Only propagate the value if the load and store pointer types match.
      if (Cand.Store->getPointerOperand()->getType() !=
          Cand.Load->getPointerOperand()->getType())
        continue;

      // Make sure that the stored value is available everywhere in the loop
      // in the next iteration.
      if (!doesStoreDominatesAllLatches(Cand.Store->getParent(), L, DT))
        continue;

      // Check whether the SCEV difference is the same as the induction step,
      // so that the value stored in this iteration is the one loaded in the
      // next iteration.
      if (!Cand.isDependenceDistanceOfOne(PSE, L))
        continue;

      ++NumForwarding;
      DEBUG(dbgs()
            << NumForwarding
            << ". Valid store-to-load forwarding across the loop backedge\n");
      Candidates.push_back(Cand);
    }
    if (Candidates.empty())
      return false;

    // Check intervening may-alias stores. These need runtime checks for alias
    // disambiguation.
    SmallVector<RuntimePointerChecking::PointerCheck, 4> Checks =
        collectMemchecks(Candidates);

    // Too many checks are likely to outweigh the benefits of forwarding.
    if (Checks.size() > Candidates.size() * CheckPerElim) {
      DEBUG(dbgs() << "Too many run-time checks needed.\n");
      return false;
    }

    if (LAI.PSE.getUnionPredicate().getComplexity() >
        LoadElimSCEVCheckThreshold) {
      DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
      return false;
    }

    if (!Checks.empty() || !LAI.PSE.getUnionPredicate().isAlwaysTrue()) {
      if (L->getHeader()->getParent()->optForSize()) {
        DEBUG(dbgs() << "Versioning is needed but not allowed when optimizing "
                        "for size.\n");
        return false;
      }

      // Point of no-return, start the transformation. First, version the loop
      // if necessary.

      LoopVersioning LV(LAI, L, LI, DT, PSE.getSE(), false);
      LV.setAliasChecks(std::move(Checks));
      LV.setSCEVChecks(LAI.PSE.getUnionPredicate());
      LV.versionLoop();
    }

    // Next, propagate the value stored by the store to the users of the load.
    // Also for the first iteration, generate the initial value of the load.
    SCEVExpander SEE(*PSE.getSE(), L->getHeader()->getModule()->getDataLayout(),
                     "storeforward");
    for (const auto &Cand : Candidates)
      propagateStoredValueToLoadUsers(Cand, SEE);
    NumLoopLoadEliminted += NumForwarding;

    return true;
  }

private:
  Loop *L;

  /// \brief Maps the load/store instructions to their index according to
  /// program order.
  DenseMap<Instruction *, unsigned> InstOrder;

  // Analyses used.
  LoopInfo *LI;
  const LoopAccessInfo &LAI;
  DominatorTree *DT;
  PredicatedScalarEvolution PSE;
};

/// \brief The pass. Most of the work is delegated to the per-loop
/// LoadEliminationForLoop class.
class LoopLoadElimination : public FunctionPass {
public:
  LoopLoadElimination() : FunctionPass(ID) {
    initializeLoopLoadEliminationPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    auto *LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
    auto *LAA = &getAnalysis<LoopAccessAnalysis>();
    auto *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

    // Build up a worklist of inner-most loops to process. This is necessary
    // as versioning a loop creates new loops and can invalidate iterators
    // across the loops.
    SmallVector<Loop *, 8> Worklist;

    for (Loop *TopLevelLoop : *LI)
      for (Loop *L : depth_first(TopLevelLoop))
        // We only handle inner-most loops.
        if (L->empty())
          Worklist.push_back(L);

    // Now walk the identified inner loops.
    bool Changed = false;
    for (Loop *L : Worklist) {
      const LoopAccessInfo &LAI = LAA->getInfo(L, ValueToValueMap());
      // The actual work is performed by LoadEliminationForLoop.
      LoadEliminationForLoop LEL(L, LI, LAI, DT);
      Changed |= LEL.processLoop();
    }

    // Report whether any loop in the function was transformed.
    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<LoopAccessAnalysis>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }

  static char ID;
};
}

char LoopLoadElimination::ID;
static const char LLE_name[] = "Loop Load Elimination";

INITIALIZE_PASS_BEGIN(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopAccessAnalysis)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(LoopLoadElimination, LLE_OPTION, LLE_name, false, false)

namespace llvm {
FunctionPass *createLoopLoadEliminationPass() {
  return new LoopLoadElimination();
}
}