//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// \brief Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash min/max/abs (cmp + select) to allow for commuted operands.
  // Min/max may also have a non-canonical compare predicate (e.g., the
  // compare for smin may use 'sgt' rather than 'slt'), and non-canonical
  // operands in the compare.
  Value *A, *B;
  SelectPatternFlavor SPF = matchSelectPattern(Inst, A, B).Flavor;
  // TODO: We should also detect FP min/max.
  if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
      SPF == SPF_UMIN || SPF == SPF_UMAX ||
      SPF == SPF_ABS || SPF == SPF_NABS) {
    if (A > B)
      std::swap(A, B);
    return hash_combine(Inst->getOpcode(), SPF, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
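
// Illustrative example (not part of the original source): because commutative
// binary operators hash their operands in a canonical order, the two adds
// below hash identically, so the second can be CSE'd to the first:
//
//   %x = add i32 %a, %b
//   %y = add i32 %b, %a   ; same SimpleValue as %x
//
// The same canonicalization applies to compares (via the swapped predicate)
// and to the min/max/abs select patterns matched above.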

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If the instructions are not strictly identical, they may still be equal
  // if the operation is commutative.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  Value *LHSA, *LHSB;
  SelectPatternFlavor LSPF = matchSelectPattern(LHSI, LHSA, LHSB).Flavor;
  // TODO: We should also detect FP min/max.
  if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
      LSPF == SPF_UMIN || LSPF == SPF_UMAX ||
      LSPF == SPF_ABS || LSPF == SPF_NABS) {
    Value *RHSA, *RHSB;
    SelectPatternFlavor RSPF = matchSelectPattern(RHSI, RHSA, RHSB).Flavor;
    return (LSPF == RSPF && ((LHSA == RHSA && LHSB == RHSB) ||
                             (LHSA == RHSB && LHSB == RHSA)));
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// \brief Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// \brief A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count. The current generation count is incremented
  /// after every operation that may write to memory, which ensures that we
  /// only CSE loads with other loads that have no intervening store. Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations. Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
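  ///
  /// Illustrative example (not from the original source) of the generation
  /// scheme: the store below bumps CurrentGeneration, so the second load is
  /// recorded under a newer generation than the first and is not trivially
  /// forwarded from it (unless MemorySSA can prove the store is no clobber):
  ///
  ///   %v1 = load i32, i32* %p    ; available at generation G
  ///   store i32 0, i32* %q       ; CurrentGeneration becomes G+1
  ///   %v2 = load i32, i32* %p    ; generation mismatch; not trivially CSE'd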
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    // TODO: Remove this flag. It would be strictly stronger to add a record
    // to the AvailableInvariant table when passing the invariant load instead.
    bool IsInvariant = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic, bool IsInvariant)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic), IsInvariant(IsInvariant) {}
  };

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// \brief A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// \brief This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// \brief Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a
  // depth-first traversal of the tree. This includes scopes for values,
  // loads, and calls as well as the generation. There is a child iterator
  // so that the children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
              DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls)
          {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };

  /// \brief Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
      : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. We handle the
    // phi case eagerly here. The non-optimized MemoryUse case is lazily
    // updated by MemorySSA's getClobberingMemoryAccess.
    if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
      // Optimize MemoryPhi nodes that may become redundant by having all the
      // same input values once MA is removed.
      SmallSetVector<MemoryPhi *, 4> PhisToCheck;
      SmallVector<MemoryAccess *, 8> WorkQueue;
      WorkQueue.push_back(MA);
      // Process MemoryPhi nodes in FIFO order using an ever-growing vector
      // since we shouldn't be processing that many phis and this will avoid
      // an allocation in almost all cases.
      for (unsigned I = 0; I < WorkQueue.size(); ++I) {
        MemoryAccess *WI = WorkQueue[I];

        for (auto *U : WI->users())
          if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
            PhisToCheck.insert(MP);

        MSSAUpdater->removeMemoryAccess(WI);

        for (MemoryPhi *MP : PhisToCheck) {
          MemoryAccess *FirstIn = MP->getIncomingValue(0);
          if (llvm::all_of(MP->incoming_values(),
                           [=](Use &In) { return In == FirstIn; }))
            WorkQueue.push_back(MP);
        }
        PhisToCheck.clear();
      }
    }
  }
};

} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// In both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction. Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef =
      MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
  return MSSA->dominates(LaterDef, EarlierMA);
}

bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->getMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get. TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live-out memory values are still
  // current in this block. If this block has multiple predecessors, then they
  // could have invalidated the live-out memory values of our parent. For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path. We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value. Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
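  // A minimal sketch (not from the original source) of the shape being
  // exploited here, assuming %bb has %pred as its sole predecessor:
  //
  //   pred:
  //     %cond = icmp eq i32 %a, %b
  //     br i1 %cond, label %bb, label %other
  //   bb:                       ; %cond is known true on this edge, so
  //     ...                     ; dominated uses of %cond fold to true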
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst)) {
        assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
        auto *TorF = (BI->getSuccessor(0) == BB)
                         ? ConstantInt::getTrue(BB->getContext())
                         : ConstantInt::getFalse(BB->getContext());
        AvailableValues.insert(CondInst, TorF);
        DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                     << CondInst->getName() << "' as " << *TorF << " in "
                     << BB->getName() << "\n");
        // Replace all dominated uses with the known value.
        if (unsigned Count = replaceDominatedUsesWith(
                CondInst, TorF, DT, BasicBlockEdge(Pred, BB))) {
          Changed = true;
          NumCSECVP += Count;
        }
      }
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      salvageDebugInfo(*Inst);
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics; they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with their removal. However, we should
    // mark their conditions as true for all dominated blocks.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across them. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations. Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
    // store, if there was a store following invariant.start. Consider:
    //
    //   store 30, i8* p
    //   invariant.start(p)
    //   store 40, i8* p
    //
    // We can DSE the store to 30, since the store of 40 to the invariant
    // location p causes undefined behavior.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst->use_empty())
        continue;
      auto *CI = cast<CallInst>(Inst);
      MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
      AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              DEBUG(dbgs() << "EarlyCSE removing guard: " << *Inst << '\n');
              removeMSSA(Inst);
              Inst->eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
          }
          // The condition we're guarding on here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store
      // (to avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
      DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
      bool Killed = false;
      if (!Inst->use_empty()) {
        Inst->replaceAllUsesWith(V);
        Changed = true;
      }
      if (isInstructionTriviallyDead(Inst, &TLI)) {
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        Killed = true;
      }
      if (Changed)
        ++NumSimplify;
      if (Killed)
        continue;
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value. If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // Conservatively, we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (InVal.IsInvariant ||
           isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                       << " to: " << *InVal.DefInst << '\n');
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic(), MemInst.isInvariantLoad()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore. Load/store
    // intrinsics will indicate both a read and a write to memory. The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                     << " to: " << *InVal.first << '\n');
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence. As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation. We do need to prevent DSE across the fence,
    // but that's handled above.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // Write-back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write. The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
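    //
    // For example (illustrative IR, not from the original source), assuming
    // nothing between the two instructions writes memory:
    //
    //   %v = load i32, i32* %p
    //   store i32 %v, i32* %p   ; write-back of the loaded value; removable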
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        // It is okay to have a LastStore to a different pointer here if
        // MemorySSA tells us that the load and store are from the same memory
        // generation. In that case, LastStore should keep its present value
        // since we're removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        // At the moment, we don't remove ordered stores, but we do remove
        // unordered atomic stores. There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores, since we're going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
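        //
        // For example (illustrative IR, not from the original source):
        //
        //   store i32 1, i32* %p   ; LastStore: dead, overwritten below
        //   store i32 2, i32* %p   ; no intervening load; earlier store erased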
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                         << " due to: " << *Inst << '\n');
            removeMSSA(LastStore);
            LastStore->eraseFromParent();
            Changed = true;
            ++NumDSE;
            LastStore = nullptr;
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic(), /*IsInvariant=*/false));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed. We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, a deque is being used here because there are significant performance
  // gains over a vector when the container becomes very large, due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the top item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {

/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)