1 //===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===// 2 // 3 // The LLVM Compiler Infrastructure 4 // 5 // This file is distributed under the University of Illinois Open Source 6 // License. See LICENSE.TXT for details. 7 // 8 //===----------------------------------------------------------------------===// 9 // 10 // This pass performs a simple dominator tree walk that eliminates trivially 11 // redundant instructions. 12 // 13 //===----------------------------------------------------------------------===// 14 15 #include "llvm/Transforms/Scalar/EarlyCSE.h" 16 #include "llvm/ADT/DenseMapInfo.h" 17 #include "llvm/ADT/Hashing.h" 18 #include "llvm/ADT/STLExtras.h" 19 #include "llvm/ADT/ScopedHashTable.h" 20 #include "llvm/ADT/SetVector.h" 21 #include "llvm/ADT/SmallVector.h" 22 #include "llvm/ADT/Statistic.h" 23 #include "llvm/Analysis/AssumptionCache.h" 24 #include "llvm/Analysis/GlobalsModRef.h" 25 #include "llvm/Analysis/InstructionSimplify.h" 26 #include "llvm/Analysis/MemorySSA.h" 27 #include "llvm/Analysis/MemorySSAUpdater.h" 28 #include "llvm/Analysis/TargetLibraryInfo.h" 29 #include "llvm/Analysis/TargetTransformInfo.h" 30 #include "llvm/Analysis/ValueTracking.h" 31 #include "llvm/IR/BasicBlock.h" 32 #include "llvm/IR/Constants.h" 33 #include "llvm/IR/DataLayout.h" 34 #include "llvm/IR/Dominators.h" 35 #include "llvm/IR/Function.h" 36 #include "llvm/IR/InstrTypes.h" 37 #include "llvm/IR/Instruction.h" 38 #include "llvm/IR/Instructions.h" 39 #include "llvm/IR/IntrinsicInst.h" 40 #include "llvm/IR/Intrinsics.h" 41 #include "llvm/IR/LLVMContext.h" 42 #include "llvm/IR/PassManager.h" 43 #include "llvm/IR/PatternMatch.h" 44 #include "llvm/IR/Type.h" 45 #include "llvm/IR/Use.h" 46 #include "llvm/IR/Value.h" 47 #include "llvm/Pass.h" 48 #include "llvm/Support/Allocator.h" 49 #include "llvm/Support/AtomicOrdering.h" 50 #include "llvm/Support/Casting.h" 51 #include "llvm/Support/Debug.h" 52 #include "llvm/Support/RecyclingAllocator.h" 53 #include "llvm/Support/raw_ostream.h" 54 #include "llvm/Transforms/Scalar.h" 55 #include "llvm/Transforms/Utils/Local.h" 56 #include <cassert> 57 #include <deque> 58 #include <memory> 59 #include <utility> 60 61 using namespace llvm; 62 using namespace llvm::PatternMatch; 63 64 #define DEBUG_TYPE "early-cse" 65 66 STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd"); 67 STATISTIC(NumCSE, "Number of instructions CSE'd"); 68 STATISTIC(NumCSECVP, "Number of compare instructions CVP'd"); 69 STATISTIC(NumCSELoad, "Number of load instructions CSE'd"); 70 STATISTIC(NumCSECall, "Number of call instructions CSE'd"); 71 STATISTIC(NumDSE, "Number of trivial dead stores removed"); 72 73 //===----------------------------------------------------------------------===// 74 // SimpleValue 75 //===----------------------------------------------------------------------===// 76 77 namespace { 78 79 /// \brief Struct representing the available values in the scoped hash table. 80 struct SimpleValue { 81 Instruction *Inst; 82 83 SimpleValue(Instruction *I) : Inst(I) { 84 assert((isSentinel() || canHandle(I)) && "Inst can't be handled!"); 85 } 86 87 bool isSentinel() const { 88 return Inst == DenseMapInfo<Instruction *>::getEmptyKey() || 89 Inst == DenseMapInfo<Instruction *>::getTombstoneKey(); 90 } 91 92 static bool canHandle(Instruction *Inst) { 93 // This can only handle non-void readnone functions. 94 if (CallInst *CI = dyn_cast<CallInst>(Inst)) 95 return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy(); 96 return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) || 97 isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) || 98 isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) || 99 isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) || 100 isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst); 101 } 102 }; 103 104 } // end anonymous namespace 105 106 namespace llvm { 107 108 template <> struct DenseMapInfo<SimpleValue> { 109 static inline SimpleValue getEmptyKey() { 110 return DenseMapInfo<Instruction *>::getEmptyKey(); 111 } 112 113 static inline SimpleValue getTombstoneKey() { 114 return DenseMapInfo<Instruction *>::getTombstoneKey(); 115 } 116 117 static unsigned getHashValue(SimpleValue Val); 118 static bool isEqual(SimpleValue LHS, SimpleValue RHS); 119 }; 120 121 } // end namespace llvm 122 123 unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) { 124 Instruction *Inst = Val.Inst; 125 // Hash in all of the operands as pointers. 126 if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) { 127 Value *LHS = BinOp->getOperand(0); 128 Value *RHS = BinOp->getOperand(1); 129 if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1)) 130 std::swap(LHS, RHS); 131 132 return hash_combine(BinOp->getOpcode(), LHS, RHS); 133 } 134 135 if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) { 136 Value *LHS = CI->getOperand(0); 137 Value *RHS = CI->getOperand(1); 138 CmpInst::Predicate Pred = CI->getPredicate(); 139 if (Inst->getOperand(0) > Inst->getOperand(1)) { 140 std::swap(LHS, RHS); 141 Pred = CI->getSwappedPredicate(); 142 } 143 return hash_combine(Inst->getOpcode(), Pred, LHS, RHS); 144 } 145 146 // Hash min/max/abs (cmp + select) to allow for commuted operands. 147 // Min/max may also have non-canonical compare predicate (eg, the compare for 148 // smin may use 'sgt' rather than 'slt'), and non-canonical operands in the 149 // compare. 150 Value *A, *B; 151 SelectPatternFlavor SPF = matchSelectPattern(Inst, A, B).Flavor; 152 // TODO: We should also detect FP min/max. 153 if (SPF == SPF_SMIN || SPF == SPF_SMAX || 154 SPF == SPF_UMIN || SPF == SPF_UMAX || 155 SPF == SPF_ABS || SPF == SPF_NABS) { 156 if (A > B) 157 std::swap(A, B); 158 return hash_combine(Inst->getOpcode(), SPF, A, B); 159 } 160 161 if (CastInst *CI = dyn_cast<CastInst>(Inst)) 162 return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0)); 163 164 if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst)) 165 return hash_combine(EVI->getOpcode(), EVI->getOperand(0), 166 hash_combine_range(EVI->idx_begin(), EVI->idx_end())); 167 168 if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst)) 169 return hash_combine(IVI->getOpcode(), IVI->getOperand(0), 170 IVI->getOperand(1), 171 hash_combine_range(IVI->idx_begin(), IVI->idx_end())); 172 173 assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) || 174 isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) || 175 isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) || 176 isa<ShuffleVectorInst>(Inst)) && 177 "Invalid/unknown instruction"); 178 179 // Mix in the opcode. 180 return hash_combine( 181 Inst->getOpcode(), 182 hash_combine_range(Inst->value_op_begin(), Inst->value_op_end())); 183 } 184 185 bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) { 186 Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst; 187 188 if (LHS.isSentinel() || RHS.isSentinel()) 189 return LHSI == RHSI; 190 191 if (LHSI->getOpcode() != RHSI->getOpcode()) 192 return false; 193 if (LHSI->isIdenticalToWhenDefined(RHSI)) 194 return true; 195 196 // If we're not strictly identical, we still might be a commutable instruction 197 if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) { 198 if (!LHSBinOp->isCommutative()) 199 return false; 200 201 assert(isa<BinaryOperator>(RHSI) && 202 "same opcode, but different instruction type?"); 203 BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI); 204 205 // Commuted equality 206 return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) && 207 LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0); 208 } 209 if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) { 210 assert(isa<CmpInst>(RHSI) && 211 "same opcode, but different instruction type?"); 212 CmpInst *RHSCmp = cast<CmpInst>(RHSI); 213 // Commuted equality 214 return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) && 215 LHSCmp->getOperand(1) == RHSCmp->getOperand(0) && 216 LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate(); 217 } 218 219 // Min/max/abs can occur with commuted operands, non-canonical predicates, 220 // and/or non-canonical operands. 221 Value *LHSA, *LHSB; 222 SelectPatternFlavor LSPF = matchSelectPattern(LHSI, LHSA, LHSB).Flavor; 223 // TODO: We should also detect FP min/max. 224 if (LSPF == SPF_SMIN || LSPF == SPF_SMAX || 225 LSPF == SPF_UMIN || LSPF == SPF_UMAX || 226 LSPF == SPF_ABS || LSPF == SPF_NABS) { 227 Value *RHSA, *RHSB; 228 SelectPatternFlavor RSPF = matchSelectPattern(RHSI, RHSA, RHSB).Flavor; 229 return (LSPF == RSPF && ((LHSA == RHSA && LHSB == RHSB) || 230 (LHSA == RHSB && LHSB == RHSA))); 231 } 232 233 return false; 234 } 235 236 //===----------------------------------------------------------------------===// 237 // CallValue 238 //===----------------------------------------------------------------------===// 239 240 namespace { 241 242 /// \brief Struct representing the available call values in the scoped hash 243 /// table. 244 struct CallValue { 245 Instruction *Inst; 246 247 CallValue(Instruction *I) : Inst(I) { 248 assert((isSentinel() || canHandle(I)) && "Inst can't be handled!"); 249 } 250 251 bool isSentinel() const { 252 return Inst == DenseMapInfo<Instruction *>::getEmptyKey() || 253 Inst == DenseMapInfo<Instruction *>::getTombstoneKey(); 254 } 255 256 static bool canHandle(Instruction *Inst) { 257 // Don't value number anything that returns void. 258 if (Inst->getType()->isVoidTy()) 259 return false; 260 261 CallInst *CI = dyn_cast<CallInst>(Inst); 262 if (!CI || !CI->onlyReadsMemory()) 263 return false; 264 return true; 265 } 266 }; 267 268 } // end anonymous namespace 269 270 namespace llvm { 271 272 template <> struct DenseMapInfo<CallValue> { 273 static inline CallValue getEmptyKey() { 274 return DenseMapInfo<Instruction *>::getEmptyKey(); 275 } 276 277 static inline CallValue getTombstoneKey() { 278 return DenseMapInfo<Instruction *>::getTombstoneKey(); 279 } 280 281 static unsigned getHashValue(CallValue Val); 282 static bool isEqual(CallValue LHS, CallValue RHS); 283 }; 284 285 } // end namespace llvm 286 287 unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) { 288 Instruction *Inst = Val.Inst; 289 // Hash all of the operands as pointers and mix in the opcode. 290 return hash_combine( 291 Inst->getOpcode(), 292 hash_combine_range(Inst->value_op_begin(), Inst->value_op_end())); 293 } 294 295 bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) { 296 Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst; 297 if (LHS.isSentinel() || RHS.isSentinel()) 298 return LHSI == RHSI; 299 return LHSI->isIdenticalTo(RHSI); 300 } 301 302 //===----------------------------------------------------------------------===// 303 // EarlyCSE implementation 304 //===----------------------------------------------------------------------===// 305 306 namespace { 307 308 /// \brief A simple and fast domtree-based CSE pass. 309 /// 310 /// This pass does a simple depth-first walk over the dominator tree, 311 /// eliminating trivially redundant instructions and using instsimplify to 312 /// canonicalize things as it goes. It is intended to be fast and catch obvious 313 /// cases so that instcombine and other passes are more effective. It is 314 /// expected that a later pass of GVN will catch the interesting/hard cases. 315 class EarlyCSE { 316 public: 317 const TargetLibraryInfo &TLI; 318 const TargetTransformInfo &TTI; 319 DominatorTree &DT; 320 AssumptionCache &AC; 321 const SimplifyQuery SQ; 322 MemorySSA *MSSA; 323 std::unique_ptr<MemorySSAUpdater> MSSAUpdater; 324 325 using AllocatorTy = 326 RecyclingAllocator<BumpPtrAllocator, 327 ScopedHashTableVal<SimpleValue, Value *>>; 328 using ScopedHTType = 329 ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>, 330 AllocatorTy>; 331 332 /// \brief A scoped hash table of the current values of all of our simple 333 /// scalar expressions. 334 /// 335 /// As we walk down the domtree, we look to see if instructions are in this: 336 /// if so, we replace them with what we find, otherwise we insert them so 337 /// that dominated values can succeed in their lookup. 338 ScopedHTType AvailableValues; 339 340 /// A scoped hash table of the current values of previously encounted memory 341 /// locations. 342 /// 343 /// This allows us to get efficient access to dominating loads or stores when 344 /// we have a fully redundant load. In addition to the most recent load, we 345 /// keep track of a generation count of the read, which is compared against 346 /// the current generation count. The current generation count is incremented 347 /// after every possibly writing memory operation, which ensures that we only 348 /// CSE loads with other loads that have no intervening store. Ordering 349 /// events (such as fences or atomic instructions) increment the generation 350 /// count as well; essentially, we model these as writes to all possible 351 /// locations. Note that atomic and/or volatile loads and stores can be 352 /// present the table; it is the responsibility of the consumer to inspect 353 /// the atomicity/volatility if needed. 354 struct LoadValue { 355 Instruction *DefInst = nullptr; 356 unsigned Generation = 0; 357 int MatchingId = -1; 358 bool IsAtomic = false; 359 360 LoadValue() = default; 361 LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId, 362 bool IsAtomic) 363 : DefInst(Inst), Generation(Generation), MatchingId(MatchingId), 364 IsAtomic(IsAtomic) {} 365 }; 366 367 using LoadMapAllocator = 368 RecyclingAllocator<BumpPtrAllocator, 369 ScopedHashTableVal<Value *, LoadValue>>; 370 using LoadHTType = 371 ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>, 372 LoadMapAllocator>; 373 374 LoadHTType AvailableLoads; 375 376 // A scoped hash table mapping memory locations (represented as typed 377 // addresses) to generation numbers at which that memory location became 378 // (henceforth indefinitely) invariant. 379 using InvariantMapAllocator = 380 RecyclingAllocator<BumpPtrAllocator, 381 ScopedHashTableVal<MemoryLocation, unsigned>>; 382 using InvariantHTType = 383 ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>, 384 InvariantMapAllocator>; 385 InvariantHTType AvailableInvariants; 386 387 /// \brief A scoped hash table of the current values of read-only call 388 /// values. 389 /// 390 /// It uses the same generation count as loads. 391 using CallHTType = 392 ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>; 393 CallHTType AvailableCalls; 394 395 /// \brief This is the current generation of the memory value. 396 unsigned CurrentGeneration = 0; 397 398 /// \brief Set up the EarlyCSE runner for a particular function. 399 EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI, 400 const TargetTransformInfo &TTI, DominatorTree &DT, 401 AssumptionCache &AC, MemorySSA *MSSA) 402 : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA), 403 MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {} 404 405 bool run(); 406 407 private: 408 // Almost a POD, but needs to call the constructors for the scoped hash 409 // tables so that a new scope gets pushed on. These are RAII so that the 410 // scope gets popped when the NodeScope is destroyed. 411 class NodeScope { 412 public: 413 NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads, 414 InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls) 415 : Scope(AvailableValues), LoadScope(AvailableLoads), 416 InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {} 417 NodeScope(const NodeScope &) = delete; 418 NodeScope &operator=(const NodeScope &) = delete; 419 420 private: 421 ScopedHTType::ScopeTy Scope; 422 LoadHTType::ScopeTy LoadScope; 423 InvariantHTType::ScopeTy InvariantScope; 424 CallHTType::ScopeTy CallScope; 425 }; 426 427 // Contains all the needed information to create a stack for doing a depth 428 // first traversal of the tree. This includes scopes for values, loads, and 429 // calls as well as the generation. There is a child iterator so that the 430 // children do not need to be store separately. 431 class StackNode { 432 public: 433 StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads, 434 InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls, 435 unsigned cg, DomTreeNode *n, DomTreeNode::iterator child, 436 DomTreeNode::iterator end) 437 : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child), 438 EndIter(end), 439 Scopes(AvailableValues, AvailableLoads, AvailableInvariants, 440 AvailableCalls) 441 {} 442 StackNode(const StackNode &) = delete; 443 StackNode &operator=(const StackNode &) = delete; 444 445 // Accessors. 446 unsigned currentGeneration() { return CurrentGeneration; } 447 unsigned childGeneration() { return ChildGeneration; } 448 void childGeneration(unsigned generation) { ChildGeneration = generation; } 449 DomTreeNode *node() { return Node; } 450 DomTreeNode::iterator childIter() { return ChildIter; } 451 452 DomTreeNode *nextChild() { 453 DomTreeNode *child = *ChildIter; 454 ++ChildIter; 455 return child; 456 } 457 458 DomTreeNode::iterator end() { return EndIter; } 459 bool isProcessed() { return Processed; } 460 void process() { Processed = true; } 461 462 private: 463 unsigned CurrentGeneration; 464 unsigned ChildGeneration; 465 DomTreeNode *Node; 466 DomTreeNode::iterator ChildIter; 467 DomTreeNode::iterator EndIter; 468 NodeScope Scopes; 469 bool Processed = false; 470 }; 471 472 /// \brief Wrapper class to handle memory instructions, including loads, 473 /// stores and intrinsic loads and stores defined by the target. 474 class ParseMemoryInst { 475 public: 476 ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI) 477 : Inst(Inst) { 478 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) 479 if (TTI.getTgtMemIntrinsic(II, Info)) 480 IsTargetMemInst = true; 481 } 482 483 bool isLoad() const { 484 if (IsTargetMemInst) return Info.ReadMem; 485 return isa<LoadInst>(Inst); 486 } 487 488 bool isStore() const { 489 if (IsTargetMemInst) return Info.WriteMem; 490 return isa<StoreInst>(Inst); 491 } 492 493 bool isAtomic() const { 494 if (IsTargetMemInst) 495 return Info.Ordering != AtomicOrdering::NotAtomic; 496 return Inst->isAtomic(); 497 } 498 499 bool isUnordered() const { 500 if (IsTargetMemInst) 501 return Info.isUnordered(); 502 503 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { 504 return LI->isUnordered(); 505 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 506 return SI->isUnordered(); 507 } 508 // Conservative answer 509 return !Inst->isAtomic(); 510 } 511 512 bool isVolatile() const { 513 if (IsTargetMemInst) 514 return Info.IsVolatile; 515 516 if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) { 517 return LI->isVolatile(); 518 } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) { 519 return SI->isVolatile(); 520 } 521 // Conservative answer 522 return true; 523 } 524 525 bool isInvariantLoad() const { 526 if (auto *LI = dyn_cast<LoadInst>(Inst)) 527 return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr; 528 return false; 529 } 530 531 bool isMatchingMemLoc(const ParseMemoryInst &Inst) const { 532 return (getPointerOperand() == Inst.getPointerOperand() && 533 getMatchingId() == Inst.getMatchingId()); 534 } 535 536 bool isValid() const { return getPointerOperand() != nullptr; } 537 538 // For regular (non-intrinsic) loads/stores, this is set to -1. For 539 // intrinsic loads/stores, the id is retrieved from the corresponding 540 // field in the MemIntrinsicInfo structure. That field contains 541 // non-negative values only. 542 int getMatchingId() const { 543 if (IsTargetMemInst) return Info.MatchingId; 544 return -1; 545 } 546 547 Value *getPointerOperand() const { 548 if (IsTargetMemInst) return Info.PtrVal; 549 return getLoadStorePointerOperand(Inst); 550 } 551 552 bool mayReadFromMemory() const { 553 if (IsTargetMemInst) return Info.ReadMem; 554 return Inst->mayReadFromMemory(); 555 } 556 557 bool mayWriteToMemory() const { 558 if (IsTargetMemInst) return Info.WriteMem; 559 return Inst->mayWriteToMemory(); 560 } 561 562 private: 563 bool IsTargetMemInst = false; 564 MemIntrinsicInfo Info; 565 Instruction *Inst; 566 }; 567 568 bool processNode(DomTreeNode *Node); 569 570 Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const { 571 if (auto *LI = dyn_cast<LoadInst>(Inst)) 572 return LI; 573 if (auto *SI = dyn_cast<StoreInst>(Inst)) 574 return SI->getValueOperand(); 575 assert(isa<IntrinsicInst>(Inst) && "Instruction not supported"); 576 return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst), 577 ExpectedType); 578 } 579 580 /// Return true if the instruction is known to only operate on memory 581 /// provably invariant in the given "generation". 582 bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt); 583 584 bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration, 585 Instruction *EarlierInst, Instruction *LaterInst); 586 587 void removeMSSA(Instruction *Inst) { 588 if (!MSSA) 589 return; 590 // Removing a store here can leave MemorySSA in an unoptimized state by 591 // creating MemoryPhis that have identical arguments and by creating 592 // MemoryUses whose defining access is not an actual clobber. We handle the 593 // phi case eagerly here. The non-optimized MemoryUse case is lazily 594 // updated by MemorySSA getClobberingMemoryAccess. 595 if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) { 596 // Optimize MemoryPhi nodes that may become redundant by having all the 597 // same input values once MA is removed. 598 SmallSetVector<MemoryPhi *, 4> PhisToCheck; 599 SmallVector<MemoryAccess *, 8> WorkQueue; 600 WorkQueue.push_back(MA); 601 // Process MemoryPhi nodes in FIFO order using a ever-growing vector since 602 // we shouldn't be processing that many phis and this will avoid an 603 // allocation in almost all cases. 604 for (unsigned I = 0; I < WorkQueue.size(); ++I) { 605 MemoryAccess *WI = WorkQueue[I]; 606 607 for (auto *U : WI->users()) 608 if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U)) 609 PhisToCheck.insert(MP); 610 611 MSSAUpdater->removeMemoryAccess(WI); 612 613 for (MemoryPhi *MP : PhisToCheck) { 614 MemoryAccess *FirstIn = MP->getIncomingValue(0); 615 if (llvm::all_of(MP->incoming_values(), 616 [=](Use &In) { return In == FirstIn; })) 617 WorkQueue.push_back(MP); 618 } 619 PhisToCheck.clear(); 620 } 621 } 622 } 623 }; 624 625 } // end anonymous namespace 626 627 /// Determine if the memory referenced by LaterInst is from the same heap 628 /// version as EarlierInst. 629 /// This is currently called in two scenarios: 630 /// 631 /// load p 632 /// ... 633 /// load p 634 /// 635 /// and 636 /// 637 /// x = load p 638 /// ... 639 /// store x, p 640 /// 641 /// in both cases we want to verify that there are no possible writes to the 642 /// memory referenced by p between the earlier and later instruction. 643 bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration, 644 unsigned LaterGeneration, 645 Instruction *EarlierInst, 646 Instruction *LaterInst) { 647 // Check the simple memory generation tracking first. 648 if (EarlierGeneration == LaterGeneration) 649 return true; 650 651 if (!MSSA) 652 return false; 653 654 // If MemorySSA has determined that one of EarlierInst or LaterInst does not 655 // read/write memory, then we can safely return true here. 656 // FIXME: We could be more aggressive when checking doesNotAccessMemory(), 657 // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass 658 // by also checking the MemorySSA MemoryAccess on the instruction. Initial 659 // experiments suggest this isn't worthwhile, at least for C/C++ code compiled 660 // with the default optimization pipeline. 661 auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst); 662 if (!EarlierMA) 663 return true; 664 auto *LaterMA = MSSA->getMemoryAccess(LaterInst); 665 if (!LaterMA) 666 return true; 667 668 // Since we know LaterDef dominates LaterInst and EarlierInst dominates 669 // LaterInst, if LaterDef dominates EarlierInst then it can't occur between 670 // EarlierInst and LaterInst and neither can any other write that potentially 671 // clobbers LaterInst. 672 MemoryAccess *LaterDef = 673 MSSA->getWalker()->getClobberingMemoryAccess(LaterInst); 674 return MSSA->dominates(LaterDef, EarlierMA); 675 } 676 677 bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) { 678 // A location loaded from with an invariant_load is assumed to *never* change 679 // within the visible scope of the compilation. 680 if (auto *LI = dyn_cast<LoadInst>(I)) 681 if (LI->getMetadata(LLVMContext::MD_invariant_load)) 682 return true; 683 684 auto MemLocOpt = MemoryLocation::getOrNone(I); 685 if (!MemLocOpt) 686 // "target" intrinsic forms of loads aren't currently known to 687 // MemoryLocation::get. TODO 688 return false; 689 MemoryLocation MemLoc = *MemLocOpt; 690 if (!AvailableInvariants.count(MemLoc)) 691 return false; 692 693 // Is the generation at which this became invariant older than the 694 // current one? 695 return AvailableInvariants.lookup(MemLoc) <= GenAt; 696 } 697 698 bool EarlyCSE::processNode(DomTreeNode *Node) { 699 bool Changed = false; 700 BasicBlock *BB = Node->getBlock(); 701 702 // If this block has a single predecessor, then the predecessor is the parent 703 // of the domtree node and all of the live out memory values are still current 704 // in this block. If this block has multiple predecessors, then they could 705 // have invalidated the live-out memory values of our parent value. For now, 706 // just be conservative and invalidate memory if this block has multiple 707 // predecessors. 708 if (!BB->getSinglePredecessor()) 709 ++CurrentGeneration; 710 711 // If this node has a single predecessor which ends in a conditional branch, 712 // we can infer the value of the branch condition given that we took this 713 // path. We need the single predecessor to ensure there's not another path 714 // which reaches this block where the condition might hold a different 715 // value. Since we're adding this to the scoped hash table (like any other 716 // def), it will have been popped if we encounter a future merge block. 717 if (BasicBlock *Pred = BB->getSinglePredecessor()) { 718 auto *BI = dyn_cast<BranchInst>(Pred->getTerminator()); 719 if (BI && BI->isConditional()) { 720 auto *CondInst = dyn_cast<Instruction>(BI->getCondition()); 721 if (CondInst && SimpleValue::canHandle(CondInst)) { 722 assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB); 723 auto *TorF = (BI->getSuccessor(0) == BB) 724 ? ConstantInt::getTrue(BB->getContext()) 725 : ConstantInt::getFalse(BB->getContext()); 726 AvailableValues.insert(CondInst, TorF); 727 DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '" 728 << CondInst->getName() << "' as " << *TorF << " in " 729 << BB->getName() << "\n"); 730 // Replace all dominated uses with the known value. 731 if (unsigned Count = replaceDominatedUsesWith( 732 CondInst, TorF, DT, BasicBlockEdge(Pred, BB))) { 733 Changed = true; 734 NumCSECVP += Count; 735 } 736 } 737 } 738 } 739 740 /// LastStore - Keep track of the last non-volatile store that we saw... for 741 /// as long as there in no instruction that reads memory. If we see a store 742 /// to the same location, we delete the dead store. This zaps trivial dead 743 /// stores which can occur in bitfield code among other things. 744 Instruction *LastStore = nullptr; 745 746 // See if any instructions in the block can be eliminated. If so, do it. If 747 // not, add them to AvailableValues. 748 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) { 749 Instruction *Inst = &*I++; 750 751 // Dead instructions should just be removed. 752 if (isInstructionTriviallyDead(Inst, &TLI)) { 753 DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n'); 754 salvageDebugInfo(*Inst); 755 removeMSSA(Inst); 756 Inst->eraseFromParent(); 757 Changed = true; 758 ++NumSimplify; 759 continue; 760 } 761 762 // Skip assume intrinsics, they don't really have side effects (although 763 // they're marked as such to ensure preservation of control dependencies), 764 // and this pass will not bother with its removal. However, we should mark 765 // its condition as true for all dominated blocks. 766 if (match(Inst, m_Intrinsic<Intrinsic::assume>())) { 767 auto *CondI = 768 dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0)); 769 if (CondI && SimpleValue::canHandle(CondI)) { 770 DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst << '\n'); 771 AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext())); 772 } else 773 DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n'); 774 continue; 775 } 776 777 // Skip sideeffect intrinsics, for the same reason as assume intrinsics. 778 if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) { 779 DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n'); 780 continue; 781 } 782 783 // We can skip all invariant.start intrinsics since they only read memory, 784 // and we can forward values across it. For invariant starts without 785 // invariant ends, we can use the fact that the invariantness never ends to 786 // start a scope in the current generaton which is true for all future 787 // generations. Also, we dont need to consume the last store since the 788 // semantics of invariant.start allow us to perform DSE of the last 789 // store, if there was a store following invariant.start. Consider: 790 // 791 // store 30, i8* p 792 // invariant.start(p) 793 // store 40, i8* p 794 // We can DSE the store to 30, since the store 40 to invariant location p 795 // causes undefined behaviour. 796 if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) { 797 // If there are any uses, the scope might end. 798 if (!Inst->use_empty()) 799 continue; 800 auto *CI = cast<CallInst>(Inst); 801 MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI); 802 // Don't start a scope if we already have a better one pushed 803 if (!AvailableInvariants.count(MemLoc)) 804 AvailableInvariants.insert(MemLoc, CurrentGeneration); 805 continue; 806 } 807 808 if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) { 809 if (auto *CondI = 810 dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) { 811 if (SimpleValue::canHandle(CondI)) { 812 // Do we already know the actual value of this condition? 813 if (auto *KnownCond = AvailableValues.lookup(CondI)) { 814 // Is the condition known to be true? 815 if (isa<ConstantInt>(KnownCond) && 816 cast<ConstantInt>(KnownCond)->isOne()) { 817 DEBUG(dbgs() << "EarlyCSE removing guard: " << *Inst << '\n'); 818 removeMSSA(Inst); 819 Inst->eraseFromParent(); 820 Changed = true; 821 continue; 822 } else 823 // Use the known value if it wasn't true. 824 cast<CallInst>(Inst)->setArgOperand(0, KnownCond); 825 } 826 // The condition we're on guarding here is true for all dominated 827 // locations. 828 AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext())); 829 } 830 } 831 832 // Guard intrinsics read all memory, but don't write any memory. 833 // Accordingly, don't update the generation but consume the last store (to 834 // avoid an incorrect DSE). 835 LastStore = nullptr; 836 continue; 837 } 838 839 // If the instruction can be simplified (e.g. X+0 = X) then replace it with 840 // its simpler value. 841 if (Value *V = SimplifyInstruction(Inst, SQ)) { 842 DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n'); 843 bool Killed = false; 844 if (!Inst->use_empty()) { 845 Inst->replaceAllUsesWith(V); 846 Changed = true; 847 } 848 if (isInstructionTriviallyDead(Inst, &TLI)) { 849 removeMSSA(Inst); 850 Inst->eraseFromParent(); 851 Changed = true; 852 Killed = true; 853 } 854 if (Changed) 855 ++NumSimplify; 856 if (Killed) 857 continue; 858 } 859 860 // If this is a simple instruction that we can value number, process it. 861 if (SimpleValue::canHandle(Inst)) { 862 // See if the instruction has an available value. If so, use it. 863 if (Value *V = AvailableValues.lookup(Inst)) { 864 DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n'); 865 if (auto *I = dyn_cast<Instruction>(V)) 866 I->andIRFlags(Inst); 867 Inst->replaceAllUsesWith(V); 868 removeMSSA(Inst); 869 Inst->eraseFromParent(); 870 Changed = true; 871 ++NumCSE; 872 continue; 873 } 874 875 // Otherwise, just remember that this value is available. 876 AvailableValues.insert(Inst, Inst); 877 continue; 878 } 879 880 ParseMemoryInst MemInst(Inst, TTI); 881 // If this is a non-volatile load, process it. 882 if (MemInst.isValid() && MemInst.isLoad()) { 883 // (conservatively) we can't peak past the ordering implied by this 884 // operation, but we can add this load to our set of available values 885 if (MemInst.isVolatile() || !MemInst.isUnordered()) { 886 LastStore = nullptr; 887 ++CurrentGeneration; 888 } 889 890 if (MemInst.isInvariantLoad()) { 891 // If we pass an invariant load, we know that memory location is 892 // indefinitely constant from the moment of first dereferenceability. 893 // We conservatively treat the invariant_load as that moment. If we 894 // pass a invariant load after already establishing a scope, don't 895 // restart it since we want to preserve the earliest point seen. 896 auto MemLoc = MemoryLocation::get(Inst); 897 if (!AvailableInvariants.count(MemLoc)) 898 AvailableInvariants.insert(MemLoc, CurrentGeneration); 899 } 900 901 // If we have an available version of this load, and if it is the right 902 // generation or the load is known to be from an invariant location, 903 // replace this instruction. 904 // 905 // If either the dominating load or the current load are invariant, then 906 // we can assume the current load loads the same value as the dominating 907 // load. 908 LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand()); 909 if (InVal.DefInst != nullptr && 910 InVal.MatchingId == MemInst.getMatchingId() && 911 // We don't yet handle removing loads with ordering of any kind. 912 !MemInst.isVolatile() && MemInst.isUnordered() && 913 // We can't replace an atomic load with one which isn't also atomic. 914 InVal.IsAtomic >= MemInst.isAtomic() && 915 (isOperatingOnInvariantMemAt(Inst, InVal.Generation) || 916 isSameMemGeneration(InVal.Generation, CurrentGeneration, 917 InVal.DefInst, Inst))) { 918 Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType()); 919 if (Op != nullptr) { 920 DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst 921 << " to: " << *InVal.DefInst << '\n'); 922 if (!Inst->use_empty()) 923 Inst->replaceAllUsesWith(Op); 924 removeMSSA(Inst); 925 Inst->eraseFromParent(); 926 Changed = true; 927 ++NumCSELoad; 928 continue; 929 } 930 } 931 932 // Otherwise, remember that we have this instruction. 933 AvailableLoads.insert( 934 MemInst.getPointerOperand(), 935 LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(), 936 MemInst.isAtomic())); 937 LastStore = nullptr; 938 continue; 939 } 940 941 // If this instruction may read from memory or throw (and potentially read 942 // from memory in the exception handler), forget LastStore. Load/store 943 // intrinsics will indicate both a read and a write to memory. The target 944 // may override this (e.g. so that a store intrinsic does not read from 945 // memory, and thus will be treated the same as a regular store for 946 // commoning purposes). 947 if ((Inst->mayReadFromMemory() || Inst->mayThrow()) && 948 !(MemInst.isValid() && !MemInst.mayReadFromMemory())) 949 LastStore = nullptr; 950 951 // If this is a read-only call, process it. 952 if (CallValue::canHandle(Inst)) { 953 // If we have an available version of this call, and if it is the right 954 // generation, replace this instruction. 955 std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst); 956 if (InVal.first != nullptr && 957 isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first, 958 Inst)) { 959 DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst 960 << " to: " << *InVal.first << '\n'); 961 if (!Inst->use_empty()) 962 Inst->replaceAllUsesWith(InVal.first); 963 removeMSSA(Inst); 964 Inst->eraseFromParent(); 965 Changed = true; 966 ++NumCSECall; 967 continue; 968 } 969 970 // Otherwise, remember that we have this instruction. 971 AvailableCalls.insert( 972 Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration)); 973 continue; 974 } 975 976 // A release fence requires that all stores complete before it, but does 977 // not prevent the reordering of following loads 'before' the fence. As a 978 // result, we don't need to consider it as writing to memory and don't need 979 // to advance the generation. We do need to prevent DSE across the fence, 980 // but that's handled above. 981 if (FenceInst *FI = dyn_cast<FenceInst>(Inst)) 982 if (FI->getOrdering() == AtomicOrdering::Release) { 983 assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above"); 984 continue; 985 } 986 987 // write back DSE - If we write back the same value we just loaded from 988 // the same location and haven't passed any intervening writes or ordering 989 // operations, we can remove the write. The primary benefit is in allowing 990 // the available load table to remain valid and value forward past where 991 // the store originally was. 992 if (MemInst.isValid() && MemInst.isStore()) { 993 LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand()); 994 if (InVal.DefInst && 995 InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) && 996 InVal.MatchingId == MemInst.getMatchingId() && 997 // We don't yet handle removing stores with ordering of any kind. 998 !MemInst.isVolatile() && MemInst.isUnordered() && 999 (isOperatingOnInvariantMemAt(Inst, InVal.Generation) || 1000 isSameMemGeneration(InVal.Generation, CurrentGeneration, 1001 InVal.DefInst, Inst))) { 1002 // It is okay to have a LastStore to a different pointer here if MemorySSA 1003 // tells us that the load and store are from the same memory generation. 1004 // In that case, LastStore should keep its present value since we're 1005 // removing the current store. 1006 assert((!LastStore || 1007 ParseMemoryInst(LastStore, TTI).getPointerOperand() == 1008 MemInst.getPointerOperand() || 1009 MSSA) && 1010 "can't have an intervening store if not using MemorySSA!"); 1011 DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n'); 1012 removeMSSA(Inst); 1013 Inst->eraseFromParent(); 1014 Changed = true; 1015 ++NumDSE; 1016 // We can avoid incrementing the generation count since we were able 1017 // to eliminate this store. 1018 continue; 1019 } 1020 } 1021 1022 // Okay, this isn't something we can CSE at all. Check to see if it is 1023 // something that could modify memory. If so, our available memory values 1024 // cannot be used so bump the generation count. 1025 if (Inst->mayWriteToMemory()) { 1026 ++CurrentGeneration; 1027 1028 if (MemInst.isValid() && MemInst.isStore()) { 1029 // We do a trivial form of DSE if there are two stores to the same 1030 // location with no intervening loads. Delete the earlier store. 1031 // At the moment, we don't remove ordered stores, but do remove 1032 // unordered atomic stores. There's no special requirement (for 1033 // unordered atomics) about removing atomic stores only in favor of 1034 // other atomic stores since we we're going to execute the non-atomic 1035 // one anyway and the atomic one might never have become visible. 1036 if (LastStore) { 1037 ParseMemoryInst LastStoreMemInst(LastStore, TTI); 1038 assert(LastStoreMemInst.isUnordered() && 1039 !LastStoreMemInst.isVolatile() && 1040 "Violated invariant"); 1041 if (LastStoreMemInst.isMatchingMemLoc(MemInst)) { 1042 DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore 1043 << " due to: " << *Inst << '\n'); 1044 removeMSSA(LastStore); 1045 LastStore->eraseFromParent(); 1046 Changed = true; 1047 ++NumDSE; 1048 LastStore = nullptr; 1049 } 1050 // fallthrough - we can exploit information about this store 1051 } 1052 1053 // Okay, we just invalidated anything we knew about loaded values. Try 1054 // to salvage *something* by remembering that the stored value is a live 1055 // version of the pointer. It is safe to forward from volatile stores 1056 // to non-volatile loads, so we don't have to check for volatility of 1057 // the store. 1058 AvailableLoads.insert( 1059 MemInst.getPointerOperand(), 1060 LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(), 1061 MemInst.isAtomic())); 1062 1063 // Remember that this was the last unordered store we saw for DSE. We 1064 // don't yet handle DSE on ordered or volatile stores since we don't 1065 // have a good way to model the ordering requirement for following 1066 // passes once the store is removed. We could insert a fence, but 1067 // since fences are slightly stronger than stores in their ordering, 1068 // it's not clear this is a profitable transform. Another option would 1069 // be to merge the ordering with that of the post dominating store. 1070 if (MemInst.isUnordered() && !MemInst.isVolatile()) 1071 LastStore = Inst; 1072 else 1073 LastStore = nullptr; 1074 } 1075 } 1076 } 1077 1078 return Changed; 1079 } 1080 1081 bool EarlyCSE::run() { 1082 // Note, deque is being used here because there is significant performance 1083 // gains over vector when the container becomes very large due to the 1084 // specific access patterns. For more information see the mailing list 1085 // discussion on this: 1086 // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html 1087 std::deque<StackNode *> nodesToProcess; 1088 1089 bool Changed = false; 1090 1091 // Process the root node. 1092 nodesToProcess.push_back(new StackNode( 1093 AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls, 1094 CurrentGeneration, DT.getRootNode(), 1095 DT.getRootNode()->begin(), DT.getRootNode()->end())); 1096 1097 // Save the current generation. 1098 unsigned LiveOutGeneration = CurrentGeneration; 1099 1100 // Process the stack. 1101 while (!nodesToProcess.empty()) { 1102 // Grab the first item off the stack. Set the current generation, remove 1103 // the node from the stack, and process it. 1104 StackNode *NodeToProcess = nodesToProcess.back(); 1105 1106 // Initialize class members. 1107 CurrentGeneration = NodeToProcess->currentGeneration(); 1108 1109 // Check if the node needs to be processed. 1110 if (!NodeToProcess->isProcessed()) { 1111 // Process the node. 1112 Changed |= processNode(NodeToProcess->node()); 1113 NodeToProcess->childGeneration(CurrentGeneration); 1114 NodeToProcess->process(); 1115 } else if (NodeToProcess->childIter() != NodeToProcess->end()) { 1116 // Push the next child onto the stack. 1117 DomTreeNode *child = NodeToProcess->nextChild(); 1118 nodesToProcess.push_back( 1119 new StackNode(AvailableValues, AvailableLoads, AvailableInvariants, 1120 AvailableCalls, NodeToProcess->childGeneration(), 1121 child, child->begin(), child->end())); 1122 } else { 1123 // It has been processed, and there are no more children to process, 1124 // so delete it and pop it off the stack. 1125 delete NodeToProcess; 1126 nodesToProcess.pop_back(); 1127 } 1128 } // while (!nodes...) 1129 1130 // Reset the current generation. 1131 CurrentGeneration = LiveOutGeneration; 1132 1133 return Changed; 1134 } 1135 1136 PreservedAnalyses EarlyCSEPass::run(Function &F, 1137 FunctionAnalysisManager &AM) { 1138 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 1139 auto &TTI = AM.getResult<TargetIRAnalysis>(F); 1140 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 1141 auto &AC = AM.getResult<AssumptionAnalysis>(F); 1142 auto *MSSA = 1143 UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr; 1144 1145 EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA); 1146 1147 if (!CSE.run()) 1148 return PreservedAnalyses::all(); 1149 1150 PreservedAnalyses PA; 1151 PA.preserveSet<CFGAnalyses>(); 1152 PA.preserve<GlobalsAA>(); 1153 if (UseMemorySSA) 1154 PA.preserve<MemorySSAAnalysis>(); 1155 return PA; 1156 } 1157 1158 namespace { 1159 1160 /// \brief A simple and fast domtree-based CSE pass. 1161 /// 1162 /// This pass does a simple depth-first walk over the dominator tree, 1163 /// eliminating trivially redundant instructions and using instsimplify to 1164 /// canonicalize things as it goes. It is intended to be fast and catch obvious 1165 /// cases so that instcombine and other passes are more effective. It is 1166 /// expected that a later pass of GVN will catch the interesting/hard cases. 1167 template<bool UseMemorySSA> 1168 class EarlyCSELegacyCommonPass : public FunctionPass { 1169 public: 1170 static char ID; 1171 1172 EarlyCSELegacyCommonPass() : FunctionPass(ID) { 1173 if (UseMemorySSA) 1174 initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry()); 1175 else 1176 initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry()); 1177 } 1178 1179 bool runOnFunction(Function &F) override { 1180 if (skipFunction(F)) 1181 return false; 1182 1183 auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(); 1184 auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F); 1185 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree(); 1186 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F); 1187 auto *MSSA = 1188 UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr; 1189 1190 EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA); 1191 1192 return CSE.run(); 1193 } 1194 1195 void getAnalysisUsage(AnalysisUsage &AU) const override { 1196 AU.addRequired<AssumptionCacheTracker>(); 1197 AU.addRequired<DominatorTreeWrapperPass>(); 1198 AU.addRequired<TargetLibraryInfoWrapperPass>(); 1199 AU.addRequired<TargetTransformInfoWrapperPass>(); 1200 if (UseMemorySSA) { 1201 AU.addRequired<MemorySSAWrapperPass>(); 1202 AU.addPreserved<MemorySSAWrapperPass>(); 1203 } 1204 AU.addPreserved<GlobalsAAWrapperPass>(); 1205 AU.setPreservesCFG(); 1206 } 1207 }; 1208 1209 } // end anonymous namespace 1210 1211 using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>; 1212 1213 template<> 1214 char EarlyCSELegacyPass::ID = 0; 1215 1216 INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false, 1217 false) 1218 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 1219 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1220 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1221 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1222 INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false) 1223 1224 using EarlyCSEMemSSALegacyPass = 1225 EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>; 1226 1227 template<> 1228 char EarlyCSEMemSSALegacyPass::ID = 0; 1229 1230 FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) { 1231 if (UseMemorySSA) 1232 return new EarlyCSEMemSSALegacyPass(); 1233 else 1234 return new EarlyCSELegacyPass(); 1235 } 1236 1237 INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa", 1238 "Early CSE w/ MemorySSA", false, false) 1239 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass) 1240 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker) 1241 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass) 1242 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass) 1243 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass) 1244 INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa", 1245 "Early CSE w/ MemorySSA", false, false) 1246