//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <deque>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
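    // Calls that may access memory are never keyed here: read-only calls are
    // value-numbered separately through CallValue below, and loads/stores go
    // through the AvailableLoads table.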
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};
}

namespace llvm {
template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
}

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
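  // Default case: hash the opcode together with every operand, in order, as
  // pointers. No operand canonicalization is needed here; commutative binary
  // operators and compares were already normalized above.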
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};
}

namespace llvm {
template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};
}

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
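  // Note that the called function is itself one of the call's operands, so
  // hashing every operand also distinguishes calls to different callees.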
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;
  typedef RecyclingAllocator<
      BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy;
  typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                          AllocatorTy> ScopedHTType;

  /// \brief A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count. The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store. Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations. Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
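  ///
  /// Illustrative example (not taken from this file):
  ///   %a = load i32, i32* %p    ; recorded at generation G
  ///   call void @f()            ; may write memory; generation becomes G+1
  ///   %b = load i32, i32* %p    ; generation mismatch, so the table alone
  ///                             ; cannot CSE %b; MemorySSA, if enabled, may
  ///                             ; still prove there is no clobber in between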
  struct LoadValue {
    Instruction *DefInst;
    unsigned Generation;
    int MatchingId;
    bool IsAtomic;
    bool IsInvariant;
    LoadValue()
        : DefInst(nullptr), Generation(0), MatchingId(-1), IsAtomic(false),
          IsInvariant(false) {}
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic, bool IsInvariant)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic), IsInvariant(IsInvariant) {}
  };
  typedef RecyclingAllocator<BumpPtrAllocator,
                             ScopedHashTableVal<Value *, LoadValue>>
      LoadMapAllocator;
  typedef ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                          LoadMapAllocator> LoadHTType;
  LoadHTType AvailableLoads;

  /// \brief A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  typedef ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>
      CallHTType;
  CallHTType AvailableCalls;

  /// \brief This is the current generation of the memory value.
  unsigned CurrentGeneration;

  /// \brief Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(make_unique<MemorySSAUpdater>(MSSA)), CurrentGeneration(0) {
  }

  bool run();

private:
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          CallScope(AvailableCalls) {}

  private:
    NodeScope(const NodeScope &) = delete;
    void operator=(const NodeScope &) = delete;

    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a
  // depth-first traversal of the tree. This includes scopes for values, loads,
  // and calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
              DomTreeNode::iterator child, DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls),
          Processed(false) {}

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }
    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }
    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    StackNode(const StackNode &) = delete;
    void operator=(const StackNode &) = delete;

    // Members.
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed;
  };

  /// \brief Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : IsTargetMemInst(false), Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }
    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }
    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }
    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }
    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }
    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }
    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->getPointerOperand();
      }
      return nullptr;
    }
    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }
    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. We handle the
    // phi case eagerly here. The non-optimized MemoryUse case is lazily
    // updated by MemorySSA getClobberingMemoryAccess.
    if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
      // Optimize MemoryPhi nodes that may become redundant by having all the
      // same input values once MA is removed.
      SmallVector<MemoryPhi *, 4> PhisToCheck;
      SmallVector<MemoryAccess *, 8> WorkQueue;
      WorkQueue.push_back(MA);
      // Process MemoryPhi nodes in FIFO order using an ever-growing vector
      // since we shouldn't be processing that many phis and this will avoid
      // an allocation in almost all cases.
      for (unsigned I = 0; I < WorkQueue.size(); ++I) {
        MemoryAccess *WI = WorkQueue[I];

        for (auto *U : WI->users())
          if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
            PhisToCheck.push_back(MP);

        MSSAUpdater->removeMemoryAccess(WI);

        for (MemoryPhi *MP : PhisToCheck) {
          MemoryAccess *FirstIn = MP->getIncomingValue(0);
          if (all_of(MP->incoming_values(),
                     [=](Use &In) { return In == FirstIn; }))
            WorkQueue.push_back(MP);
        }
        PhisToCheck.clear();
      }
    }
  }
};
}

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
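  // Equal generations mean no potentially memory-writing instruction was
  // processed between the two instructions on the current walk.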
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef =
      MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
  return MSSA->dominates(LaterDef, MSSA->getMemoryAccess(EarlierInst));
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live-out memory values are still
  // current in this block. If this block has multiple predecessors, then they
  // could have invalidated the live-out memory values of our parent. For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path. We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value. Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst)) {
        assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
        auto *TorF = (BI->getSuccessor(0) == BB)
                         ? ConstantInt::getTrue(BB->getContext())
                         : ConstantInt::getFalse(BB->getContext());
        AvailableValues.insert(CondInst, TorF);
        DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                     << CondInst->getName() << "' as " << *TorF << " in "
                     << BB->getName() << "\n");
        // Replace all dominated uses with the known value.
        if (unsigned Count = replaceDominatedUsesWith(
                CondInst, TorF, DT, BasicBlockEdge(Pred, BB))) {
          Changed = true;
          NumCSECVP = NumCSECVP + Count;
        }
      }
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics; they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass does not bother removing them. However, we should mark
    // their conditions as true for all dominated blocks.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // Skip invariant.start intrinsics since they only read memory, and we can
    // forward values across them. Also, we don't need to consume the last
    // store since the semantics of invariant.start allow us to perform DSE of
    // the last store, if there was a store following invariant.start. Consider:
    //
    //   store 30, i8* p
    //   invariant.start(p)
    //   store 40, i8* p
    //
    // We can DSE the store to 30, since the store of 40 to invariant location p
    // causes undefined behaviour.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>()))
      continue;

    if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOneValue()) {
              DEBUG(dbgs() << "EarlyCSE removing guard: " << *Inst << '\n');
              removeMSSA(Inst);
              Inst->eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
          }
          // The condition we're guarding on here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store
      // (to avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
      DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
      bool Killed = false;
      if (!Inst->use_empty()) {
        Inst->replaceAllUsesWith(V);
        Changed = true;
      }
      if (isInstructionTriviallyDead(Inst, &TLI)) {
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        Killed = true;
      }
      if (Changed)
        ++NumSimplify;
      if (Killed)
        continue;
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value. If so, use it.
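      // Illustrative example:
      //   %x = add i32 %a, %b
      //   ...
      //   %y = add i32 %a, %b   ; lookup finds %x, so uses of %y become %x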
      if (Value *V = AvailableValues.lookup(Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (Conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (InVal.IsInvariant || MemInst.isInvariantLoad() ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                       << " to: " << *InVal.DefInst << '\n');
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic(), MemInst.isInvariantLoad()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore. Load/store
    // intrinsics will indicate both a read and a write to memory. The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
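      // "Right generation" means no potentially memory-writing instruction was
      // seen between the two calls, or, when MemorySSA is available, that no
      // actual clobber lies between them.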
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                     << " to: " << *InVal.first << '\n');
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence. As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation. We do need to prevent DSE across the fence,
    // but that's handled above.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write. The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          isSameMemGeneration(InVal.Generation, CurrentGeneration,
                              InVal.DefInst, Inst)) {
        // It is okay to have a LastStore to a different pointer here if MemorySSA
        // tells us that the load and store are from the same memory generation.
        // In that case, LastStore should keep its present value since we're
        // removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores. There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we're going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                         << " due to: " << *Inst << '\n');
            removeMSSA(LastStore);
            LastStore->eraseFromParent();
            Changed = true;
            ++NumDSE;
            LastStore = nullptr;
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic(), /*IsInvariant=*/false));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed. We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post-dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration,
      DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
                        NodeToProcess->childGeneration(), child, child->begin(),
                        child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};
}

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)