//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <deque>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

static unsigned getHash(const void *V) {
  return DenseMapInfo<const void*>::getHashValue(V);
}

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};
}

namespace llvm {
template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
}

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
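  // For commutative binary operators and compares, the operand order (and,
  // for compares, the predicate) is canonicalized below before hashing, so
  // that commuted forms of the same expression hash to the same bucket;
  // isEqual() performs the matching commuted comparison.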
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    if (isa<OverflowingBinaryOperator>(BinOp)) {
      // Hash the overflow behavior
      unsigned Overflow =
          BinOp->hasNoSignedWrap() * OverflowingBinaryOperator::NoSignedWrap |
          BinOp->hasNoUnsignedWrap() *
              OverflowingBinaryOperator::NoUnsignedWrap;
      return hash_combine(BinOp->getOpcode(), Overflow, LHS, RHS);
    }

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
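  // Generic fallback for the remaining handled instructions (readnone calls,
  // GEPs, selects, and the vector operations asserted above): combine the
  // opcode with all operands in order.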
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalTo(RHSI))
    return true;

  // If we're not strictly identical, the two instructions may still be
  // commuted forms of the same operation.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Check overflow attributes
    if (isa<OverflowingBinaryOperator>(LHSBinOp)) {
      assert(isa<OverflowingBinaryOperator>(RHSBinOp) &&
             "same opcode, but different operator type?");
      if (LHSBinOp->hasNoUnsignedWrap() != RHSBinOp->hasNoUnsignedWrap() ||
          LHSBinOp->hasNoSignedWrap() != RHSBinOp->hasNoSignedWrap())
        return false;
    }

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};
}

namespace llvm {
template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};
}

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
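  // Unlike SimpleValue, call operands are never commuted, so a simple
  // order-dependent shift/xor mix over the operand pointers is sufficient.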
  unsigned Res = 0;
  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) {
    assert(!Inst->getOperand(i)->getType()->isMetadataTy() &&
           "Cannot value number calls with metadata operands");
    Res ^= getHash(Inst->getOperand(i)) << (i & 0xF);
  }

  // Mix in the opcode.
  return (Res << 1) ^ Inst->getOpcode();
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  Function &F;
  const DataLayout *DL;
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  typedef RecyclingAllocator<
      BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy;
  typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                          AllocatorTy> ScopedHTType;

  /// \brief A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// \brief A scoped hash table of the current values of loads.
  ///
  /// This allows us to get efficient access to dominating loads when we have
  /// a fully redundant load. In addition to the most recent load, we keep
  /// track of a generation count of the read, which is compared against the
  /// current generation count. The current generation count is incremented
  /// after every memory operation that may write, which ensures that we only
  /// CSE loads with other loads that have no intervening store.
  typedef RecyclingAllocator<
      BumpPtrAllocator,
      ScopedHashTableVal<Value *, std::pair<Value *, unsigned>>>
      LoadMapAllocator;
  typedef ScopedHashTable<Value *, std::pair<Value *, unsigned>,
                          DenseMapInfo<Value *>, LoadMapAllocator> LoadHTType;
  LoadHTType AvailableLoads;

  /// \brief A scoped hash table of the current values of read-only calls.
  ///
  /// It uses the same generation count as loads.
  typedef ScopedHashTable<CallValue, std::pair<Value *, unsigned>> CallHTType;
  CallHTType AvailableCalls;

  /// \brief This is the current generation of the memory value.
  unsigned CurrentGeneration;

  /// \brief Set up the EarlyCSE runner for a particular function.
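  ///
  /// The constructor only caches references to the required analyses; the
  /// dominator-tree walk itself happens in run().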
  EarlyCSE(Function &F, const DataLayout *DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC)
      : F(F), DL(DL), TLI(TLI), TTI(TTI), DT(DT), AC(AC), CurrentGeneration(0) {
  }

  bool run();

private:
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          CallScope(AvailableCalls) {}

  private:
    NodeScope(const NodeScope &) LLVM_DELETED_FUNCTION;
    void operator=(const NodeScope &) LLVM_DELETED_FUNCTION;

    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a
  // depth-first traversal of the tree. This includes scopes for values, loads,
  // and calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
              DomTreeNode::iterator child, DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls),
          Processed(false) {}

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }
    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }
    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    StackNode(const StackNode &) LLVM_DELETED_FUNCTION;
    void operator=(const StackNode &) LLVM_DELETED_FUNCTION;

    // Members.
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed;
  };

  /// \brief Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
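  ///
  /// Target-defined load/store intrinsics are described via
  /// TargetTransformInfo::getTgtMemIntrinsic().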
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : Load(false), Store(false), Vol(false), MayReadFromMemory(false),
          MayWriteToMemory(false), MatchingId(-1), Ptr(nullptr) {
      MayReadFromMemory = Inst->mayReadFromMemory();
      MayWriteToMemory = Inst->mayWriteToMemory();
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
        MemIntrinsicInfo Info;
        if (!TTI.getTgtMemIntrinsic(II, Info))
          return;
        if (Info.NumMemRefs == 1) {
          Store = Info.WriteMem;
          Load = Info.ReadMem;
          MatchingId = Info.MatchingId;
          MayReadFromMemory = Info.ReadMem;
          MayWriteToMemory = Info.WriteMem;
          Vol = Info.Vol;
          Ptr = Info.PtrVal;
        }
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Load = true;
        Vol = !LI->isSimple();
        Ptr = LI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        Store = true;
        Vol = !SI->isSimple();
        Ptr = SI->getPointerOperand();
      }
    }
    bool isLoad() { return Load; }
    bool isStore() { return Store; }
    bool isVolatile() { return Vol; }
    bool isMatchingMemLoc(const ParseMemoryInst &Inst) {
      return Ptr == Inst.Ptr && MatchingId == Inst.MatchingId;
    }
    bool isValid() { return Ptr != nullptr; }
    int getMatchingId() { return MatchingId; }
    Value *getPtr() { return Ptr; }
    bool mayReadFromMemory() { return MayReadFromMemory; }
    bool mayWriteToMemory() { return MayWriteToMemory; }

  private:
    bool Load;
    bool Store;
    bool Vol;
    bool MayReadFromMemory;
    bool MayWriteToMemory;
    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int MatchingId;
    Value *Ptr;
  };

  bool processNode(DomTreeNode *Node);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }
};
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live-out memory values are still
  // current in this block. If this block has multiple predecessors, then they
  // could have invalidated the live-out memory values of our parent block. For
  // now, just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  bool Changed = false;

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
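  // Note that the iterator is advanced past Inst before Inst is examined, so
  // erasing Inst below does not invalidate the walk over the block.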
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not disturb any of the assumption's control
    // dependencies.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, DL, &TLI, &DT, &AC)) {
      DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
      Inst->replaceAllUsesWith(V);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value. If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
        Inst->replaceAllUsesWith(V);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // Ignore volatile loads.
      if (MemInst.isVolatile()) {
        LastStore = nullptr;
        continue;
      }

      // If we have an available version of this load, and if it is the right
      // generation, replace this instruction.
      std::pair<Value *, unsigned> InVal =
          AvailableLoads.lookup(MemInst.getPtr());
      if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
        Value *Op = getOrCreateResult(InVal.first, Inst->getType());
        if (Op != nullptr) {
          DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                       << " to: " << *InVal.first << '\n');
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>(
                                                  Inst, CurrentGeneration));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory, forget LastStore.
    // Load/store intrinsics will indicate both a read and a write to
    // memory. The target may override this (e.g. so that a store intrinsic
    // does not read from memory, and thus will be treated the same as a
    // regular store for commoning purposes).
    if (Inst->mayReadFromMemory() &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
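      // Calls share the generation counter with loads, so any intervening
      // instruction that may write to memory invalidates this entry.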
      std::pair<Value *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
        DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                     << " to: " << *InVal.first << '\n');
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Value *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                         << " due to: " << *Inst << '\n');
            LastStore->eraseFromParent();
            Changed = true;
            ++NumDSE;
            LastStore = nullptr;
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a
        // live version of the pointer. It is safe to forward from volatile
        // stores to non-volatile loads, so we don't have to check for
        // volatility of the store.
        AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>(
                                                    Inst, CurrentGeneration));

        // Remember that this was the last store we saw for DSE.
        if (!MemInst.isVolatile())
          LastStore = Inst;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration,
      DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the top item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
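      // Each child starts from the generation recorded after its parent block
      // was processed (childGeneration), so memory values available at the end
      // of the parent remain available in the blocks it dominates.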
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
                        NodeToProcess->childGeneration(), child, child->begin(),
                        child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    AnalysisManager<Function> *AM) {
  const DataLayout *DL = F.getParent()->getDataLayout();

  auto &TLI = AM->getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM->getResult<TargetIRAnalysis>(F);
  auto &DT = AM->getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM->getResult<AssumptionAnalysis>(F);

  EarlyCSE CSE(F, DL, TLI, TTI, DT, AC);

  if (!CSE.run())
    return PreservedAnalyses::all();

  // CSE preserves the dominator tree because it doesn't mutate the CFG.
  // FIXME: Bundle this with other CFG-preservation.
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSELegacyPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyPass() : FunctionPass(ID) {
    initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F))
      return false;

    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    auto *DL = DLP ? &DLP->getDataLayout() : nullptr;
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI();
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

    EarlyCSE CSE(F, DL, TLI, TTI, DT, AC);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};
}

char EarlyCSELegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass() { return new EarlyCSELegacyPass(); }

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)
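
// Usage note: the legacy pass is registered under the name "early-cse" above,
// so it can be exercised directly with, e.g., 'opt -early-cse -S input.ll'.
// In builds where the new pass manager registration is wired up (it lives
// outside this file), 'opt -passes=early-cse -S input.ll' is assumed to invoke
// EarlyCSEPass::run() instead.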