//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include <deque>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE,      "Number of instructions CSE'd");
STATISTIC(NumCSELoad,  "Number of load instructions CSE'd");
STATISTIC(NumCSECall,  "Number of call instructions CSE'd");
STATISTIC(NumDSE,      "Number of trivial dead stores removed");

static unsigned getHash(const void *V) {
  return DenseMapInfo<const void*>::getHashValue(V);
}

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};
}

namespace llvm {
template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
}

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
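  // Commutative operations are canonicalized by operand pointer order before
  // hashing, so that, for example, "add %x, %y" and "add %y, %x" hash to the
  // same bucket and isEqual() can then recognize the commuted pair.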
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    if (isa<OverflowingBinaryOperator>(BinOp)) {
      // Hash the overflow behavior
      unsigned Overflow =
          BinOp->hasNoSignedWrap() * OverflowingBinaryOperator::NoSignedWrap |
          BinOp->hasNoUnsignedWrap() *
              OverflowingBinaryOperator::NoUnsignedWrap;
      return hash_combine(BinOp->getOpcode(), Overflow, LHS, RHS);
    }

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
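  // Operand order is significant for the remaining instruction kinds (calls,
  // GEPs, selects, vector operations), so hashing the value operands in
  // sequence is sufficient here.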
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalTo(RHSI))
    return true;

  // If we're not strictly identical, the instructions may still be equivalent
  // after commuting operands.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Check overflow attributes
    if (isa<OverflowingBinaryOperator>(LHSBinOp)) {
      assert(isa<OverflowingBinaryOperator>(RHSBinOp) &&
             "same opcode, but different operator type?");
      if (LHSBinOp->hasNoUnsignedWrap() != RHSBinOp->hasNoUnsignedWrap() ||
          LHSBinOp->hasNoSignedWrap() != RHSBinOp->hasNoSignedWrap())
        return false;
    }

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};
}

namespace llvm {
template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};
}

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
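  // Unlike SimpleValue, calls are hashed with a simple shifted-XOR mix of the
  // operand pointers; the precise comparison is deferred to isIdenticalTo()
  // in isEqual() below.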
  unsigned Res = 0;
  for (unsigned i = 0, e = Inst->getNumOperands(); i != e; ++i) {
    assert(!Inst->getOperand(i)->getType()->isMetadataTy() &&
           "Cannot value number calls with metadata operands");
    Res ^= getHash(Inst->getOperand(i)) << (i & 0xF);
  }

  // Mix in the opcode.
  return (Res << 1) ^ Inst->getOpcode();
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  Function &F;
  const DataLayout *DL;
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  typedef RecyclingAllocator<
      BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy;
  typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                          AllocatorTy> ScopedHTType;

  /// \brief A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// \brief A scoped hash table of the current values of loads.
  ///
  /// This allows us to get efficient access to dominating loads when we have
  /// a fully redundant load. In addition to the most recent load, we keep
  /// track of a generation count of the read, which is compared against the
  /// current generation count. The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store.
  typedef RecyclingAllocator<
      BumpPtrAllocator,
      ScopedHashTableVal<Value *, std::pair<Value *, unsigned>>>
      LoadMapAllocator;
  typedef ScopedHashTable<Value *, std::pair<Value *, unsigned>,
                          DenseMapInfo<Value *>, LoadMapAllocator> LoadHTType;
  LoadHTType AvailableLoads;

  /// \brief A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  typedef ScopedHashTable<CallValue, std::pair<Value *, unsigned>> CallHTType;
  CallHTType AvailableCalls;

  /// \brief This is the current generation of the memory value.
  unsigned CurrentGeneration;

  /// \brief Set up the EarlyCSE runner for a particular function.
  EarlyCSE(Function &F, const DataLayout *DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC)
      : F(F), DL(DL), TLI(TLI), TTI(TTI), DT(DT), AC(AC), CurrentGeneration(0) {
  }

  bool run();

private:
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          CallScope(AvailableCalls) {}

  private:
    NodeScope(const NodeScope &) LLVM_DELETED_FUNCTION;
    void operator=(const NodeScope &) LLVM_DELETED_FUNCTION;

    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
              DomTreeNode::iterator child, DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls),
          Processed(false) {}

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }
    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }
    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    StackNode(const StackNode &) LLVM_DELETED_FUNCTION;
    void operator=(const StackNode &) LLVM_DELETED_FUNCTION;

    // Members.
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed;
  };

  /// \brief Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
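  ///
  /// For target intrinsics, TTI.getTgtMemIntrinsic() is queried to discover
  /// whether the intrinsic reads or writes memory and which pointer it
  /// accesses; intrinsics touching more than one memory location are left
  /// unparsed and therefore handled conservatively.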
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : Load(false), Store(false), Vol(false), MayReadFromMemory(false),
          MayWriteToMemory(false), MatchingId(-1), Ptr(nullptr) {
      MayReadFromMemory = Inst->mayReadFromMemory();
      MayWriteToMemory = Inst->mayWriteToMemory();
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
        MemIntrinsicInfo Info;
        if (!TTI.getTgtMemIntrinsic(II, Info))
          return;
        if (Info.NumMemRefs == 1) {
          Store = Info.WriteMem;
          Load = Info.ReadMem;
          MatchingId = Info.MatchingId;
          MayReadFromMemory = Info.ReadMem;
          MayWriteToMemory = Info.WriteMem;
          Vol = Info.Vol;
          Ptr = Info.PtrVal;
        }
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Load = true;
        Vol = !LI->isSimple();
        Ptr = LI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        Store = true;
        Vol = !SI->isSimple();
        Ptr = SI->getPointerOperand();
      }
    }
    bool isLoad() { return Load; }
    bool isStore() { return Store; }
    bool isVolatile() { return Vol; }
    bool isMatchingMemLoc(const ParseMemoryInst &Inst) {
      return Ptr == Inst.Ptr && MatchingId == Inst.MatchingId;
    }
    bool isValid() { return Ptr != nullptr; }
    int getMatchingId() { return MatchingId; }
    Value *getPtr() { return Ptr; }
    bool mayReadFromMemory() { return MayReadFromMemory; }
    bool mayWriteToMemory() { return MayWriteToMemory; }

  private:
    bool Load;
    bool Store;
    bool Vol;
    bool MayReadFromMemory;
    bool MayWriteToMemory;
    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int MatchingId;
    Value *Ptr;
  };

  bool processNode(DomTreeNode *Node);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }
};
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block. If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent value. For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  bool Changed = false;

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
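  //
  // For example (illustrative IR), in:
  //   %v1 = load i32* %p
  //   call void @f()          ; may write to memory; generation is bumped
  //   %v2 = load i32* %p
  // %v2 is not CSE'd to %v1 because its generation no longer matches the one
  // recorded for %v1 in AvailableLoads.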
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not disturb any of the assumption's control
    // dependencies.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, DL, &TLI, &DT, &AC)) {
      DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
      Inst->replaceAllUsesWith(V);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value. If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
        Inst->replaceAllUsesWith(V);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // Ignore volatile loads.
      if (MemInst.isVolatile()) {
        LastStore = nullptr;
        continue;
      }

      // If we have an available version of this load, and if it is the right
      // generation, replace this instruction.
      std::pair<Value *, unsigned> InVal =
          AvailableLoads.lookup(MemInst.getPtr());
      if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
        Value *Op = getOrCreateResult(InVal.first, Inst->getType());
        if (Op != nullptr) {
          DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                       << " to: " << *InVal.first << '\n');
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>(
                                                  Inst, CurrentGeneration));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory, forget LastStore.
    // Load/store intrinsics will indicate both a read and a write to
    // memory. The target may override this (e.g. so that a store intrinsic
    // does not read from memory, and thus will be treated the same as a
    // regular store for commoning purposes).
    if (Inst->mayReadFromMemory() &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
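      // Read-only calls share the loads' generation counter, so any
      // intervening may-write instruction invalidates them as well.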
      std::pair<Value *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
        DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                     << " to: " << *InVal.first << '\n');
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Value *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                         << " due to: " << *Inst << '\n');
            LastStore->eraseFromParent();
            Changed = true;
            ++NumDSE;
            LastStore = nullptr;
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(MemInst.getPtr(), std::pair<Value *, unsigned>(
                                                    Inst, CurrentGeneration));

        // Remember that this was the last store we saw for DSE.
        if (!MemInst.isVolatile())
          LastStore = Inst;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.cs.uiuc.edu/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration,
      DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
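      // Each child starts from the parent's post-processing generation
      // (childGeneration), not from whatever CurrentGeneration a sibling
      // subtree may have left behind.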
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
                        NodeToProcess->childGeneration(), child, child->begin(),
                        child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSELegacyPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyPass() : FunctionPass(ID) {
    initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F))
      return false;

    DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
    auto *DL = DLP ? &DLP->getDataLayout() : nullptr;
    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI();
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

    EarlyCSE CSE(F, DL, TLI, TTI, DT, AC);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.setPreservesCFG();
  }
};
}

char EarlyCSELegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass() { return new EarlyCSELegacyPass(); }

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)