//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <deque>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};
}

namespace llvm {
template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};
}

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
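  // For commutative operations, canonicalize the operand order (here, by
  // pointer value) before hashing, so that commuted forms such as
  // 'add %a, %b' and 'add %b, %a' hash alike; compares are swapped together
  // with their predicate. isEqual below accepts the matching commuted forms.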
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, the two instructions may still be a
  // commuted pair of a commutative operation.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality.
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality with the predicate swapped.
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {
/// \brief Struct representing the available call values in the scoped hash
/// table.
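///
/// Unlike SimpleValue, which only handles readnone calls, these are calls
/// that may read (but not write) memory. A remembered call result is
/// therefore only reusable while the generation count is unchanged, i.e.
/// while nothing that may write memory has intervened.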
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};
}

namespace llvm {
template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }
  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }
  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};
}

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  typedef RecyclingAllocator<
      BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy;
  typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                          AllocatorTy> ScopedHTType;

  /// \brief A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// \brief A scoped hash table of the current values of previously
  /// encountered memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count.
  /// The current generation count is incremented after every possibly
  /// writing memory operation, which ensures that we only CSE loads with
  /// other loads that have no intervening store. Ordering events (such as
  /// fences or atomic instructions) increment the generation count as well;
  /// essentially, we model these as writes to all possible locations. Note
  /// that atomic and/or volatile loads and stores can be present in the
  /// table; it is the responsibility of the consumer to inspect the
  /// atomicity/volatility if needed.
  struct LoadValue {
    Value *Data;
    unsigned Generation;
    int MatchingId;
    bool IsAtomic;
    LoadValue()
        : Data(nullptr), Generation(0), MatchingId(-1), IsAtomic(false) {}
    LoadValue(Value *Data, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : Data(Data), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };
  typedef RecyclingAllocator<BumpPtrAllocator,
                             ScopedHashTableVal<Value *, LoadValue>>
      LoadMapAllocator;
  typedef ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                          LoadMapAllocator> LoadHTType;
  LoadHTType AvailableLoads;

  /// \brief A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  typedef ScopedHashTable<CallValue, std::pair<Value *, unsigned>> CallHTType;
  CallHTType AvailableCalls;

  /// \brief This is the current generation of the memory value.
  unsigned CurrentGeneration;

  /// \brief Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const TargetLibraryInfo &TLI, const TargetTransformInfo &TTI,
           DominatorTree &DT, AssumptionCache &AC)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), CurrentGeneration(0) {}

  bool run();

private:
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          CallScope(AvailableCalls) {}

  private:
    NodeScope(const NodeScope &) = delete;
    void operator=(const NodeScope &) = delete;

    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
              DomTreeNode::iterator child, DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls),
          Processed(false) {}

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }
    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }
    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    StackNode(const StackNode &) = delete;
    void operator=(const StackNode &) = delete;

    // Members.
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed;
  };

  /// \brief Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
        : IsTargetMemInst(false), Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info) && Info.NumMemRefs == 1)
          IsTargetMemInst = true;
    }
    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }
    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }
    bool isAtomic() const {
      if (IsTargetMemInst) {
        assert(Info.IsSimple && "need to refine IsSimple in TTI");
        return false;
      }
      return Inst->isAtomic();
    }
    bool isUnordered() const {
      if (IsTargetMemInst) {
        assert(Info.IsSimple && "need to refine IsSimple in TTI");
        return true;
      }
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer.
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst) {
        assert(Info.IsSimple && "need to refine IsSimple in TTI");
        return false;
      }
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer.
      return true;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }
    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
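    // A MatchingId mismatch (e.g. a plain load versus a target load
    // intrinsic) therefore prevents forwarding between the two entries even
    // when they address the same pointer.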
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }
    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->getPointerOperand();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->getPointerOperand();
      }
      return nullptr;
    }
    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }
    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    else if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }
};
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the
  // parent of the domtree node and all of the live-out memory values are
  // still current in this block. If this block has multiple predecessors,
  // then they could have invalidated the live-out memory values of our
  // parent block. For now, just be conservative and invalidate memory if
  // this block has multiple predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path. We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value. Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    if (auto *BI = dyn_cast<BranchInst>(Pred->getTerminator()))
      if (BI->isConditional())
        if (auto *CondInst = dyn_cast<Instruction>(BI->getCondition()))
          if (SimpleValue::canHandle(CondInst)) {
            assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
            auto *ConditionalConstant = (BI->getSuccessor(0) == BB) ?
                ConstantInt::getTrue(BB->getContext()) :
                ConstantInt::getFalse(BB->getContext());
            AvailableValues.insert(CondInst, ConditionalConstant);
            DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                         << CondInst->getName() << "' as " << *ConditionalConstant
                         << " in " << BB->getName() << "\n");
            // Replace all dominated uses with the known value.
            replaceDominatedUsesWith(CondInst, ConditionalConstant, DT,
                                     BasicBlockEdge(Pred, BB));
          }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
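  /// For example, in:
  ///   store i32 1, i32* %p
  ///   store i32 2, i32* %p
  /// the first store is trivially dead because no load of %p intervenes.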
  Instruction *LastStore = nullptr;

  bool Changed = false;
  const DataLayout &DL = BB->getModule()->getDataLayout();

  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not disturb any of the assumption's control
    // dependencies.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, DL, &TLI, &DT, &AC)) {
      DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << " to: " << *V << '\n');
      Inst->replaceAllUsesWith(V);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value. If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << " to: " << *V << '\n');
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // We can't (conservatively) peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      // If we have an available version of this load, and if it is the right
      // generation, replace this instruction.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.Data != nullptr && InVal.Generation == CurrentGeneration &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic()) {
        Value *Op = getOrCreateResult(InVal.Data, Inst->getType());
        if (Op != nullptr) {
          DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                       << " to: " << *InVal.Data << '\n');
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
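      // The entry is tagged with the current generation; any later
      // potentially-writing instruction bumps CurrentGeneration, so a stale
      // entry fails the generation check above instead of being forwarded.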
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory, forget LastStore.
    // Load/store intrinsics will indicate both a read and a write to
    // memory. The target may override this (e.g. so that a store intrinsic
    // does not read from memory, and thus will be treated the same as a
    // regular store for commoning purposes).
    if (Inst->mayReadFromMemory() &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Value *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr && InVal.second == CurrentGeneration) {
        DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                     << " to: " << *InVal.first << '\n');
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Value *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence. As a
    // result, we don't need to consider it as writing to memory and don't
    // need to advance the generation. We do need to prevent DSE across the
    // fence, but that's handled above.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // Write-back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write. The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.Data &&
          InVal.Data == getOrCreateResult(Inst, InVal.Data->getType()) &&
          InVal.Generation == CurrentGeneration &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered()) {
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand()) &&
               "can't have an intervening store!");
        DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
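    // For instance, a call to an unknown function may clobber any location,
    // so every entry recorded under an older generation then fails its
    // generation check and is never forwarded.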
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores. There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we're going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                         << " due to: " << *Inst << '\n');
            LastStore->eraseFromParent();
            Changed = true;
            ++NumDSE;
            LastStore = nullptr;
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a
        // live version of the pointer. It is safe to forward from volatile
        // stores to non-volatile loads, so we don't have to check for
        // volatility of the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed. We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration,
      DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
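      // The generation may be bumped while the node's block is processed;
      // the resulting value is recorded as the starting generation for this
      // node's children, which are pushed on later visits.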
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
                        NodeToProcess->childGeneration(), child, child->begin(),
                        child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    AnalysisManager<Function> &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);

  EarlyCSE CSE(TLI, TTI, DT, AC);

  if (!CSE.run())
    return PreservedAnalyses::all();

  // CSE preserves the dominator tree because it doesn't mutate the CFG.
  // FIXME: Bundle this with other CFG-preservation.
  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

namespace {
/// \brief A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSELegacyPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyPass() : FunctionPass(ID) {
    initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipOptnoneFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);

    EarlyCSE CSE(TLI, TTI, DT, AC);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};
}

char EarlyCSELegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass() { return new EarlyCSELegacyPass(); }

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)