xref: /llvm-project/llvm/lib/Transforms/Scalar/EarlyCSE.cpp (revision f7d5daa0c0b47351a66f7d12f35247636934e7aa)
1 //===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass performs a simple dominator tree walk that eliminates trivially
11 // redundant instructions.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Scalar/EarlyCSE.h"
16 #include "llvm/ADT/Hashing.h"
17 #include "llvm/ADT/ScopedHashTable.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/Analysis/AssumptionCache.h"
21 #include "llvm/Analysis/GlobalsModRef.h"
22 #include "llvm/Analysis/InstructionSimplify.h"
23 #include "llvm/Analysis/MemorySSA.h"
24 #include "llvm/Analysis/MemorySSAUpdater.h"
25 #include "llvm/Analysis/TargetLibraryInfo.h"
26 #include "llvm/Analysis/TargetTransformInfo.h"
27 #include "llvm/IR/DataLayout.h"
28 #include "llvm/IR/Dominators.h"
29 #include "llvm/IR/Instructions.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/PatternMatch.h"
32 #include "llvm/Pass.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/RecyclingAllocator.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Transforms/Scalar.h"
37 #include "llvm/Transforms/Utils/Local.h"
38 #include <deque>
39 using namespace llvm;
40 using namespace llvm::PatternMatch;
41 
42 #define DEBUG_TYPE "early-cse"
43 
44 STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
45 STATISTIC(NumCSE,      "Number of instructions CSE'd");
46 STATISTIC(NumCSECVP,   "Number of compare instructions CVP'd");
47 STATISTIC(NumCSELoad,  "Number of load instructions CSE'd");
48 STATISTIC(NumCSECall,  "Number of call instructions CSE'd");
49 STATISTIC(NumDSE,      "Number of trivial dead stores removed");
50 
51 //===----------------------------------------------------------------------===//
52 // SimpleValue
53 //===----------------------------------------------------------------------===//
54 
55 namespace {
56 /// \brief Struct representing the available values in the scoped hash table.
57 struct SimpleValue {
58   Instruction *Inst;
59 
60   SimpleValue(Instruction *I) : Inst(I) {
61     assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
62   }
63 
64   bool isSentinel() const {
65     return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
66            Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
67   }
68 
69   static bool canHandle(Instruction *Inst) {
70     // This can only handle non-void readnone functions.
71     if (CallInst *CI = dyn_cast<CallInst>(Inst))
72       return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
73     return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
74            isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
75            isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
76            isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
77            isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
78   }
79 };
80 }
81 
82 namespace llvm {
83 template <> struct DenseMapInfo<SimpleValue> {
84   static inline SimpleValue getEmptyKey() {
85     return DenseMapInfo<Instruction *>::getEmptyKey();
86   }
87   static inline SimpleValue getTombstoneKey() {
88     return DenseMapInfo<Instruction *>::getTombstoneKey();
89   }
90   static unsigned getHashValue(SimpleValue Val);
91   static bool isEqual(SimpleValue LHS, SimpleValue RHS);
92 };
93 }
94 
95 unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
96   Instruction *Inst = Val.Inst;
97   // Hash in all of the operands as pointers.
98   if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
99     Value *LHS = BinOp->getOperand(0);
100     Value *RHS = BinOp->getOperand(1);
101     if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
102       std::swap(LHS, RHS);
103 
104     return hash_combine(BinOp->getOpcode(), LHS, RHS);
105   }
106 
107   if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
108     Value *LHS = CI->getOperand(0);
109     Value *RHS = CI->getOperand(1);
110     CmpInst::Predicate Pred = CI->getPredicate();
111     if (Inst->getOperand(0) > Inst->getOperand(1)) {
112       std::swap(LHS, RHS);
113       Pred = CI->getSwappedPredicate();
114     }
115     return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
116   }
117 
118   if (CastInst *CI = dyn_cast<CastInst>(Inst))
119     return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));
120 
121   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
122     return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
123                         hash_combine_range(EVI->idx_begin(), EVI->idx_end()));
124 
125   if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
126     return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
127                         IVI->getOperand(1),
128                         hash_combine_range(IVI->idx_begin(), IVI->idx_end()));
129 
130   assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
131           isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
132           isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
133           isa<ShuffleVectorInst>(Inst)) &&
134          "Invalid/unknown instruction");
135 
136   // Mix in the opcode.
137   return hash_combine(
138       Inst->getOpcode(),
139       hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
140 }
141 
142 bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
143   Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
144 
145   if (LHS.isSentinel() || RHS.isSentinel())
146     return LHSI == RHSI;
147 
148   if (LHSI->getOpcode() != RHSI->getOpcode())
149     return false;
150   if (LHSI->isIdenticalToWhenDefined(RHSI))
151     return true;
152 
153   // If we're not strictly identical, we still might be a commutable instruction
154   if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
155     if (!LHSBinOp->isCommutative())
156       return false;
157 
158     assert(isa<BinaryOperator>(RHSI) &&
159            "same opcode, but different instruction type?");
160     BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);
161 
162     // Commuted equality
163     return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
164            LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
165   }
166   if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
167     assert(isa<CmpInst>(RHSI) &&
168            "same opcode, but different instruction type?");
169     CmpInst *RHSCmp = cast<CmpInst>(RHSI);
170     // Commuted equality
171     return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
172            LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
173            LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
174   }
175 
176   return false;
177 }
178 
179 //===----------------------------------------------------------------------===//
180 // CallValue
181 //===----------------------------------------------------------------------===//
182 
183 namespace {
184 /// \brief Struct representing the available call values in the scoped hash
185 /// table.
186 struct CallValue {
187   Instruction *Inst;
188 
189   CallValue(Instruction *I) : Inst(I) {
190     assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
191   }
192 
193   bool isSentinel() const {
194     return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
195            Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
196   }
197 
198   static bool canHandle(Instruction *Inst) {
199     // Don't value number anything that returns void.
200     if (Inst->getType()->isVoidTy())
201       return false;
202 
203     CallInst *CI = dyn_cast<CallInst>(Inst);
204     if (!CI || !CI->onlyReadsMemory())
205       return false;
206     return true;
207   }
208 };
209 }
210 
211 namespace llvm {
212 template <> struct DenseMapInfo<CallValue> {
213   static inline CallValue getEmptyKey() {
214     return DenseMapInfo<Instruction *>::getEmptyKey();
215   }
216   static inline CallValue getTombstoneKey() {
217     return DenseMapInfo<Instruction *>::getTombstoneKey();
218   }
219   static unsigned getHashValue(CallValue Val);
220   static bool isEqual(CallValue LHS, CallValue RHS);
221 };
222 }
223 
224 unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
225   Instruction *Inst = Val.Inst;
226   // Hash all of the operands as pointers and mix in the opcode.
227   return hash_combine(
228       Inst->getOpcode(),
229       hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
230 }
231 
232 bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
233   Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
234   if (LHS.isSentinel() || RHS.isSentinel())
235     return LHSI == RHSI;
236   return LHSI->isIdenticalTo(RHSI);
237 }
238 
239 //===----------------------------------------------------------------------===//
240 // EarlyCSE implementation
241 //===----------------------------------------------------------------------===//
242 
243 namespace {
244 /// \brief A simple and fast domtree-based CSE pass.
245 ///
246 /// This pass does a simple depth-first walk over the dominator tree,
247 /// eliminating trivially redundant instructions and using instsimplify to
248 /// canonicalize things as it goes. It is intended to be fast and catch obvious
249 /// cases so that instcombine and other passes are more effective. It is
250 /// expected that a later pass of GVN will catch the interesting/hard cases.
251 class EarlyCSE {
252 public:
253   const TargetLibraryInfo &TLI;
254   const TargetTransformInfo &TTI;
255   DominatorTree &DT;
256   AssumptionCache &AC;
257   const SimplifyQuery SQ;
258   MemorySSA *MSSA;
259   std::unique_ptr<MemorySSAUpdater> MSSAUpdater;
260   typedef RecyclingAllocator<
261       BumpPtrAllocator, ScopedHashTableVal<SimpleValue, Value *>> AllocatorTy;
262   typedef ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
263                           AllocatorTy> ScopedHTType;
264 
265   /// \brief A scoped hash table of the current values of all of our simple
266   /// scalar expressions.
267   ///
268   /// As we walk down the domtree, we look to see if instructions are in this:
269   /// if so, we replace them with what we find, otherwise we insert them so
270   /// that dominated values can succeed in their lookup.
271   ScopedHTType AvailableValues;
272 
273   /// A scoped hash table of the current values of previously encounted memory
274   /// locations.
275   ///
276   /// This allows us to get efficient access to dominating loads or stores when
277   /// we have a fully redundant load.  In addition to the most recent load, we
278   /// keep track of a generation count of the read, which is compared against
279   /// the current generation count.  The current generation count is incremented
280   /// after every possibly writing memory operation, which ensures that we only
281   /// CSE loads with other loads that have no intervening store.  Ordering
282   /// events (such as fences or atomic instructions) increment the generation
283   /// count as well; essentially, we model these as writes to all possible
284   /// locations.  Note that atomic and/or volatile loads and stores can be
285   /// present the table; it is the responsibility of the consumer to inspect
286   /// the atomicity/volatility if needed.
287   struct LoadValue {
288     Instruction *DefInst;
289     unsigned Generation;
290     int MatchingId;
291     bool IsAtomic;
292     bool IsInvariant;
293     LoadValue()
294         : DefInst(nullptr), Generation(0), MatchingId(-1), IsAtomic(false),
295           IsInvariant(false) {}
296     LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
297               bool IsAtomic, bool IsInvariant)
298         : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
299           IsAtomic(IsAtomic), IsInvariant(IsInvariant) {}
300   };
301   typedef RecyclingAllocator<BumpPtrAllocator,
302                              ScopedHashTableVal<Value *, LoadValue>>
303       LoadMapAllocator;
304   typedef ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
305                           LoadMapAllocator> LoadHTType;
306   LoadHTType AvailableLoads;
307 
308   /// \brief A scoped hash table of the current values of read-only call
309   /// values.
310   ///
311   /// It uses the same generation count as loads.
312   typedef ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>
313       CallHTType;
314   CallHTType AvailableCalls;
315 
316   /// \brief This is the current generation of the memory value.
317   unsigned CurrentGeneration;
318 
319   /// \brief Set up the EarlyCSE runner for a particular function.
320   EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
321            const TargetTransformInfo &TTI, DominatorTree &DT,
322            AssumptionCache &AC, MemorySSA *MSSA)
323       : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
324         MSSAUpdater(make_unique<MemorySSAUpdater>(MSSA)), CurrentGeneration(0) {
325   }
326 
327   bool run();
328 
329 private:
330   // Almost a POD, but needs to call the constructors for the scoped hash
331   // tables so that a new scope gets pushed on. These are RAII so that the
332   // scope gets popped when the NodeScope is destroyed.
333   class NodeScope {
334   public:
335     NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
336               CallHTType &AvailableCalls)
337         : Scope(AvailableValues), LoadScope(AvailableLoads),
338           CallScope(AvailableCalls) {}
339 
340   private:
341     NodeScope(const NodeScope &) = delete;
342     void operator=(const NodeScope &) = delete;
343 
344     ScopedHTType::ScopeTy Scope;
345     LoadHTType::ScopeTy LoadScope;
346     CallHTType::ScopeTy CallScope;
347   };
348 
349   // Contains all the needed information to create a stack for doing a depth
350   // first traversal of the tree. This includes scopes for values, loads, and
351   // calls as well as the generation. There is a child iterator so that the
352   // children do not need to be store separately.
353   class StackNode {
354   public:
355     StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
356               CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
357               DomTreeNode::iterator child, DomTreeNode::iterator end)
358         : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
359           EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls),
360           Processed(false) {}
361 
362     // Accessors.
363     unsigned currentGeneration() { return CurrentGeneration; }
364     unsigned childGeneration() { return ChildGeneration; }
365     void childGeneration(unsigned generation) { ChildGeneration = generation; }
366     DomTreeNode *node() { return Node; }
367     DomTreeNode::iterator childIter() { return ChildIter; }
368     DomTreeNode *nextChild() {
369       DomTreeNode *child = *ChildIter;
370       ++ChildIter;
371       return child;
372     }
373     DomTreeNode::iterator end() { return EndIter; }
374     bool isProcessed() { return Processed; }
375     void process() { Processed = true; }
376 
377   private:
378     StackNode(const StackNode &) = delete;
379     void operator=(const StackNode &) = delete;
380 
381     // Members.
382     unsigned CurrentGeneration;
383     unsigned ChildGeneration;
384     DomTreeNode *Node;
385     DomTreeNode::iterator ChildIter;
386     DomTreeNode::iterator EndIter;
387     NodeScope Scopes;
388     bool Processed;
389   };
390 
391   /// \brief Wrapper class to handle memory instructions, including loads,
392   /// stores and intrinsic loads and stores defined by the target.
393   class ParseMemoryInst {
394   public:
395     ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
396       : IsTargetMemInst(false), Inst(Inst) {
397       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
398         if (TTI.getTgtMemIntrinsic(II, Info))
399           IsTargetMemInst = true;
400     }
401     bool isLoad() const {
402       if (IsTargetMemInst) return Info.ReadMem;
403       return isa<LoadInst>(Inst);
404     }
405     bool isStore() const {
406       if (IsTargetMemInst) return Info.WriteMem;
407       return isa<StoreInst>(Inst);
408     }
409     bool isAtomic() const {
410       if (IsTargetMemInst)
411         return Info.Ordering != AtomicOrdering::NotAtomic;
412       return Inst->isAtomic();
413     }
414     bool isUnordered() const {
415       if (IsTargetMemInst)
416         return Info.isUnordered();
417 
418       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
419         return LI->isUnordered();
420       } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
421         return SI->isUnordered();
422       }
423       // Conservative answer
424       return !Inst->isAtomic();
425     }
426 
427     bool isVolatile() const {
428       if (IsTargetMemInst)
429         return Info.IsVolatile;
430 
431       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
432         return LI->isVolatile();
433       } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
434         return SI->isVolatile();
435       }
436       // Conservative answer
437       return true;
438     }
439 
440     bool isInvariantLoad() const {
441       if (auto *LI = dyn_cast<LoadInst>(Inst))
442         return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
443       return false;
444     }
445 
446     bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
447       return (getPointerOperand() == Inst.getPointerOperand() &&
448               getMatchingId() == Inst.getMatchingId());
449     }
450     bool isValid() const { return getPointerOperand() != nullptr; }
451 
452     // For regular (non-intrinsic) loads/stores, this is set to -1. For
453     // intrinsic loads/stores, the id is retrieved from the corresponding
454     // field in the MemIntrinsicInfo structure.  That field contains
455     // non-negative values only.
456     int getMatchingId() const {
457       if (IsTargetMemInst) return Info.MatchingId;
458       return -1;
459     }
460     Value *getPointerOperand() const {
461       if (IsTargetMemInst) return Info.PtrVal;
462       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
463         return LI->getPointerOperand();
464       } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
465         return SI->getPointerOperand();
466       }
467       return nullptr;
468     }
469     bool mayReadFromMemory() const {
470       if (IsTargetMemInst) return Info.ReadMem;
471       return Inst->mayReadFromMemory();
472     }
473     bool mayWriteToMemory() const {
474       if (IsTargetMemInst) return Info.WriteMem;
475       return Inst->mayWriteToMemory();
476     }
477 
478   private:
479     bool IsTargetMemInst;
480     MemIntrinsicInfo Info;
481     Instruction *Inst;
482   };
483 
484   bool processNode(DomTreeNode *Node);
485 
486   Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
487     if (auto *LI = dyn_cast<LoadInst>(Inst))
488       return LI;
489     if (auto *SI = dyn_cast<StoreInst>(Inst))
490       return SI->getValueOperand();
491     assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
492     return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
493                                                  ExpectedType);
494   }
495 
496   bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
497                            Instruction *EarlierInst, Instruction *LaterInst);
498 
499   void removeMSSA(Instruction *Inst) {
500     if (!MSSA)
501       return;
502     // Removing a store here can leave MemorySSA in an unoptimized state by
503     // creating MemoryPhis that have identical arguments and by creating
504     // MemoryUses whose defining access is not an actual clobber.  We handle the
505     // phi case eagerly here.  The non-optimized MemoryUse case is lazily
506     // updated by MemorySSA getClobberingMemoryAccess.
507     if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
508       // Optimize MemoryPhi nodes that may become redundant by having all the
509       // same input values once MA is removed.
510       SmallSetVector<MemoryPhi *, 4> PhisToCheck;
511       SmallVector<MemoryAccess *, 8> WorkQueue;
512       WorkQueue.push_back(MA);
513       // Process MemoryPhi nodes in FIFO order using a ever-growing vector since
514       // we shouldn't be processing that many phis and this will avoid an
515       // allocation in almost all cases.
516       for (unsigned I = 0; I < WorkQueue.size(); ++I) {
517         MemoryAccess *WI = WorkQueue[I];
518 
519         for (auto *U : WI->users())
520           if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
521             PhisToCheck.insert(MP);
522 
523         MSSAUpdater->removeMemoryAccess(WI);
524 
525         for (MemoryPhi *MP : PhisToCheck) {
526           MemoryAccess *FirstIn = MP->getIncomingValue(0);
527           if (all_of(MP->incoming_values(),
528                      [=](Use &In) { return In == FirstIn; }))
529             WorkQueue.push_back(MP);
530         }
531         PhisToCheck.clear();
532       }
533     }
534   }
535 };
536 }
537 
538 /// Determine if the memory referenced by LaterInst is from the same heap
539 /// version as EarlierInst.
540 /// This is currently called in two scenarios:
541 ///
542 ///   load p
543 ///   ...
544 ///   load p
545 ///
546 /// and
547 ///
548 ///   x = load p
549 ///   ...
550 ///   store x, p
551 ///
552 /// in both cases we want to verify that there are no possible writes to the
553 /// memory referenced by p between the earlier and later instruction.
554 bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
555                                    unsigned LaterGeneration,
556                                    Instruction *EarlierInst,
557                                    Instruction *LaterInst) {
558   // Check the simple memory generation tracking first.
559   if (EarlierGeneration == LaterGeneration)
560     return true;
561 
562   if (!MSSA)
563     return false;
564 
565   // If MemorySSA has determined that one of EarlierInst or LaterInst does not
566   // read/write memory, then we can safely return true here.
567   // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
568   // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
569   // by also checking the MemorySSA MemoryAccess on the instruction.  Initial
570   // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
571   // with the default optimization pipeline.
572   auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
573   if (!EarlierMA)
574     return true;
575   auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
576   if (!LaterMA)
577     return true;
578 
579   // Since we know LaterDef dominates LaterInst and EarlierInst dominates
580   // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
581   // EarlierInst and LaterInst and neither can any other write that potentially
582   // clobbers LaterInst.
583   MemoryAccess *LaterDef =
584       MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
585   return MSSA->dominates(LaterDef, EarlierMA);
586 }
587 
588 bool EarlyCSE::processNode(DomTreeNode *Node) {
589   bool Changed = false;
590   BasicBlock *BB = Node->getBlock();
591 
592   // If this block has a single predecessor, then the predecessor is the parent
593   // of the domtree node and all of the live out memory values are still current
594   // in this block.  If this block has multiple predecessors, then they could
595   // have invalidated the live-out memory values of our parent value.  For now,
596   // just be conservative and invalidate memory if this block has multiple
597   // predecessors.
598   if (!BB->getSinglePredecessor())
599     ++CurrentGeneration;
600 
601   // If this node has a single predecessor which ends in a conditional branch,
602   // we can infer the value of the branch condition given that we took this
603   // path.  We need the single predecessor to ensure there's not another path
604   // which reaches this block where the condition might hold a different
605   // value.  Since we're adding this to the scoped hash table (like any other
606   // def), it will have been popped if we encounter a future merge block.
607   if (BasicBlock *Pred = BB->getSinglePredecessor()) {
608     auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
609     if (BI && BI->isConditional()) {
610       auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
611       if (CondInst && SimpleValue::canHandle(CondInst)) {
612         assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
613         auto *TorF = (BI->getSuccessor(0) == BB)
614                          ? ConstantInt::getTrue(BB->getContext())
615                          : ConstantInt::getFalse(BB->getContext());
616         AvailableValues.insert(CondInst, TorF);
617         DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
618                      << CondInst->getName() << "' as " << *TorF << " in "
619                      << BB->getName() << "\n");
620         // Replace all dominated uses with the known value.
621         if (unsigned Count = replaceDominatedUsesWith(
622                 CondInst, TorF, DT, BasicBlockEdge(Pred, BB))) {
623           Changed = true;
624           NumCSECVP += Count;
625         }
626       }
627     }
628   }
629 
630   /// LastStore - Keep track of the last non-volatile store that we saw... for
631   /// as long as there in no instruction that reads memory.  If we see a store
632   /// to the same location, we delete the dead store.  This zaps trivial dead
633   /// stores which can occur in bitfield code among other things.
634   Instruction *LastStore = nullptr;
635 
636   // See if any instructions in the block can be eliminated.  If so, do it.  If
637   // not, add them to AvailableValues.
638   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
639     Instruction *Inst = &*I++;
640 
641     // Dead instructions should just be removed.
642     if (isInstructionTriviallyDead(Inst, &TLI)) {
643       DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
644       removeMSSA(Inst);
645       Inst->eraseFromParent();
646       Changed = true;
647       ++NumSimplify;
648       continue;
649     }
650 
651     // Skip assume intrinsics, they don't really have side effects (although
652     // they're marked as such to ensure preservation of control dependencies),
653     // and this pass will not bother with its removal. However, we should mark
654     // its condition as true for all dominated blocks.
655     if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
656       auto *CondI =
657           dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
658       if (CondI && SimpleValue::canHandle(CondI)) {
659         DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst << '\n');
660         AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
661       } else
662         DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
663       continue;
664     }
665 
666     // Skip invariant.start intrinsics since they only read memory, and we can
667     // forward values across it. Also, we dont need to consume the last store
668     // since the semantics of invariant.start allow us to perform DSE of the
669     // last store, if there was a store following invariant.start. Consider:
670     //
671     // store 30, i8* p
672     // invariant.start(p)
673     // store 40, i8* p
674     // We can DSE the store to 30, since the store 40 to invariant location p
675     // causes undefined behaviour.
676     if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>()))
677       continue;
678 
679     if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
680       if (auto *CondI =
681               dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
682         if (SimpleValue::canHandle(CondI)) {
683           // Do we already know the actual value of this condition?
684           if (auto *KnownCond = AvailableValues.lookup(CondI)) {
685             // Is the condition known to be true?
686             if (isa<ConstantInt>(KnownCond) &&
687                 cast<ConstantInt>(KnownCond)->isOne()) {
688               DEBUG(dbgs() << "EarlyCSE removing guard: " << *Inst << '\n');
689               removeMSSA(Inst);
690               Inst->eraseFromParent();
691               Changed = true;
692               continue;
693             } else
694               // Use the known value if it wasn't true.
695               cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
696           }
697           // The condition we're on guarding here is true for all dominated
698           // locations.
699           AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
700         }
701       }
702 
703       // Guard intrinsics read all memory, but don't write any memory.
704       // Accordingly, don't update the generation but consume the last store (to
705       // avoid an incorrect DSE).
706       LastStore = nullptr;
707       continue;
708     }
709 
710     // If the instruction can be simplified (e.g. X+0 = X) then replace it with
711     // its simpler value.
712     if (Value *V = SimplifyInstruction(Inst, SQ)) {
713       DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << "  to: " << *V << '\n');
714       bool Killed = false;
715       if (!Inst->use_empty()) {
716         Inst->replaceAllUsesWith(V);
717         Changed = true;
718       }
719       if (isInstructionTriviallyDead(Inst, &TLI)) {
720         removeMSSA(Inst);
721         Inst->eraseFromParent();
722         Changed = true;
723         Killed = true;
724       }
725       if (Changed)
726         ++NumSimplify;
727       if (Killed)
728         continue;
729     }
730 
731     // If this is a simple instruction that we can value number, process it.
732     if (SimpleValue::canHandle(Inst)) {
733       // See if the instruction has an available value.  If so, use it.
734       if (Value *V = AvailableValues.lookup(Inst)) {
735         DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << "  to: " << *V << '\n');
736         if (auto *I = dyn_cast<Instruction>(V))
737           I->andIRFlags(Inst);
738         Inst->replaceAllUsesWith(V);
739         removeMSSA(Inst);
740         Inst->eraseFromParent();
741         Changed = true;
742         ++NumCSE;
743         continue;
744       }
745 
746       // Otherwise, just remember that this value is available.
747       AvailableValues.insert(Inst, Inst);
748       continue;
749     }
750 
751     ParseMemoryInst MemInst(Inst, TTI);
752     // If this is a non-volatile load, process it.
753     if (MemInst.isValid() && MemInst.isLoad()) {
754       // (conservatively) we can't peak past the ordering implied by this
755       // operation, but we can add this load to our set of available values
756       if (MemInst.isVolatile() || !MemInst.isUnordered()) {
757         LastStore = nullptr;
758         ++CurrentGeneration;
759       }
760 
761       // If we have an available version of this load, and if it is the right
762       // generation or the load is known to be from an invariant location,
763       // replace this instruction.
764       //
765       // If either the dominating load or the current load are invariant, then
766       // we can assume the current load loads the same value as the dominating
767       // load.
768       LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
769       if (InVal.DefInst != nullptr &&
770           InVal.MatchingId == MemInst.getMatchingId() &&
771           // We don't yet handle removing loads with ordering of any kind.
772           !MemInst.isVolatile() && MemInst.isUnordered() &&
773           // We can't replace an atomic load with one which isn't also atomic.
774           InVal.IsAtomic >= MemInst.isAtomic() &&
775           (InVal.IsInvariant || MemInst.isInvariantLoad() ||
776            isSameMemGeneration(InVal.Generation, CurrentGeneration,
777                                InVal.DefInst, Inst))) {
778         Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
779         if (Op != nullptr) {
780           DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
781                        << "  to: " << *InVal.DefInst << '\n');
782           if (!Inst->use_empty())
783             Inst->replaceAllUsesWith(Op);
784           removeMSSA(Inst);
785           Inst->eraseFromParent();
786           Changed = true;
787           ++NumCSELoad;
788           continue;
789         }
790       }
791 
792       // Otherwise, remember that we have this instruction.
793       AvailableLoads.insert(
794           MemInst.getPointerOperand(),
795           LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
796                     MemInst.isAtomic(), MemInst.isInvariantLoad()));
797       LastStore = nullptr;
798       continue;
799     }
800 
801     // If this instruction may read from memory or throw (and potentially read
802     // from memory in the exception handler), forget LastStore.  Load/store
803     // intrinsics will indicate both a read and a write to memory.  The target
804     // may override this (e.g. so that a store intrinsic does not read from
805     // memory, and thus will be treated the same as a regular store for
806     // commoning purposes).
807     if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
808         !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
809       LastStore = nullptr;
810 
811     // If this is a read-only call, process it.
812     if (CallValue::canHandle(Inst)) {
813       // If we have an available version of this call, and if it is the right
814       // generation, replace this instruction.
815       std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
816       if (InVal.first != nullptr &&
817           isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
818                               Inst)) {
819         DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
820                      << "  to: " << *InVal.first << '\n');
821         if (!Inst->use_empty())
822           Inst->replaceAllUsesWith(InVal.first);
823         removeMSSA(Inst);
824         Inst->eraseFromParent();
825         Changed = true;
826         ++NumCSECall;
827         continue;
828       }
829 
830       // Otherwise, remember that we have this instruction.
831       AvailableCalls.insert(
832           Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
833       continue;
834     }
835 
836     // A release fence requires that all stores complete before it, but does
837     // not prevent the reordering of following loads 'before' the fence.  As a
838     // result, we don't need to consider it as writing to memory and don't need
839     // to advance the generation.  We do need to prevent DSE across the fence,
840     // but that's handled above.
841     if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
842       if (FI->getOrdering() == AtomicOrdering::Release) {
843         assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
844         continue;
845       }
846 
847     // write back DSE - If we write back the same value we just loaded from
848     // the same location and haven't passed any intervening writes or ordering
849     // operations, we can remove the write.  The primary benefit is in allowing
850     // the available load table to remain valid and value forward past where
851     // the store originally was.
852     if (MemInst.isValid() && MemInst.isStore()) {
853       LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
854       if (InVal.DefInst &&
855           InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
856           InVal.MatchingId == MemInst.getMatchingId() &&
857           // We don't yet handle removing stores with ordering of any kind.
858           !MemInst.isVolatile() && MemInst.isUnordered() &&
859           isSameMemGeneration(InVal.Generation, CurrentGeneration,
860                               InVal.DefInst, Inst)) {
861         // It is okay to have a LastStore to a different pointer here if MemorySSA
862         // tells us that the load and store are from the same memory generation.
863         // In that case, LastStore should keep its present value since we're
864         // removing the current store.
865         assert((!LastStore ||
866                 ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
867                     MemInst.getPointerOperand() ||
868                 MSSA) &&
869                "can't have an intervening store if not using MemorySSA!");
870         DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
871         removeMSSA(Inst);
872         Inst->eraseFromParent();
873         Changed = true;
874         ++NumDSE;
875         // We can avoid incrementing the generation count since we were able
876         // to eliminate this store.
877         continue;
878       }
879     }
880 
881     // Okay, this isn't something we can CSE at all.  Check to see if it is
882     // something that could modify memory.  If so, our available memory values
883     // cannot be used so bump the generation count.
884     if (Inst->mayWriteToMemory()) {
885       ++CurrentGeneration;
886 
887       if (MemInst.isValid() && MemInst.isStore()) {
888         // We do a trivial form of DSE if there are two stores to the same
889         // location with no intervening loads.  Delete the earlier store.
890         // At the moment, we don't remove ordered stores, but do remove
891         // unordered atomic stores.  There's no special requirement (for
892         // unordered atomics) about removing atomic stores only in favor of
893         // other atomic stores since we we're going to execute the non-atomic
894         // one anyway and the atomic one might never have become visible.
895         if (LastStore) {
896           ParseMemoryInst LastStoreMemInst(LastStore, TTI);
897           assert(LastStoreMemInst.isUnordered() &&
898                  !LastStoreMemInst.isVolatile() &&
899                  "Violated invariant");
900           if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
901             DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
902                          << "  due to: " << *Inst << '\n');
903             removeMSSA(LastStore);
904             LastStore->eraseFromParent();
905             Changed = true;
906             ++NumDSE;
907             LastStore = nullptr;
908           }
909           // fallthrough - we can exploit information about this store
910         }
911 
912         // Okay, we just invalidated anything we knew about loaded values.  Try
913         // to salvage *something* by remembering that the stored value is a live
914         // version of the pointer.  It is safe to forward from volatile stores
915         // to non-volatile loads, so we don't have to check for volatility of
916         // the store.
917         AvailableLoads.insert(
918             MemInst.getPointerOperand(),
919             LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
920                       MemInst.isAtomic(), /*IsInvariant=*/false));
921 
922         // Remember that this was the last unordered store we saw for DSE. We
923         // don't yet handle DSE on ordered or volatile stores since we don't
924         // have a good way to model the ordering requirement for following
925         // passes  once the store is removed.  We could insert a fence, but
926         // since fences are slightly stronger than stores in their ordering,
927         // it's not clear this is a profitable transform. Another option would
928         // be to merge the ordering with that of the post dominating store.
929         if (MemInst.isUnordered() && !MemInst.isVolatile())
930           LastStore = Inst;
931         else
932           LastStore = nullptr;
933       }
934     }
935   }
936 
937   return Changed;
938 }
939 
940 bool EarlyCSE::run() {
941   // Note, deque is being used here because there is significant performance
942   // gains over vector when the container becomes very large due to the
943   // specific access patterns. For more information see the mailing list
944   // discussion on this:
945   // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
946   std::deque<StackNode *> nodesToProcess;
947 
948   bool Changed = false;
949 
950   // Process the root node.
951   nodesToProcess.push_back(new StackNode(
952       AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration,
953       DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end()));
954 
955   // Save the current generation.
956   unsigned LiveOutGeneration = CurrentGeneration;
957 
958   // Process the stack.
959   while (!nodesToProcess.empty()) {
960     // Grab the first item off the stack. Set the current generation, remove
961     // the node from the stack, and process it.
962     StackNode *NodeToProcess = nodesToProcess.back();
963 
964     // Initialize class members.
965     CurrentGeneration = NodeToProcess->currentGeneration();
966 
967     // Check if the node needs to be processed.
968     if (!NodeToProcess->isProcessed()) {
969       // Process the node.
970       Changed |= processNode(NodeToProcess->node());
971       NodeToProcess->childGeneration(CurrentGeneration);
972       NodeToProcess->process();
973     } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
974       // Push the next child onto the stack.
975       DomTreeNode *child = NodeToProcess->nextChild();
976       nodesToProcess.push_back(
977           new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
978                         NodeToProcess->childGeneration(), child, child->begin(),
979                         child->end()));
980     } else {
981       // It has been processed, and there are no more children to process,
982       // so delete it and pop it off the stack.
983       delete NodeToProcess;
984       nodesToProcess.pop_back();
985     }
986   } // while (!nodes...)
987 
988   // Reset the current generation.
989   CurrentGeneration = LiveOutGeneration;
990 
991   return Changed;
992 }
993 
994 PreservedAnalyses EarlyCSEPass::run(Function &F,
995                                     FunctionAnalysisManager &AM) {
996   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
997   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
998   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
999   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1000   auto *MSSA =
1001       UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;
1002 
1003   EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1004 
1005   if (!CSE.run())
1006     return PreservedAnalyses::all();
1007 
1008   PreservedAnalyses PA;
1009   PA.preserveSet<CFGAnalyses>();
1010   PA.preserve<GlobalsAA>();
1011   if (UseMemorySSA)
1012     PA.preserve<MemorySSAAnalysis>();
1013   return PA;
1014 }
1015 
1016 namespace {
1017 /// \brief A simple and fast domtree-based CSE pass.
1018 ///
1019 /// This pass does a simple depth-first walk over the dominator tree,
1020 /// eliminating trivially redundant instructions and using instsimplify to
1021 /// canonicalize things as it goes. It is intended to be fast and catch obvious
1022 /// cases so that instcombine and other passes are more effective. It is
1023 /// expected that a later pass of GVN will catch the interesting/hard cases.
1024 template<bool UseMemorySSA>
1025 class EarlyCSELegacyCommonPass : public FunctionPass {
1026 public:
1027   static char ID;
1028 
1029   EarlyCSELegacyCommonPass() : FunctionPass(ID) {
1030     if (UseMemorySSA)
1031       initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
1032     else
1033       initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
1034   }
1035 
1036   bool runOnFunction(Function &F) override {
1037     if (skipFunction(F))
1038       return false;
1039 
1040     auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1041     auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1042     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1043     auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1044     auto *MSSA =
1045         UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;
1046 
1047     EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1048 
1049     return CSE.run();
1050   }
1051 
1052   void getAnalysisUsage(AnalysisUsage &AU) const override {
1053     AU.addRequired<AssumptionCacheTracker>();
1054     AU.addRequired<DominatorTreeWrapperPass>();
1055     AU.addRequired<TargetLibraryInfoWrapperPass>();
1056     AU.addRequired<TargetTransformInfoWrapperPass>();
1057     if (UseMemorySSA) {
1058       AU.addRequired<MemorySSAWrapperPass>();
1059       AU.addPreserved<MemorySSAWrapperPass>();
1060     }
1061     AU.addPreserved<GlobalsAAWrapperPass>();
1062     AU.setPreservesCFG();
1063   }
1064 };
1065 }
1066 
1067 using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;
1068 
1069 template<>
1070 char EarlyCSELegacyPass::ID = 0;
1071 
1072 INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
1073                       false)
1074 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1075 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1076 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1077 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1078 INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)
1079 
1080 using EarlyCSEMemSSALegacyPass =
1081     EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;
1082 
1083 template<>
1084 char EarlyCSEMemSSALegacyPass::ID = 0;
1085 
1086 FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
1087   if (UseMemorySSA)
1088     return new EarlyCSEMemSSALegacyPass();
1089   else
1090     return new EarlyCSELegacyPass();
1091 }
1092 
1093 INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
1094                       "Early CSE w/ MemorySSA", false, false)
1095 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1096 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1097 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1098 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1099 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
1100 INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
1101                     "Early CSE w/ MemorySSA", false, false)
1102