//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE,      "Number of instructions CSE'd");
STATISTIC(NumCSECVP,   "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad,  "Number of load instructions CSE'd");
STATISTIC(NumCSECall,  "Number of call instructions CSE'd");
STATISTIC(NumDSE,      "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }
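  // Illustrative example (a sketch, not from the original source): because
  // commutative operands are ordered by pointer value before hashing, IR such
  // as
  //   %x = add i32 %a, %b
  //   %y = add i32 %b, %a
  // produces the same hash for both adds, so isEqual() below can match them.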

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash min/max/abs (cmp + select) to allow for commuted operands.
  // Min/max may also have a non-canonical compare predicate (e.g., the compare
  // for smin may use 'sgt' rather than 'slt'), and non-canonical operands in
  // the compare.
  Value *A, *B;
  SelectPatternFlavor SPF = matchSelectPattern(Inst, A, B).Flavor;
  // TODO: We should also detect FP min/max.
  if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
      SPF == SPF_UMIN || SPF == SPF_UMAX ||
      SPF == SPF_ABS || SPF == SPF_NABS) {
    if (A > B)
      std::swap(A, B);
    return hash_combine(Inst->getOpcode(), SPF, A, B);
  }
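  // Illustrative example (a sketch): both selects below compute smin(%a, %b)
  // and hash identically here despite the swapped predicate and operands:
  //   %c1 = icmp slt i32 %a, %b
  //   %m1 = select i1 %c1, i32 %a, i32 %b
  //   %c2 = icmp sgt i32 %a, %b
  //   %m2 = select i1 %c2, i32 %b, i32 %a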

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable
  // instruction.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality.
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality.
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  Value *LHSA, *LHSB;
  SelectPatternFlavor LSPF = matchSelectPattern(LHSI, LHSA, LHSB).Flavor;
  // TODO: We should also detect FP min/max.
  if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
      LSPF == SPF_UMIN || LSPF == SPF_UMAX ||
      LSPF == SPF_ABS || LSPF == SPF_NABS) {
    Value *RHSA, *RHSB;
    SelectPatternFlavor RSPF = matchSelectPattern(RHSI, RHSA, RHSB).Flavor;
    return (LSPF == RSPF && ((LHSA == RHSA && LHSB == RHSB) ||
                             (LHSA == RHSB && LHSB == RHSA)));
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this
  /// table: if so, we replace them with what we find, otherwise we insert them
  /// so that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load.  In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count.  The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store.  Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations.  Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };
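  // Generation counting sketched on IR (an illustrative example, not from the
  // original source):
  //   %v1 = load i32, i32* %p    ; recorded at generation G
  //   call void @clobber()       ; may write memory, generation becomes G+1
  //   %v2 = load i32, i32* %p    ; seen at generation G+1
  // %v2 only CSEs with %v1 if isSameMemGeneration() can prove (e.g. via
  // MemorySSA, when enabled) that no intervening write clobbers %p.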

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
      : Scope(AvailableValues), LoadScope(AvailableLoads),
        InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a
  // depth-first traversal of the tree. This includes scopes for values, loads,
  // and calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
              DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls)
          {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };

  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
      : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer.
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer.
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure.  That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber.  We handle the
    // phi case eagerly here.  The non-optimized MemoryUse case is lazily
    // updated by MemorySSA getClobberingMemoryAccess.
    if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
      // Optimize MemoryPhi nodes that may become redundant by having all the
      // same input values once MA is removed.
      SmallSetVector<MemoryPhi *, 4> PhisToCheck;
      SmallVector<MemoryAccess *, 8> WorkQueue;
      WorkQueue.push_back(MA);
      // Process MemoryPhi nodes in FIFO order using an ever-growing vector
      // since we shouldn't be processing that many phis and this will avoid an
      // allocation in almost all cases.
      for (unsigned I = 0; I < WorkQueue.size(); ++I) {
        MemoryAccess *WI = WorkQueue[I];

        for (auto *U : WI->users())
          if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
            PhisToCheck.insert(MP);

        MSSAUpdater->removeMemoryAccess(WI);

        for (MemoryPhi *MP : PhisToCheck) {
          MemoryAccess *FirstIn = MP->getIncomingValue(0);
          if (llvm::all_of(MP->incoming_values(),
                           [=](Use &In) { return In == FirstIn; }))
            WorkQueue.push_back(MP);
        }
        PhisToCheck.clear();
      }
    }
  }
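  // Sketch of the phi cleanup in removeMSSA above (illustrative): removing
  // access 2 from
  //   3 = MemoryPhi({bb1, 1}, {bb2, 2})
  // rewrites the phi's use of 2 to 2's defining access; if that is 1, all
  // incoming values match and the phi itself is queued for removal.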
};

} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction.  Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef =
      MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
  return MSSA->dominates(LaterDef, EarlierMA);
}

bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->getMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // TODO: "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::getOrNone.
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}
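// For example (a sketch of the check above): if invariant.start on %p was
// recorded at generation 3, a query for a load of %p with GenAt == 5 succeeds
// (3 <= 5), while a query with GenAt == 2 predates the invariant scope and
// fails.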

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live-out memory values are still current
  // in this block.  If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent.  For now, just
  // be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path.  We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value.  Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst)) {
        assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
        auto *TorF = (BI->getSuccessor(0) == BB)
                         ? ConstantInt::getTrue(BB->getContext())
                         : ConstantInt::getFalse(BB->getContext());
        AvailableValues.insert(CondInst, TorF);
        LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                          << CondInst->getName() << "' as " << *TorF << " in "
                          << BB->getName() << "\n");
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        } else {
          // Replace all dominated uses with the known value.
          if (unsigned Count = replaceDominatedUsesWith(
                  CondInst, TorF, DT, BasicBlockEdge(Pred, BB))) {
            Changed = true;
            NumCSECVP += Count;
          }
        }
      }
    }
  }
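  // Illustrative example of the propagation above: given a predecessor ending
  // in
  //   br i1 %cmp, label %then, label %else
  // uses of %cmp dominated by the edge into %then fold to 'true' (and to
  // 'false' along the edge into %else).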

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory.  If we see a store
  /// to the same location, we delete the dead store.  This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated.  If so, do it.  If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }
      salvageDebugInfo(*Inst);
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with their removal. However, we should
    // mark their condition as true for all dominated blocks.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }
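    // Illustrative example (a sketch): after
    //   call void @llvm.assume(i1 %c)
    // a dominated instruction identical to %c (e.g. a re-computed compare that
    // value-numbers to %c) is CSE'd to 'true' rather than to %c itself.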

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across them. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations.  Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last store,
    // if there was a store following invariant.start. Consider:
    //
    //   store 30, i8* p
    //   invariant.start(p)
    //   store 40, i8* p
    //
    // We can DSE the store to 30, since the store of 40 to invariant location
    // p causes undefined behaviour.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst->use_empty())
        continue;
      auto *CI = cast<CallInst>(Inst);
      MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
      // Don't start a scope if we already have a better one pushed.
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << *Inst << '\n');
              removeMSSA(Inst);
              Inst->eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
          }
          // The condition we're guarding on here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }
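    // Illustrative example (a sketch): given two guards on the same condition,
    //   call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
    //   ...
    //   call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
    // the second finds %c mapped to 'true' in AvailableValues and is erased
    // (assuming %c is an instruction SimpleValue can handle).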

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << "  to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst->use_empty()) {
          Inst->replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(Inst, &TLI)) {
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value.  If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << "  to: " << *V
                          << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (Conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment.  If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                            << "  to: " << *InVal.DefInst << '\n');
          if (!DebugCounter::shouldExecute(CSECounter)) {
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            continue;
          }
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore.  Load/store
    // intrinsics will indicate both a read and a write to memory.  The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                          << "  to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence.  As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation.  We do need to prevent DSE across the fence,
    // but that's handled above.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }
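    // Illustrative example of the interaction with DSE: in
    //   store i32 0, i32* %p
    //   fence release
    //   store i32 1, i32* %p
    // the first store must not be treated as dead; the fence's
    // mayReadFromMemory() already cleared LastStore above, which the assert
    // relies on.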

    // Write-back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write.  The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        // It is okay to have a LastStore to a different pointer here if
        // MemorySSA tells us that the load and store are from the same memory
        // generation. In that case, LastStore should keep its present value
        // since we're removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all.  Check to see if it is
    // something that could modify memory.  If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads.  Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores.  There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we're going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
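        // Illustrative example of the trivial DSE below:
        //   store i32 1, i32* %p   ; LastStore, erased as dead
        //   store i32 2, i32* %p   ; current Inst, no intervening load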
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << "  due to: " << *Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              removeMSSA(LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values.  Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer.  It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed.  We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post-dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the top item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)