//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
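//
// For example, given IR like this hypothetical snippet:
//
//   %a = add i32 %x, %y
//   %b = add i32 %x, %y   ; trivially redundant with %a
//
// the pass replaces all uses of %b with %a and deletes %b.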
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE,      "Number of instructions CSE'd");
STATISTIC(NumCSECVP,   "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad,  "Number of load instructions CSE'd");
STATISTIC(NumCSECall,  "Number of call instructions CSE'd");
STATISTIC(NumDSE,      "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
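  // For commutative operations the operands are put into a fixed (pointer)
  // order first, so, e.g., 'add %a, %b' and 'add %b, %a' hash identically.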
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

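  // Compares hash equal to their operand-swapped form with the swapped
  // predicate, so, e.g., 'icmp slt %a, %b' matches 'icmp sgt %b, %a'.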
  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash min/max/abs (cmp + select) to allow for commuted operands.
  // Min/max may also have a non-canonical compare predicate (e.g., the compare
  // for smin may use 'sgt' rather than 'slt'), and non-canonical operands in
  // the compare.
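  // For instance, both of these hypothetical forms compute smin(%a, %b) and
  // should hash identically:
  //   %c1 = icmp slt i32 %a, %b
  //   %m1 = select i1 %c1, i32 %a, i32 %b    ; canonical smin
  //   %c2 = icmp sgt i32 %a, %b
  //   %m2 = select i1 %c2, i32 %b, i32 %a    ; same smin, non-canonical form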
  Value *A, *B;
  SelectPatternFlavor SPF = matchSelectPattern(Inst, A, B).Flavor;
  // TODO: We should also detect FP min/max.
  if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
      SPF == SPF_UMIN || SPF == SPF_UMAX) {
    if (A > B)
      std::swap(A, B);
    return hash_combine(Inst->getOpcode(), SPF, A, B);
  }
  if (SPF == SPF_ABS || SPF == SPF_NABS) {
    // ABS/NABS always puts the input in A and its negation in B.
    return hash_combine(Inst->getOpcode(), SPF, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
          isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If not strictly identical, the instructions may still match via
  // commutativity.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  Value *LHSA, *LHSB;
  SelectPatternFlavor LSPF = matchSelectPattern(LHSI, LHSA, LHSB).Flavor;
  // TODO: We should also detect FP min/max.
  if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
      LSPF == SPF_UMIN || LSPF == SPF_UMAX ||
      LSPF == SPF_ABS || LSPF == SPF_NABS) {
    Value *RHSA, *RHSB;
    SelectPatternFlavor RSPF = matchSelectPattern(RHSI, RHSA, RHSB).Flavor;
    if (LSPF == RSPF) {
      // Abs results are placed in a defined order by matchSelectPattern.
      if (LSPF == SPF_ABS || LSPF == SPF_NABS)
        return LHSA == RHSA && LHSB == RHSB;
      return ((LHSA == RHSA && LHSB == RHSB) ||
              (LHSA == RHSB && LHSB == RHSA));
    }
  }

  return false;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load.  In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count.  The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store.  Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations.  Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
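  /// For example (hypothetical IR, where @f is an arbitrary function):
  ///   %v1 = load i32, i32* %p     ; recorded at generation G
  ///   call void @f()              ; may write memory: generation becomes G+1
  ///   %v2 = load i32, i32* %p     ; generation G+1
  /// the second load is only CSE'd against the first if MemorySSA (when
  /// enabled) can prove that @f does not clobber %p.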
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
      : Scope(AvailableValues), LoadScope(AvailableLoads),
        InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
              DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls)
          {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };

  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
      : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure.  That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber.  We handle the
    // phi case eagerly here.  The non-optimized MemoryUse case is lazily
    // updated by MemorySSA getClobberingMemoryAccess.
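    // For example, if removing a MemoryDef leaves a MemoryPhi whose incoming
    // values are all identical (conceptually 'MemoryPhi(%x, %x)'), that phi
    // is redundant and is removed as well, transitively.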
    if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
      // Optimize MemoryPhi nodes that may become redundant by having all the
      // same input values once MA is removed.
      SmallSetVector<MemoryPhi *, 4> PhisToCheck;
      SmallVector<MemoryAccess *, 8> WorkQueue;
      WorkQueue.push_back(MA);
      // Process MemoryPhi nodes in FIFO order using an ever-growing vector
      // since we shouldn't be processing that many phis and this will avoid
      // an allocation in almost all cases.
      for (unsigned I = 0; I < WorkQueue.size(); ++I) {
        MemoryAccess *WI = WorkQueue[I];

        for (auto *U : WI->users())
          if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
            PhisToCheck.insert(MP);

        MSSAUpdater->removeMemoryAccess(WI);

        for (MemoryPhi *MP : PhisToCheck) {
          MemoryAccess *FirstIn = MP->getIncomingValue(0);
          if (llvm::all_of(MP->incoming_values(),
                           [=](Use &In) { return In == FirstIn; }))
            WorkQueue.push_back(MP);
        }
        PhisToCheck.clear();
      }
    }
  }
};

} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction.  Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef =
      MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
  return MSSA->dominates(LaterDef, EarlierMA);
}

bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->getMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get.  TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block.  If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent.  For now, just
  // be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path.  We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value.  Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
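  // For example (hypothetical IR), given:
  //   pred:
  //     br i1 %cond, label %bb, label %other
  // when processing %bb with 'pred' as its only predecessor, %cond is known
  // to be true in %bb, so dominated uses of %cond can be replaced by 'true'.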
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst)) {
        assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
        auto *TorF = (BI->getSuccessor(0) == BB)
                         ? ConstantInt::getTrue(BB->getContext())
                         : ConstantInt::getFalse(BB->getContext());
        AvailableValues.insert(CondInst, TorF);
        LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                          << CondInst->getName() << "' as " << *TorF << " in "
                          << BB->getName() << "\n");
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        } else {
          // Replace all dominated uses with the known value.
          if (unsigned Count = replaceDominatedUsesWith(
                  CondInst, TorF, DT, BasicBlockEdge(Pred, BB))) {
            Changed = true;
            NumCSECVP += Count;
          }
        }
      }
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory.  If we see a store
  /// to the same location, we delete the dead store.  This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated.  If so, do it.  If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }
      salvageDebugInfo(*Inst);
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics; they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with their removal. However, we should
    // mark the condition as true for all dominated blocks.
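    // For example (hypothetical IR):
    //   %c = icmp eq i32 %x, 0
    //   call void @llvm.assume(i1 %c)
    // After this, dominated re-computations of '%c' can be CSE'd to 'true'.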
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across them. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations.  Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
    // store, if there was a store following invariant.start. Consider:
    //
    // store 30, i8* p
    // invariant.start(p)
    // store 40, i8* p
    // We can DSE the store to 30, since the store of 40 to invariant location
    // p causes undefined behavior.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst->use_empty())
        continue;
      auto *CI = cast<CallInst>(Inst);
      MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
      // Don't start a scope if we already have a better one pushed.
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << *Inst << '\n');
              removeMSSA(Inst);
              Inst->eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
          }
          // The condition we're guarding on here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << "  to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst->use_empty()) {
          Inst->replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(Inst, &TLI)) {
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value.  If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << "  to: " << *V
                          << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (Conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment.  If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                            << "  to: " << *InVal.DefInst << '\n');
          if (!DebugCounter::shouldExecute(CSECounter)) {
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            continue;
          }
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore.  Load/store
    // intrinsics will indicate both a read and a write to memory.  The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                          << "  to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence.  As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation.  We do need to prevent DSE across the fence,
    // but that's handled above.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write.  The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
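    // For example (hypothetical IR):
    //   %v = load i32, i32* %p
    //   ...                      ; no intervening clobber of %p
    //   store i32 %v, i32* %p    ; redundant write-back, removable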
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        // It is okay to have a LastStore to a different pointer here if MemorySSA
        // tells us that the load and store are from the same memory generation.
        // In that case, LastStore should keep its present value since we're
        // removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all.  Check to see if it is
    // something that could modify memory.  If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads.  Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores.  There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we're going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
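        // For example (hypothetical IR):
        //   store i32 1, i32* %p   ; dead, deleted below
        //   store i32 2, i32* %p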
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << "  due to: " << *Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              removeMSSA(LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values.  Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer.  It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed.  We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  // Save the current generation.
  unsigned LiveOutGeneration = CurrentGeneration;

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  // Reset the current generation.
  CurrentGeneration = LiveOutGeneration;

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)