//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/GuardUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE,      "Number of instructions CSE'd");
STATISTIC(NumCSECVP,   "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad,  "Number of load instructions CSE'd");
STATISTIC(NumCSECall,  "Number of call instructions CSE'd");
STATISTIC(NumDSE,      "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

static cl::opt<unsigned> EarlyCSEMssaOptCap(
    "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
    cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

static cl::opt<bool> EarlyCSEDebugHash(
    "earlycse-debug-hash", cl::init(false), cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
           isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
           isa<CmpInst>(Inst) || isa<SelectInst>(Inst) ||
           isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
           isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) ||
           isa<InsertValueInst>(Inst);
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm

/// Match a 'select' including an optional 'not' of the condition.
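/// For example (illustrative IR, not taken from a test): the select
///   %s = select i1 (xor i1 %c, true), i32 %a, i32 %b
/// is reported with Cond = %c and the arms swapped, i.e. the same
/// decomposition as for 'select i1 %c, i32 %b, i32 %a'.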
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
                                           Value *&B,
                                           SelectPatternFlavor &Flavor) {
  // Return false if V is not even a select.
  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
    return false;

  // Look through a 'not' of the condition operand by swapping A/B.
  Value *CondNot;
  if (match(Cond, m_Not(m_Value(CondNot)))) {
    Cond = CondNot;
    std::swap(A, B);
  }

  // Set flavor if we find a match, or set it to unknown otherwise; in
  // either case, return true to indicate that this is a select we can
  // process.
  if (auto *CmpI = dyn_cast<ICmpInst>(Cond))
    Flavor = matchDecomposedSelectPattern(CmpI, A, B, A, B).Flavor;
  else
    Flavor = SPF_UNKNOWN;

  return true;
}

static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares can be commuted by swapping the comparands and
    // updating the predicate.  Choose the form that has the
    // comparands in sorted order, or in the case of a tie, the
    // one with the lower predicate.
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash general selects to allow matching commuted true/false operands.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
    // Hash min/max/abs (cmp + select) to allow for commuted operands.
    // Min/max may also have a non-canonical compare predicate (e.g., the
    // compare for smin may use 'sgt' rather than 'slt') and non-canonical
    // operands in the compare.
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
      // ABS/NABS always puts the input in A and its negation in B.
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // Hash general selects to allow matching commuted true/false operands.

    // If we do not have a compare as the condition, just hash in the condition.
    CmpInst::Predicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate value:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
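    // For instance (illustrative, assuming the usual ordering of the
    // predicate enumeration, where 'ugt' sorts before its inverse 'ule'):
    //   select (icmp ule X, Y), A, B --> select (icmp ugt X, Y), B, A
    // so both spellings end up with the same hash.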
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst) || isa<UnaryOperator>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}

static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
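    // (Illustrative: 'add i32 %a, %b' is considered equal to
    // 'add i32 %b, %a' by this check.)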
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  // Selects can be non-trivially equivalent via inverted conditions and swaps.
  SelectPatternFlavor LSPF, RSPF;
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
        // Abs results are placed in a defined order by matchSelectPattern.
        return LHSA == RHSA && LHSB == RHSB;
      }

      // select Cond, A, B <--> select not(Cond), B, A
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles patterns with a double-negation in the sense of not +
    // inverse, because we looked through a 'not' in the matching function and
    // swapped A/B:
    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
    //
    // This intentionally does NOT handle patterns with a double-negation in
    // the sense of not + not, because doing so could result in values
    // comparing as equal that hash differently in the min/max/abs cases like:
    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
    //   ^ hashes as min                  ^ would not hash as min
    // In the context of the EarlyCSE pass, however, such cases never reach
    // this code, as we simplify the double-negation before hashing the second
    // select (and so still succeed at CSEing them).
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpInst::Predicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}

bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  // These comparisons are nontrivial, so assert that equality implies
  // hash equality (DenseMap demands this as an invariant).
  bool Result = isEqualImpl(LHS, RHS);
  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Result;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load.  In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count.  The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store.  Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations.  Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to
  /// inspect the atomicity/volatility if needed.
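  ///
  /// Illustrative example (hypothetical IR, not from a test):
  ///   %v1 = load i32, i32* %p   ; recorded at generation G
  ///   call void @f()            ; may write memory, generation becomes G+1
  ///   %v2 = load i32, i32* %p   ; lookup sees generation G != G+1
  /// Here the second load is only CSE'd if MemorySSA can prove that the
  /// call does not clobber %p (see isSameMemGeneration below).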
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  unsigned ClobberCounter = 0;
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
      : Scope(AvailableValues), LoadScope(AvailableLoads),
        InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a
  // depth-first traversal of the tree. This includes scopes for values,
  // loads, and calls as well as the generation. There is a child iterator
  // so that the children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
              DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls)
          {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };

  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
      : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure.  That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    if (VerifyMemorySSA)
      MSSA->verifyMemorySSA();
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. The phi case
    // is handled by MemorySSA when passing OptimizePhis = true to
    // removeMemoryAccess.  The non-optimized MemoryUse case is lazily updated
    // by MemorySSA's getClobberingMemoryAccess.
    MSSAUpdater->removeMemoryAccess(Inst, true);
  }
};

} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction.  Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}

bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
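  // Illustrative (hypothetical IR): a load such as
  //   %v = load i32, i32* %p, !invariant.load !0
  // is therefore treated as invariant at every generation, without
  // consulting AvailableInvariants.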
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get.  TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}

bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
      return BOp->getOpcode() == Opcode;
    return false;
  };
  // If the condition is an AND operation, we can propagate its operands into
  // the true branch. If it is an OR operation, we can propagate them into
  // the false branch.
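  // Illustrative (hypothetical IR): given
  //   %a = icmp eq i32 %x, 0
  //   %b = icmp eq i32 %y, 0
  //   %c = and i1 %a, %b
  //   br i1 %c, label %bb, label %other
  // on the edge into %bb, %c, %a and %b are all known true, so the
  // worklist below records and propagates each of them.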
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    if (MatchBinOp(Curr, PropagateOpcode))
      for (auto &Op : cast<BinaryOperator>(Curr)->operands())
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}

bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block.  If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent value.  For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path.  We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value.  Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst))
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory.  If we see a store
  /// to the same location, we delete the dead store.  This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated.  If so, do it.  If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }
      if (!salvageDebugInfo(*Inst))
        replaceDbgUsesWithUndef(Inst);
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics; they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with their removal. However, we should
    // mark the assumed condition as true for all dominated blocks.
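    // Illustrative (hypothetical IR): after
    //   %c = icmp ult i32 %i, %n
    //   call void @llvm.assume(i1 %c)
    // the mapping %c -> true is available, so a later identical
    // 'icmp ult i32 %i, %n' in a dominated block is CSE'd to 'true'.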
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across them. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations.  Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
    // store, if there was a store following invariant.start. Consider:
    //
    // store 30, i8* p
    // invariant.start(p)
    // store 40, i8* p
    // We can DSE the store to 30, since the store 40 to invariant location p
    // causes undefined behaviour.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst->use_empty())
        continue;
      auto *CI = cast<CallInst>(Inst);
      MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
      // Don't start a scope if we already have a better one pushed
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (isGuard(Inst)) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << *Inst << '\n');
              removeMSSA(Inst);
              Inst->eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
          }
          // The condition we're guarding on here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << "  to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst->use_empty()) {
          Inst->replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(Inst, &TLI)) {
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value.  If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << "  to: " << *V
                          << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (Conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment.  If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                            << "  to: " << *InVal.DefInst << '\n');
          if (!DebugCounter::shouldExecute(CSECounter)) {
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            continue;
          }
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore.  Load/store
    // intrinsics will indicate both a read and a write to memory.  The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                          << "  to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence.  As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation.  We do need to prevent DSE across the fence,
    // but that's handled above.
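    // Illustrative: a plain 'fence release' therefore leaves AvailableLoads
    // intact (no generation bump); DSE across it was already blocked when
    // the mayReadFromMemory() check above cleared LastStore.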
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write.  The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
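    // Illustrative (hypothetical IR):
    //   %v = load i32, i32* %p
    //   ...                      ; no intervening clobber of %p
    //   store i32 %v, i32* %p    ; writes back the loaded value; removable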
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        // It is okay to have a LastStore to a different pointer here if MemorySSA
        // tells us that the load and store are from the same memory generation.
        // In that case, LastStore should keep its present value since we're
        // removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all.  Check to see if it is
    // something that could modify memory.  If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads.  Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores.  There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we were going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
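        // Illustrative (hypothetical IR):
        //   store i32 1, i32* %p   ; LastStore, deleted below
        //   store i32 2, i32* %p   ; current instruction, same location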
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << "  due to: " << *Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              removeMSSA(LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values.  Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer.  It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed.  We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note: a deque is used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)
1428