//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/EarlyCSE.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopedHashTable.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/RecyclingAllocator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/GuardUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"

STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE,      "Number of instructions CSE'd");
STATISTIC(NumCSECVP,   "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad,  "Number of load instructions CSE'd");
STATISTIC(NumCSECall,  "Number of call instructions CSE'd");
STATISTIC(NumDSE,      "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");

static cl::opt<unsigned> EarlyCSEMssaOptCap(
    "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
    cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
             "for faster compile times. Caps the MemorySSA clobbering calls."));

static cl::opt<bool> EarlyCSEDebugHash(
    "earlycse-debug-hash", cl::init(false), cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));

//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
           isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
           isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
           isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
           isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm

/// Match a 'select' including an optional 'not' of the condition.
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
                                           Value *&B,
                                           SelectPatternFlavor &Flavor) {
  // Return false if V is not even a select.
  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
    return false;

  // Look through a 'not' of the condition operand by swapping A/B.
  Value *CondNot;
  if (match(Cond, m_Not(m_Value(CondNot)))) {
    Cond = CondNot;
    std::swap(A, B);
  }

  // Set the flavor if we find a match, or set it to unknown otherwise; in
  // either case, return true to indicate that this is a select we can
  // process.
  if (auto *CmpI = dyn_cast<ICmpInst>(Cond))
    Flavor = matchDecomposedSelectPattern(CmpI, A, B, A, B).Flavor;
  else
    Flavor = SPF_UNKNOWN;

  return true;
}

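// Example (illustrative, not from the original source): given
//
//   %notc = xor i1 %c, true
//   %sel = select i1 %notc, i32 %a, i32 %b
//
// the matcher reports Cond == %c with A == %b and B == %a; it looks through
// the 'not' and swaps the select arms accordingly.
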
static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }

  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    if (Inst->getOperand(0) > Inst->getOperand(1)) {
      std::swap(LHS, RHS);
      Pred = CI->getSwappedPredicate();
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }

  // Hash selects to allow matching commuted operands and inverted conditions.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
    // Hash min/max/abs (cmp + select) to allow for commuted operands.
    // Min/max may also have a non-canonical compare predicate (e.g., the
    // compare for smin may use 'sgt' rather than 'slt') and non-canonical
    // operands in the compare.
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
      // ABS/NABS always puts the input in A and its negation in B.
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // Hash general selects to allow matching commuted true/false operands.

    // If we do not have a compare as the condition, just hash in the condition.
    CmpInst::Predicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate value:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
  }

  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

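// As an illustration (not from the original source), the commutative
// normalization above means that 'add %a, %b' and 'add %b, %a' hash to the
// same value: the operands of a commutative binop are ordered by pointer
// value before being combined, so both orders produce the same
// (opcode, LHS, RHS) tuple.
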
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}

static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable
  // instruction.
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  // Selects can be non-trivially equivalent via inverted conditions and swaps.
  SelectPatternFlavor LSPF, RSPF;
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
        // Abs results are placed in a defined order by matchSelectPattern.
        return LHSA == RHSA && LHSB == RHSB;
      }

      // select Cond, A, B <--> select not(Cond), B, A
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles patterns with a double-negation in the sense of not +
    // inverse, because we looked through a 'not' in the matching function and
    // swapped A/B:
    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
    //
    // This intentionally does NOT handle patterns with a double-negation in
    // the sense of not + not, because doing so could result in values
    // comparing as equal that hash differently in the min/max/abs cases like:
    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
    //   ^ hashes as min                  ^ would not hash as min
    // In the context of the EarlyCSE pass, however, such cases never reach
    // this code, as we simplify the double-negation before hashing the second
    // select (and so still succeed at CSEing them).
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpInst::Predicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}

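// Example (illustrative, not from the original source) of two selects that
// isEqualImpl treats as equal via the inverted-predicate rule:
//
//   %c1 = icmp slt i32 %x, %y
//   %s1 = select i1 %c1, i32 %a, i32 %b
//   %c2 = icmp sge i32 %x, %y
//   %s2 = select i1 %c2, i32 %b, i32 %a
//
// Both compute the same value, and they hash identically because
// getHashValueImpl canonicalizes the predicate before hashing.
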
bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  // These comparisons are nontrivial, so assert that equality implies
  // hash equality (DenseMap demands this as an invariant).
  bool Result = isEqualImpl(LHS, RHS);
  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Result;
}

//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//

namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm

unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}

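// As an illustration (not from the original source), two calls such as
//
//   %r1 = call i32 @f(i32 %x)   ; where @f only reads memory
//   %r2 = call i32 @f(i32 %x)
//
// produce identical CallValue keys, so the second call can be CSE'd to %r1
// once the memory-generation check in processNode succeeds.
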
//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load.  In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count.  The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store.  Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations.  Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;

  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  unsigned ClobberCounter = 0;
  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
      : Scope(AvailableValues), LoadScope(AvailableLoads),
        InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };

  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
              DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls)
          {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };

  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
      : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure.  That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };

  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    if (VerifyMemorySSA)
      MSSA->verifyMemorySSA();
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. The phi case
    // is handled by MemorySSA when passing OptimizePhis = true to
    // removeMemoryAccess.  The non-optimized MemoryUse case is lazily updated
    // by MemorySSA's getClobberingMemoryAccess.
    MSSAUpdater->removeMemoryAccess(Inst, true);
  }
};

} // end anonymous namespace

/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   load p
///   ...
///   load p
///
/// and
///
///   x = load p
///   ...
///   store x, p
///
/// In both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction.  Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}

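// For example (illustrative, not from the original source): two loads of the
// same pointer separated only by a readnone call remain in the same
// generation and compare equal immediately; if a store to an unrelated
// pointer intervened, the generations would differ, and MemorySSA (when
// available) would be consulted to show that the store does not clobber the
// loaded location.
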
bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->getMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // TODO: "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get.
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}

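// For example (illustrative, not from the original source), a load tagged
// with !invariant.load metadata:
//
//   %v = load i32, i32* %p, !invariant.load !0
//
// is treated as reading memory that never changes, regardless of generation.
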
bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
      return BOp->getOpcode() == Opcode;
    return false;
  };
  // If the condition is an AND operation, we can propagate its operands into
  // the true branch. If it is an OR operation, we can propagate them into the
  // false branch.
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    if (MatchBinOp(Curr, PropagateOpcode))
      for (auto &Op : cast<BinaryOperator>(Curr)->operands())
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}

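// Example (illustrative, not from the original source): given
//
//   %c = and i1 %a, %b
//   br i1 %c, label %then, label %else
//
// the worklist above records %c, %a, and %b as 'true' within %then (operands
// of an 'or' would be recorded as 'false' within %else), so dominated uses of
// any of them can be folded to the known constant.
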
bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block.  If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent value.  For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path.  We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value.  Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst))
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory.  If we see a store
  /// to the same location, we delete the dead store.  This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;

  // See if any instructions in the block can be eliminated.  If so, do it.  If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }
      if (!salvageDebugInfo(*Inst))
        replaceDbgUsesWithUndef(Inst);
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }

    // Skip assume intrinsics; they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with their removal. However, we should
    // mark their conditions as true for all dominated blocks.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

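    // As an illustration (not from the original source): after
    //
    //   %c = icmp eq i32 %x, 0
    //   call void @llvm.assume(i1 %c)
    //
    // a dominated lookup of %c in AvailableValues yields 'true', so later
    // uses of %c can be replaced by that constant.
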
    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
      continue;
    }

    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across them. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations.  Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last store,
    // if there was a store following invariant.start. Consider:
    //
    // store 30, i8* p
    // invariant.start(p)
    // store 40, i8* p
    //
    // We can DSE the store of 30, since the store of 40 to invariant location
    // p causes undefined behaviour.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst->use_empty())
        continue;
      auto *CI = cast<CallInst>(Inst);
      MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
      // Don't start a scope if we already have a better one pushed.
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }

    if (isGuard(Inst)) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << *Inst << '\n');
              removeMSSA(Inst);
              Inst->eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
          }
          // The condition we're guarding on here is true for all dominated
          // locations.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }

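    // As an illustration (not from the original source), a guard such as
    //
    //   call void (i1, ...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
    //
    // is deleted by the logic above when dominating code (e.g. an earlier
    // guard or assume) has already established that %c is true.
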
    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << "  to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst->use_empty()) {
          Inst->replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(Inst, &TLI)) {
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }

    // If this is a simple instruction that we can value number, process it.
    if (SimpleValue::canHandle(Inst)) {
      // See if the instruction has an available value.  If so, use it.
      if (Value *V = AvailableValues.lookup(Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << "  to: " << *V
                          << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (auto *I = dyn_cast<Instruction>(V))
          I->andIRFlags(Inst);
        Inst->replaceAllUsesWith(V);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSE;
        continue;
      }

      // Otherwise, just remember that this value is available.
      AvailableValues.insert(Inst, Inst);
      continue;
    }

    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (Conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment.  If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
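      //
      // For example (illustrative, not from the original source):
      //   %v1 = load i32, i32* %p
      //   ...                      ; no intervening clobber (same generation)
      //   %v2 = load i32, i32* %p  ; replaced by %v1 and erased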
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                            << "  to: " << *InVal.DefInst << '\n');
          if (!DebugCounter::shouldExecute(CSECounter)) {
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            continue;
          }
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }

    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore.  Load/store
    // intrinsics will indicate both a read and a write to memory.  The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;

    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                          << "  to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }

    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence.  As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation.  We do need to prevent DSE across the fence,
    // but that's handled above.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }

    // Write-back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write.  The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
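    //
    // Illustrative IR (not from the original source):
    //   %v = load i32, i32* %p
    //   ...                     ; no intervening write or ordering operation
    //   store i32 %v, i32* %p   ; removable write-back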
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        // It is okay to have a LastStore to a different pointer here if
        // MemorySSA tells us that the load and store are from the same memory
        // generation. In that case, LastStore should keep its present value
        // since we're removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }

    // Okay, this isn't something we can CSE at all.  Check to see if it is
    // something that could modify memory.  If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads.  Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores.  There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we were going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
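        //
        // Illustrative IR (not from the original source):
        //   store i32 1, i32* %p   ; dead, deleted below
        //   store i32 2, i32* %p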
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << "  due to: " << *Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              removeMSSA(LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // Fallthrough - we can exploit information about this store.
        }

        // Okay, we just invalidated anything we knew about loaded values.  Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer.  It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed.  We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post-dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}

bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  return Changed;
}

PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}

namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)