1 //===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass performs a simple dominator tree walk that eliminates trivially
11 // redundant instructions.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/Transforms/Scalar/EarlyCSE.h"
16 #include "llvm/ADT/DenseMapInfo.h"
17 #include "llvm/ADT/Hashing.h"
18 #include "llvm/ADT/STLExtras.h"
19 #include "llvm/ADT/ScopedHashTable.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AssumptionCache.h"
24 #include "llvm/Analysis/GlobalsModRef.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemorySSA.h"
27 #include "llvm/Analysis/MemorySSAUpdater.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/TargetTransformInfo.h"
30 #include "llvm/IR/BasicBlock.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/Dominators.h"
34 #include "llvm/IR/Function.h"
35 #include "llvm/IR/InstrTypes.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/Intrinsics.h"
40 #include "llvm/IR/LLVMContext.h"
41 #include "llvm/IR/PassManager.h"
42 #include "llvm/IR/PatternMatch.h"
43 #include "llvm/IR/Type.h"
44 #include "llvm/IR/Use.h"
45 #include "llvm/IR/Value.h"
46 #include "llvm/Pass.h"
47 #include "llvm/Support/Allocator.h"
48 #include "llvm/Support/AtomicOrdering.h"
49 #include "llvm/Support/Casting.h"
50 #include "llvm/Support/Debug.h"
51 #include "llvm/Support/RecyclingAllocator.h"
52 #include "llvm/Support/raw_ostream.h"
53 #include "llvm/Transforms/Scalar.h"
54 #include "llvm/Transforms/Utils/Local.h"
55 #include <cassert>
56 #include <deque>
57 #include <memory>
58 #include <utility>
59 
60 using namespace llvm;
61 using namespace llvm::PatternMatch;
62 
63 #define DEBUG_TYPE "early-cse"
64 
65 STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
66 STATISTIC(NumCSE,      "Number of instructions CSE'd");
67 STATISTIC(NumCSECVP,   "Number of compare instructions CVP'd");
68 STATISTIC(NumCSELoad,  "Number of load instructions CSE'd");
69 STATISTIC(NumCSECall,  "Number of call instructions CSE'd");
70 STATISTIC(NumDSE,      "Number of trivial dead stores removed");
71 
72 //===----------------------------------------------------------------------===//
73 // SimpleValue
74 //===----------------------------------------------------------------------===//
75 
76 namespace {
77 
78 /// \brief Struct representing the available values in the scoped hash table.
79 struct SimpleValue {
80   Instruction *Inst;
81 
82   SimpleValue(Instruction *I) : Inst(I) {
83     assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
84   }
85 
86   bool isSentinel() const {
87     return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
88            Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
89   }
90 
91   static bool canHandle(Instruction *Inst) {
92     // This can only handle non-void readnone functions.
93     if (CallInst *CI = dyn_cast<CallInst>(Inst))
94       return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
95     return isa<CastInst>(Inst) || isa<BinaryOperator>(Inst) ||
96            isa<GetElementPtrInst>(Inst) || isa<CmpInst>(Inst) ||
97            isa<SelectInst>(Inst) || isa<ExtractElementInst>(Inst) ||
98            isa<InsertElementInst>(Inst) || isa<ShuffleVectorInst>(Inst) ||
99            isa<ExtractValueInst>(Inst) || isa<InsertValueInst>(Inst);
100   }
101 };
102 
103 } // end anonymous namespace
104 
105 namespace llvm {
106 
107 template <> struct DenseMapInfo<SimpleValue> {
108   static inline SimpleValue getEmptyKey() {
109     return DenseMapInfo<Instruction *>::getEmptyKey();
110   }
111 
112   static inline SimpleValue getTombstoneKey() {
113     return DenseMapInfo<Instruction *>::getTombstoneKey();
114   }
115 
116   static unsigned getHashValue(SimpleValue Val);
117   static bool isEqual(SimpleValue LHS, SimpleValue RHS);
118 };
119 
120 } // end namespace llvm
121 
122 unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
123   Instruction *Inst = Val.Inst;
124   // Hash in all of the operands as pointers.
125   if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
126     Value *LHS = BinOp->getOperand(0);
127     Value *RHS = BinOp->getOperand(1);
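    // For commutative operations, put the operands in a canonical order (by
    // pointer value) so that, e.g., X + Y and Y + X hash to the same value.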
128     if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
129       std::swap(LHS, RHS);
130 
131     return hash_combine(BinOp->getOpcode(), LHS, RHS);
132   }
133 
134   if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
135     Value *LHS = CI->getOperand(0);
136     Value *RHS = CI->getOperand(1);
137     CmpInst::Predicate Pred = CI->getPredicate();
138     if (Inst->getOperand(0) > Inst->getOperand(1)) {
139       std::swap(LHS, RHS);
140       Pred = CI->getSwappedPredicate();
141     }
142     return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
143   }
144 
145   if (CastInst *CI = dyn_cast<CastInst>(Inst))
146     return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));
147 
148   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
149     return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
150                         hash_combine_range(EVI->idx_begin(), EVI->idx_end()));
151 
152   if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
153     return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
154                         IVI->getOperand(1),
155                         hash_combine_range(IVI->idx_begin(), IVI->idx_end()));
156 
157   assert((isa<CallInst>(Inst) || isa<BinaryOperator>(Inst) ||
158           isa<GetElementPtrInst>(Inst) || isa<SelectInst>(Inst) ||
159           isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
160           isa<ShuffleVectorInst>(Inst)) &&
161          "Invalid/unknown instruction");
162 
163   // Mix in the opcode.
164   return hash_combine(
165       Inst->getOpcode(),
166       hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
167 }
168 
169 bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
170   Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
171 
172   if (LHS.isSentinel() || RHS.isSentinel())
173     return LHSI == RHSI;
174 
175   if (LHSI->getOpcode() != RHSI->getOpcode())
176     return false;
177   if (LHSI->isIdenticalToWhenDefined(RHSI))
178     return true;
179 
180   // If we're not strictly identical, we still might be a commutable instruction
181   if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
182     if (!LHSBinOp->isCommutative())
183       return false;
184 
185     assert(isa<BinaryOperator>(RHSI) &&
186            "same opcode, but different instruction type?");
187     BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);
188 
189     // Commuted equality
190     return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
191            LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
192   }
193   if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
194     assert(isa<CmpInst>(RHSI) &&
195            "same opcode, but different instruction type?");
196     CmpInst *RHSCmp = cast<CmpInst>(RHSI);
197     // Commuted equality
198     return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
199            LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
200            LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
201   }
202 
203   return false;
204 }
205 
206 //===----------------------------------------------------------------------===//
207 // CallValue
208 //===----------------------------------------------------------------------===//
209 
210 namespace {
211 
212 /// \brief Struct representing the available call values in the scoped hash
213 /// table.
214 struct CallValue {
215   Instruction *Inst;
216 
217   CallValue(Instruction *I) : Inst(I) {
218     assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
219   }
220 
221   bool isSentinel() const {
222     return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
223            Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
224   }
225 
226   static bool canHandle(Instruction *Inst) {
227     // Don't value number anything that returns void.
228     if (Inst->getType()->isVoidTy())
229       return false;
230 
231     CallInst *CI = dyn_cast<CallInst>(Inst);
232     if (!CI || !CI->onlyReadsMemory())
233       return false;
234     return true;
235   }
236 };
237 
238 } // end anonymous namespace
239 
240 namespace llvm {
241 
242 template <> struct DenseMapInfo<CallValue> {
243   static inline CallValue getEmptyKey() {
244     return DenseMapInfo<Instruction *>::getEmptyKey();
245   }
246 
247   static inline CallValue getTombstoneKey() {
248     return DenseMapInfo<Instruction *>::getTombstoneKey();
249   }
250 
251   static unsigned getHashValue(CallValue Val);
252   static bool isEqual(CallValue LHS, CallValue RHS);
253 };
254 
255 } // end namespace llvm
256 
257 unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
258   Instruction *Inst = Val.Inst;
259   // Hash all of the operands as pointers and mix in the opcode.
260   return hash_combine(
261       Inst->getOpcode(),
262       hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
263 }
264 
265 bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
266   Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
267   if (LHS.isSentinel() || RHS.isSentinel())
268     return LHSI == RHSI;
269   return LHSI->isIdenticalTo(RHSI);
270 }
271 
272 //===----------------------------------------------------------------------===//
273 // EarlyCSE implementation
274 //===----------------------------------------------------------------------===//
275 
276 namespace {
277 
278 /// \brief A simple and fast domtree-based CSE pass.
279 ///
280 /// This pass does a simple depth-first walk over the dominator tree,
281 /// eliminating trivially redundant instructions and using instsimplify to
282 /// canonicalize things as it goes. It is intended to be fast and catch obvious
283 /// cases so that instcombine and other passes are more effective. It is
284 /// expected that a later pass of GVN will catch the interesting/hard cases.
285 class EarlyCSE {
286 public:
287   const TargetLibraryInfo &TLI;
288   const TargetTransformInfo &TTI;
289   DominatorTree &DT;
290   AssumptionCache &AC;
291   const SimplifyQuery SQ;
292   MemorySSA *MSSA;
293   std::unique_ptr<MemorySSAUpdater> MSSAUpdater;
294 
295   using AllocatorTy =
296       RecyclingAllocator<BumpPtrAllocator,
297                          ScopedHashTableVal<SimpleValue, Value *>>;
298   using ScopedHTType =
299       ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
300                       AllocatorTy>;
301 
302   /// \brief A scoped hash table of the current values of all of our simple
303   /// scalar expressions.
304   ///
305   /// As we walk down the domtree, we look to see if instructions are in this:
306   /// if so, we replace them with what we find, otherwise we insert them so
307   /// that dominated values can succeed in their lookup.
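  ///
  /// For example (illustrative):
  ///
  ///   %x = add i32 %a, %b    ; inserted into the table, available below
  ///   ...
  ///   %y = add i32 %a, %b    ; found in the table and replaced by %x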
308   ScopedHTType AvailableValues;
309 
310   /// A scoped hash table of the current values of previously encountered
311   /// memory locations.
312   ///
313   /// This allows us to get efficient access to dominating loads or stores when
314   /// we have a fully redundant load.  In addition to the most recent load, we
315   /// keep track of a generation count of the read, which is compared against
316   /// the current generation count.  The current generation count is incremented
317   /// after every possibly writing memory operation, which ensures that we only
318   /// CSE loads with other loads that have no intervening store.  Ordering
319   /// events (such as fences or atomic instructions) increment the generation
320   /// count as well; essentially, we model these as writes to all possible
321   /// locations.  Note that atomic and/or volatile loads and stores can be
322   /// present in the table; it is the responsibility of the consumer to
323   /// inspect the atomicity/volatility if needed.
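  ///
  /// For example (illustrative):
  ///
  ///   %v1 = load i32, i32* %p   ; recorded at generation G
  ///   call void @f()            ; may write memory; generation becomes G+1
  ///   %v2 = load i32, i32* %p   ; seen at generation G+1, so the generation
  ///                             ; check alone does not let us reuse %v1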
324   struct LoadValue {
325     Instruction *DefInst = nullptr;
326     unsigned Generation = 0;
327     int MatchingId = -1;
328     bool IsAtomic = false;
329     bool IsInvariant = false;
330 
331     LoadValue() = default;
332     LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
333               bool IsAtomic, bool IsInvariant)
334         : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
335           IsAtomic(IsAtomic), IsInvariant(IsInvariant) {}
336   };
337 
338   using LoadMapAllocator =
339       RecyclingAllocator<BumpPtrAllocator,
340                          ScopedHashTableVal<Value *, LoadValue>>;
341   using LoadHTType =
342       ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
343                       LoadMapAllocator>;
344 
345   LoadHTType AvailableLoads;
346 
347   /// \brief A scoped hash table of the current values of read-only call
348   /// values.
349   ///
350   /// It uses the same generation count as loads.
351   using CallHTType =
352       ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
353   CallHTType AvailableCalls;
354 
355   /// \brief This is the current generation of the memory value.
356   unsigned CurrentGeneration = 0;
357 
358   /// \brief Set up the EarlyCSE runner for a particular function.
359   EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
360            const TargetTransformInfo &TTI, DominatorTree &DT,
361            AssumptionCache &AC, MemorySSA *MSSA)
362       : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
363         MSSAUpdater(llvm::make_unique<MemorySSAUpdater>(MSSA)) {}
364 
365   bool run();
366 
367 private:
368   // Almost a POD, but needs to call the constructors for the scoped hash
369   // tables so that a new scope gets pushed on. These are RAII so that the
370   // scope gets popped when the NodeScope is destroyed.
371   class NodeScope {
372   public:
373     NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
374               CallHTType &AvailableCalls)
375         : Scope(AvailableValues), LoadScope(AvailableLoads),
376           CallScope(AvailableCalls) {}
377     NodeScope(const NodeScope &) = delete;
378     NodeScope &operator=(const NodeScope &) = delete;
379 
380   private:
381     ScopedHTType::ScopeTy Scope;
382     LoadHTType::ScopeTy LoadScope;
383     CallHTType::ScopeTy CallScope;
384   };
385 
386   // Contains all the needed information to create a stack for doing a depth
387   // first traversal of the tree. This includes scopes for values, loads, and
388   // calls as well as the generation. There is a child iterator so that the
389   // children do not need to be stored separately.
390   class StackNode {
391   public:
392     StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
393               CallHTType &AvailableCalls, unsigned cg, DomTreeNode *n,
394               DomTreeNode::iterator child, DomTreeNode::iterator end)
395         : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
396           EndIter(end), Scopes(AvailableValues, AvailableLoads, AvailableCalls)
397           {}
398     StackNode(const StackNode &) = delete;
399     StackNode &operator=(const StackNode &) = delete;
400 
401     // Accessors.
402     unsigned currentGeneration() { return CurrentGeneration; }
403     unsigned childGeneration() { return ChildGeneration; }
404     void childGeneration(unsigned generation) { ChildGeneration = generation; }
405     DomTreeNode *node() { return Node; }
406     DomTreeNode::iterator childIter() { return ChildIter; }
407 
408     DomTreeNode *nextChild() {
409       DomTreeNode *child = *ChildIter;
410       ++ChildIter;
411       return child;
412     }
413 
414     DomTreeNode::iterator end() { return EndIter; }
415     bool isProcessed() { return Processed; }
416     void process() { Processed = true; }
417 
418   private:
419     unsigned CurrentGeneration;
420     unsigned ChildGeneration;
421     DomTreeNode *Node;
422     DomTreeNode::iterator ChildIter;
423     DomTreeNode::iterator EndIter;
424     NodeScope Scopes;
425     bool Processed = false;
426   };
427 
428   /// \brief Wrapper class to handle memory instructions, including loads,
429   /// stores and intrinsic loads and stores defined by the target.
430   class ParseMemoryInst {
431   public:
432     ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
433       : Inst(Inst) {
434       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
435         if (TTI.getTgtMemIntrinsic(II, Info))
436           IsTargetMemInst = true;
437     }
438 
439     bool isLoad() const {
440       if (IsTargetMemInst) return Info.ReadMem;
441       return isa<LoadInst>(Inst);
442     }
443 
444     bool isStore() const {
445       if (IsTargetMemInst) return Info.WriteMem;
446       return isa<StoreInst>(Inst);
447     }
448 
449     bool isAtomic() const {
450       if (IsTargetMemInst)
451         return Info.Ordering != AtomicOrdering::NotAtomic;
452       return Inst->isAtomic();
453     }
454 
455     bool isUnordered() const {
456       if (IsTargetMemInst)
457         return Info.isUnordered();
458 
459       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
460         return LI->isUnordered();
461       } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
462         return SI->isUnordered();
463       }
464       // Conservative answer
465       return !Inst->isAtomic();
466     }
467 
468     bool isVolatile() const {
469       if (IsTargetMemInst)
470         return Info.IsVolatile;
471 
472       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
473         return LI->isVolatile();
474       } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
475         return SI->isVolatile();
476       }
477       // Conservative answer
478       return true;
479     }
480 
481     bool isInvariantLoad() const {
482       if (auto *LI = dyn_cast<LoadInst>(Inst))
483         return LI->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
484       return false;
485     }
486 
487     bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
488       return (getPointerOperand() == Inst.getPointerOperand() &&
489               getMatchingId() == Inst.getMatchingId());
490     }
491 
492     bool isValid() const { return getPointerOperand() != nullptr; }
493 
494     // For regular (non-intrinsic) loads/stores, this is set to -1. For
495     // intrinsic loads/stores, the id is retrieved from the corresponding
496     // field in the MemIntrinsicInfo structure.  That field contains
497     // non-negative values only.
498     int getMatchingId() const {
499       if (IsTargetMemInst) return Info.MatchingId;
500       return -1;
501     }
502 
503     Value *getPointerOperand() const {
504       if (IsTargetMemInst) return Info.PtrVal;
505       if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
506         return LI->getPointerOperand();
507       } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
508         return SI->getPointerOperand();
509       }
510       return nullptr;
511     }
512 
513     bool mayReadFromMemory() const {
514       if (IsTargetMemInst) return Info.ReadMem;
515       return Inst->mayReadFromMemory();
516     }
517 
518     bool mayWriteToMemory() const {
519       if (IsTargetMemInst) return Info.WriteMem;
520       return Inst->mayWriteToMemory();
521     }
522 
523   private:
524     bool IsTargetMemInst = false;
525     MemIntrinsicInfo Info;
526     Instruction *Inst;
527   };
528 
529   bool processNode(DomTreeNode *Node);
530 
531   Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
532     if (auto *LI = dyn_cast<LoadInst>(Inst))
533       return LI;
534     if (auto *SI = dyn_cast<StoreInst>(Inst))
535       return SI->getValueOperand();
536     assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
537     return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
538                                                  ExpectedType);
539   }
540 
541   bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
542                            Instruction *EarlierInst, Instruction *LaterInst);
543 
544   void removeMSSA(Instruction *Inst) {
545     if (!MSSA)
546       return;
547     // Removing a store here can leave MemorySSA in an unoptimized state by
548     // creating MemoryPhis that have identical arguments and by creating
549     // MemoryUses whose defining access is not an actual clobber.  We handle the
550     // phi case eagerly here.  The non-optimized MemoryUse case is lazily
551     // updated by MemorySSA getClobberingMemoryAccess.
552     if (MemoryAccess *MA = MSSA->getMemoryAccess(Inst)) {
553       // Optimize MemoryPhi nodes that may become redundant by having all the
554       // same input values once MA is removed.
555       SmallSetVector<MemoryPhi *, 4> PhisToCheck;
556       SmallVector<MemoryAccess *, 8> WorkQueue;
557       WorkQueue.push_back(MA);
558       // Process MemoryPhi nodes in FIFO order using an ever-growing vector
559       // since we shouldn't be processing that many phis and this will avoid
560       // an allocation in almost all cases.
561       for (unsigned I = 0; I < WorkQueue.size(); ++I) {
562         MemoryAccess *WI = WorkQueue[I];
563 
564         for (auto *U : WI->users())
565           if (MemoryPhi *MP = dyn_cast<MemoryPhi>(U))
566             PhisToCheck.insert(MP);
567 
568         MSSAUpdater->removeMemoryAccess(WI);
569 
570         for (MemoryPhi *MP : PhisToCheck) {
571           MemoryAccess *FirstIn = MP->getIncomingValue(0);
572           if (llvm::all_of(MP->incoming_values(),
573                            [=](Use &In) { return In == FirstIn; }))
574             WorkQueue.push_back(MP);
575         }
576         PhisToCheck.clear();
577       }
578     }
579   }
580 };
581 
582 } // end anonymous namespace
583 
584 /// Determine if the memory referenced by LaterInst is from the same heap
585 /// version as EarlierInst.
586 /// This is currently called in two scenarios:
587 ///
588 ///   load p
589 ///   ...
590 ///   load p
591 ///
592 /// and
593 ///
594 ///   x = load p
595 ///   ...
596 ///   store x, p
597 ///
598 /// in both cases we want to verify that there are no possible writes to the
599 /// memory referenced by p between the earlier and later instruction.
600 bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
601                                    unsigned LaterGeneration,
602                                    Instruction *EarlierInst,
603                                    Instruction *LaterInst) {
604   // Check the simple memory generation tracking first.
605   if (EarlierGeneration == LaterGeneration)
606     return true;
607 
608   if (!MSSA)
609     return false;
610 
611   // If MemorySSA has determined that one of EarlierInst or LaterInst does not
612   // read/write memory, then we can safely return true here.
613   // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
614   // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
615   // by also checking the MemorySSA MemoryAccess on the instruction.  Initial
616   // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
617   // with the default optimization pipeline.
618   auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
619   if (!EarlierMA)
620     return true;
621   auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
622   if (!LaterMA)
623     return true;
624 
625   // Since we know LaterDef dominates LaterInst and EarlierInst dominates
626   // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
627   // EarlierInst and LaterInst and neither can any other write that potentially
628   // clobbers LaterInst.
629   MemoryAccess *LaterDef =
630       MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
631   return MSSA->dominates(LaterDef, EarlierMA);
632 }
633 
634 bool EarlyCSE::processNode(DomTreeNode *Node) {
635   bool Changed = false;
636   BasicBlock *BB = Node->getBlock();
637 
638   // If this block has a single predecessor, then the predecessor is the parent
639   // of the domtree node and all of the live out memory values are still current
640   // in this block.  If this block has multiple predecessors, then they could
641   // have invalidated the live-out memory values of our parent value.  For now,
642   // just be conservative and invalidate memory if this block has multiple
643   // predecessors.
644   if (!BB->getSinglePredecessor())
645     ++CurrentGeneration;
646 
647   // If this node has a single predecessor which ends in a conditional branch,
648   // we can infer the value of the branch condition given that we took this
649   // path.  We need the single predecessor to ensure there's not another path
650   // which reaches this block where the condition might hold a different
651   // value.  Since we're adding this to the scoped hash table (like any other
652   // def), it will have been popped if we encounter a future merge block.
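  //
  // For example (illustrative):
  //
  //   pred:
  //     %cond = icmp eq i32 %a, %b
  //     br i1 %cond, label %bb, label %other
  //   bb:                            ; %pred is the single predecessor
  //     ...                          ; %cond is known to be true here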
653   if (BasicBlock *Pred = BB->getSinglePredecessor()) {
654     auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
655     if (BI && BI->isConditional()) {
656       auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
657       if (CondInst && SimpleValue::canHandle(CondInst)) {
658         assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
659         auto *TorF = (BI->getSuccessor(0) == BB)
660                          ? ConstantInt::getTrue(BB->getContext())
661                          : ConstantInt::getFalse(BB->getContext());
662         AvailableValues.insert(CondInst, TorF);
663         DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
664                      << CondInst->getName() << "' as " << *TorF << " in "
665                      << BB->getName() << "\n");
666         // Replace all dominated uses with the known value.
667         if (unsigned Count = replaceDominatedUsesWith(
668                 CondInst, TorF, DT, BasicBlockEdge(Pred, BB))) {
669           Changed = true;
670           NumCSECVP += Count;
671         }
672       }
673     }
674   }
675 
676   /// LastStore - Keep track of the last non-volatile store that we saw... for
677   /// as long as there is no instruction that reads memory.  If we see a store
678   /// to the same location, we delete the dead store.  This zaps trivial dead
679   /// stores which can occur in bitfield code among other things.
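  ///
  /// For example (illustrative), in
  ///
  ///   store i32 1, i32* %p
  ///   store i32 2, i32* %p
  ///
  /// the first store is dead and is removed when the second one is seen.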
680   Instruction *LastStore = nullptr;
681 
682   // See if any instructions in the block can be eliminated.  If so, do it.  If
683   // not, add them to AvailableValues.
684   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
685     Instruction *Inst = &*I++;
686 
687     // Dead instructions should just be removed.
688     if (isInstructionTriviallyDead(Inst, &TLI)) {
689       DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
690       removeMSSA(Inst);
691       Inst->eraseFromParent();
692       Changed = true;
693       ++NumSimplify;
694       continue;
695     }
696 
697     // Skip assume intrinsics, they don't really have side effects (although
698     // they're marked as such to ensure preservation of control dependencies),
699     // and this pass will not bother with its removal. However, we should mark
700     // its condition as true for all dominated blocks.
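    //
    // For example (illustrative):
    //
    //   %c = icmp ult i32 %i, 10
    //   call void @llvm.assume(i1 %c)
    //   %d = icmp ult i32 %i, 10     ; CSE'd to 'true' in dominated code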
701     if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
702       auto *CondI =
703           dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
704       if (CondI && SimpleValue::canHandle(CondI)) {
705         DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst << '\n');
706         AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
707       } else
708         DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
709       continue;
710     }
711 
712     // Skip invariant.start intrinsics since they only read memory, and we can
713     // forward values across it. Also, we don't need to consume the last store
714     // since the semantics of invariant.start allow us to perform DSE of the
715     // last store, if there was a store following invariant.start. Consider:
716     //
717     // store 30, i8* p
718     // invariant.start(p)
719     // store 40, i8* p
720     // We can DSE the store to 30, since the store 40 to invariant location p
721     // causes undefined behaviour.
722     if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>()))
723       continue;
724 
725     if (match(Inst, m_Intrinsic<Intrinsic::experimental_guard>())) {
726       if (auto *CondI =
727               dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
728         if (SimpleValue::canHandle(CondI)) {
729           // Do we already know the actual value of this condition?
730           if (auto *KnownCond = AvailableValues.lookup(CondI)) {
731             // Is the condition known to be true?
732             if (isa<ConstantInt>(KnownCond) &&
733                 cast<ConstantInt>(KnownCond)->isOne()) {
734               DEBUG(dbgs() << "EarlyCSE removing guard: " << *Inst << '\n');
735               removeMSSA(Inst);
736               Inst->eraseFromParent();
737               Changed = true;
738               continue;
739             } else
740               // Use the known value if it wasn't true.
741               cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
742           }
743           // The condition we're guarding on here is true for all dominated
744           // locations.
745           AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
746         }
747       }
748 
749       // Guard intrinsics read all memory, but don't write any memory.
750       // Accordingly, don't update the generation but consume the last store (to
751       // avoid an incorrect DSE).
752       LastStore = nullptr;
753       continue;
754     }
755 
756     // If the instruction can be simplified (e.g. X+0 = X) then replace it with
757     // its simpler value.
758     if (Value *V = SimplifyInstruction(Inst, SQ)) {
759       DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << "  to: " << *V << '\n');
760       bool Killed = false;
761       if (!Inst->use_empty()) {
762         Inst->replaceAllUsesWith(V);
763         Changed = true;
764       }
765       if (isInstructionTriviallyDead(Inst, &TLI)) {
766         removeMSSA(Inst);
767         Inst->eraseFromParent();
768         Changed = true;
769         Killed = true;
770       }
771       if (Changed)
772         ++NumSimplify;
773       if (Killed)
774         continue;
775     }
776 
777     // If this is a simple instruction that we can value number, process it.
778     if (SimpleValue::canHandle(Inst)) {
779       // See if the instruction has an available value.  If so, use it.
780       if (Value *V = AvailableValues.lookup(Inst)) {
781         DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst << "  to: " << *V << '\n');
782         if (auto *I = dyn_cast<Instruction>(V))
783           I->andIRFlags(Inst);
784         Inst->replaceAllUsesWith(V);
785         removeMSSA(Inst);
786         Inst->eraseFromParent();
787         Changed = true;
788         ++NumCSE;
789         continue;
790       }
791 
792       // Otherwise, just remember that this value is available.
793       AvailableValues.insert(Inst, Inst);
794       continue;
795     }
796 
797     ParseMemoryInst MemInst(Inst, TTI);
798     // If this is a non-volatile load, process it.
799     if (MemInst.isValid() && MemInst.isLoad()) {
800       // (conservatively) we can't peek past the ordering implied by this
801       // operation, but we can add this load to our set of available values
802       if (MemInst.isVolatile() || !MemInst.isUnordered()) {
803         LastStore = nullptr;
804         ++CurrentGeneration;
805       }
806 
807       // If we have an available version of this load, and if it is the right
808       // generation or the load is known to be from an invariant location,
809       // replace this instruction.
810       //
811       // If either the dominating load or the current load is invariant, then
812       // we can assume the current load loads the same value as the dominating
813       // load.
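      //
      // For example (illustrative):
      //
      //   %v1 = load i32, i32* %p, !invariant.load !0
      //   call void @f()            ; may write memory
      //   %v2 = load i32, i32* %p   ; can reuse %v1: the dominating load
      //                             ; is from an invariant location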
814       LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
815       if (InVal.DefInst != nullptr &&
816           InVal.MatchingId == MemInst.getMatchingId() &&
817           // We don't yet handle removing loads with ordering of any kind.
818           !MemInst.isVolatile() && MemInst.isUnordered() &&
819           // We can't replace an atomic load with one which isn't also atomic.
820           InVal.IsAtomic >= MemInst.isAtomic() &&
821           (InVal.IsInvariant || MemInst.isInvariantLoad() ||
822            isSameMemGeneration(InVal.Generation, CurrentGeneration,
823                                InVal.DefInst, Inst))) {
824         Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
825         if (Op != nullptr) {
826           DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
827                        << "  to: " << *InVal.DefInst << '\n');
828           if (!Inst->use_empty())
829             Inst->replaceAllUsesWith(Op);
830           removeMSSA(Inst);
831           Inst->eraseFromParent();
832           Changed = true;
833           ++NumCSELoad;
834           continue;
835         }
836       }
837 
838       // Otherwise, remember that we have this instruction.
839       AvailableLoads.insert(
840           MemInst.getPointerOperand(),
841           LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
842                     MemInst.isAtomic(), MemInst.isInvariantLoad()));
843       LastStore = nullptr;
844       continue;
845     }
846 
847     // If this instruction may read from memory or throw (and potentially read
848     // from memory in the exception handler), forget LastStore.  Load/store
849     // intrinsics will indicate both a read and a write to memory.  The target
850     // may override this (e.g. so that a store intrinsic does not read from
851     // memory, and thus will be treated the same as a regular store for
852     // commoning purposes).
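    //
    // For example (illustrative):
    //
    //   store i32 1, i32* %p    ; LastStore
    //   %v = load i32, i32* %q  ; may read %p, so the store above is not
    //                           ; trivially dead; forget LastStore
    //   store i32 2, i32* %p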
853     if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
854         !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
855       LastStore = nullptr;
856 
857     // If this is a read-only call, process it.
858     if (CallValue::canHandle(Inst)) {
859       // If we have an available version of this call, and if it is the right
860       // generation, replace this instruction.
861       std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
862       if (InVal.first != nullptr &&
863           isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
864                               Inst)) {
865         DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
866                      << "  to: " << *InVal.first << '\n');
867         if (!Inst->use_empty())
868           Inst->replaceAllUsesWith(InVal.first);
869         removeMSSA(Inst);
870         Inst->eraseFromParent();
871         Changed = true;
872         ++NumCSECall;
873         continue;
874       }
875 
876       // Otherwise, remember that we have this instruction.
877       AvailableCalls.insert(
878           Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
879       continue;
880     }
881 
882     // A release fence requires that all stores complete before it, but does
883     // not prevent the reordering of following loads 'before' the fence.  As a
884     // result, we don't need to consider it as writing to memory and don't need
885     // to advance the generation.  We do need to prevent DSE across the fence,
886     // but that's handled above.
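    //
    // For example (illustrative):
    //
    //   %v1 = load i32, i32* %p
    //   fence release             ; does not invalidate the recorded load
    //   %v2 = load i32, i32* %p   ; may still be CSE'd with %v1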
887     if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
888       if (FI->getOrdering() == AtomicOrdering::Release) {
889         assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
890         continue;
891       }
892 
893     // Write-back DSE - If we write back the same value we just loaded from
894     // the same location and haven't passed any intervening writes or ordering
895     // operations, we can remove the write.  The primary benefit is in allowing
896     // the available load table to remain valid and value forward past where
897     // the store originally was.
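    //
    // For example (illustrative):
    //
    //   %v = load i32, i32* %p
    //   ...                      ; no intervening writes or ordering ops
    //   store i32 %v, i32* %p    ; stores back the loaded value; removable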
898     if (MemInst.isValid() && MemInst.isStore()) {
899       LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
900       if (InVal.DefInst &&
901           InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
902           InVal.MatchingId == MemInst.getMatchingId() &&
903           // We don't yet handle removing stores with ordering of any kind.
904           !MemInst.isVolatile() && MemInst.isUnordered() &&
905           isSameMemGeneration(InVal.Generation, CurrentGeneration,
906                               InVal.DefInst, Inst)) {
907         // It is okay to have a LastStore to a different pointer here if MemorySSA
908         // tells us that the load and store are from the same memory generation.
909         // In that case, LastStore should keep its present value since we're
910         // removing the current store.
911         assert((!LastStore ||
912                 ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
913                     MemInst.getPointerOperand() ||
914                 MSSA) &&
915                "can't have an intervening store if not using MemorySSA!");
916         DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
917         removeMSSA(Inst);
918         Inst->eraseFromParent();
919         Changed = true;
920         ++NumDSE;
921         // We can avoid incrementing the generation count since we were able
922         // to eliminate this store.
923         continue;
924       }
925     }
926 
927     // Okay, this isn't something we can CSE at all.  Check to see if it is
928     // something that could modify memory.  If so, our available memory values
929     // cannot be used so bump the generation count.
930     if (Inst->mayWriteToMemory()) {
931       ++CurrentGeneration;
932 
933       if (MemInst.isValid() && MemInst.isStore()) {
934         // We do a trivial form of DSE if there are two stores to the same
935         // location with no intervening loads.  Delete the earlier store.
936         // At the moment, we don't remove ordered stores, but do remove
937         // unordered atomic stores.  There's no special requirement (for
938         // unordered atomics) about removing atomic stores only in favor of
939         // other atomic stores since we're going to execute the non-atomic
940         // one anyway and the atomic one might never have become visible.
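        //
        // For example (illustrative):
        //
        //   store i32 1, i32* %p   ; LastStore, now dead
        //   store i32 2, i32* %p   ; current store to the same location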
941         if (LastStore) {
942           ParseMemoryInst LastStoreMemInst(LastStore, TTI);
943           assert(LastStoreMemInst.isUnordered() &&
944                  !LastStoreMemInst.isVolatile() &&
945                  "Violated invariant");
946           if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
947             DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
948                          << "  due to: " << *Inst << '\n');
949             removeMSSA(LastStore);
950             LastStore->eraseFromParent();
951             Changed = true;
952             ++NumDSE;
953             LastStore = nullptr;
954           }
955           // fallthrough - we can exploit information about this store
956         }
957 
958         // Okay, we just invalidated anything we knew about loaded values.  Try
959         // to salvage *something* by remembering that the stored value is a live
960         // version of the pointer.  It is safe to forward from volatile stores
961         // to non-volatile loads, so we don't have to check for volatility of
962         // the store.
963         AvailableLoads.insert(
964             MemInst.getPointerOperand(),
965             LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
966                       MemInst.isAtomic(), /*IsInvariant=*/false));
967 
968         // Remember that this was the last unordered store we saw for DSE. We
969         // don't yet handle DSE on ordered or volatile stores since we don't
970         // have a good way to model the ordering requirement for following
971         // passes once the store is removed.  We could insert a fence, but
972         // since fences are slightly stronger than stores in their ordering,
973         // it's not clear this is a profitable transform. Another option would
974         // be to merge the ordering with that of the post dominating store.
975         if (MemInst.isUnordered() && !MemInst.isVolatile())
976           LastStore = Inst;
977         else
978           LastStore = nullptr;
979       }
980     }
981   }
982 
983   return Changed;
984 }
985 
986 bool EarlyCSE::run() {
987   // Note, deque is being used here because there are significant performance
988   // gains over vector when the container becomes very large due to the
989   // specific access patterns. For more information see the mailing list
990   // discussion on this:
991   // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
992   std::deque<StackNode *> nodesToProcess;
993 
994   bool Changed = false;
995 
996   // Process the root node.
997   nodesToProcess.push_back(new StackNode(
998       AvailableValues, AvailableLoads, AvailableCalls, CurrentGeneration,
999       DT.getRootNode(), DT.getRootNode()->begin(), DT.getRootNode()->end()));
1000 
1001   // Save the current generation.
1002   unsigned LiveOutGeneration = CurrentGeneration;
1003 
1004   // Process the stack.
1005   while (!nodesToProcess.empty()) {
1006     // Grab the node at the top of the stack. Set the current generation,
1007     // remove the node from the stack, and process it.
1008     StackNode *NodeToProcess = nodesToProcess.back();
1009 
1010     // Initialize class members.
1011     CurrentGeneration = NodeToProcess->currentGeneration();
1012 
1013     // Check if the node needs to be processed.
1014     if (!NodeToProcess->isProcessed()) {
1015       // Process the node.
1016       Changed |= processNode(NodeToProcess->node());
1017       NodeToProcess->childGeneration(CurrentGeneration);
1018       NodeToProcess->process();
1019     } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
1020       // Push the next child onto the stack.
1021       DomTreeNode *child = NodeToProcess->nextChild();
1022       nodesToProcess.push_back(
1023           new StackNode(AvailableValues, AvailableLoads, AvailableCalls,
1024                         NodeToProcess->childGeneration(), child, child->begin(),
1025                         child->end()));
1026     } else {
1027       // It has been processed, and there are no more children to process,
1028       // so delete it and pop it off the stack.
1029       delete NodeToProcess;
1030       nodesToProcess.pop_back();
1031     }
1032   } // while (!nodes...)
1033 
1034   // Reset the current generation.
1035   CurrentGeneration = LiveOutGeneration;
1036 
1037   return Changed;
1038 }
1039 
1040 PreservedAnalyses EarlyCSEPass::run(Function &F,
1041                                     FunctionAnalysisManager &AM) {
1042   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1043   auto &TTI = AM.getResult<TargetIRAnalysis>(F);
1044   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
1045   auto &AC = AM.getResult<AssumptionAnalysis>(F);
1046   auto *MSSA =
1047       UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;
1048 
1049   EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1050 
1051   if (!CSE.run())
1052     return PreservedAnalyses::all();
1053 
1054   PreservedAnalyses PA;
1055   PA.preserveSet<CFGAnalyses>();
1056   PA.preserve<GlobalsAA>();
1057   if (UseMemorySSA)
1058     PA.preserve<MemorySSAAnalysis>();
1059   return PA;
1060 }
1061 
1062 namespace {
1063 
1064 /// \brief A simple and fast domtree-based CSE pass.
1065 ///
1066 /// This pass does a simple depth-first walk over the dominator tree,
1067 /// eliminating trivially redundant instructions and using instsimplify to
1068 /// canonicalize things as it goes. It is intended to be fast and catch obvious
1069 /// cases so that instcombine and other passes are more effective. It is
1070 /// expected that a later pass of GVN will catch the interesting/hard cases.
1071 template<bool UseMemorySSA>
1072 class EarlyCSELegacyCommonPass : public FunctionPass {
1073 public:
1074   static char ID;
1075 
1076   EarlyCSELegacyCommonPass() : FunctionPass(ID) {
1077     if (UseMemorySSA)
1078       initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
1079     else
1080       initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
1081   }
1082 
1083   bool runOnFunction(Function &F) override {
1084     if (skipFunction(F))
1085       return false;
1086 
1087     auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
1088     auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
1089     auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1090     auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1091     auto *MSSA =
1092         UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;
1093 
1094     EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);
1095 
1096     return CSE.run();
1097   }
1098 
1099   void getAnalysisUsage(AnalysisUsage &AU) const override {
1100     AU.addRequired<AssumptionCacheTracker>();
1101     AU.addRequired<DominatorTreeWrapperPass>();
1102     AU.addRequired<TargetLibraryInfoWrapperPass>();
1103     AU.addRequired<TargetTransformInfoWrapperPass>();
1104     if (UseMemorySSA) {
1105       AU.addRequired<MemorySSAWrapperPass>();
1106       AU.addPreserved<MemorySSAWrapperPass>();
1107     }
1108     AU.addPreserved<GlobalsAAWrapperPass>();
1109     AU.setPreservesCFG();
1110   }
1111 };
1112 
1113 } // end anonymous namespace
1114 
1115 using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;
1116 
1117 template<>
1118 char EarlyCSELegacyPass::ID = 0;
1119 
1120 INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
1121                       false)
1122 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1123 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1124 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1125 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1126 INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)
1127 
1128 using EarlyCSEMemSSALegacyPass =
1129     EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;
1130 
1131 template<>
1132 char EarlyCSEMemSSALegacyPass::ID = 0;
1133 
1134 FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
1135   if (UseMemorySSA)
1136     return new EarlyCSEMemSSALegacyPass();
1137   else
1138     return new EarlyCSELegacyPass();
1139 }
1140 
1141 INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
1142                       "Early CSE w/ MemorySSA", false, false)
1143 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
1144 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
1145 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
1146 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1147 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
1148 INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
1149                     "Early CSE w/ MemorySSA", false, false)
1150