1 //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The code below implements dead store elimination using MemorySSA. It uses
10 // the following general approach: given a MemoryDef, walk upwards to find
11 // clobbering MemoryDefs that may be killed by the starting def. Then check
12 // that there are no uses that may read the location of the original MemoryDef
13 // in between both MemoryDefs. A bit more concretely:
14 //
15 // For all MemoryDefs StartDef:
16 // 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
17 //    upwards.
18 // 2. Check that there are no reads between EarlierAccess and the StartDef by
19 //    checking all uses starting at EarlierAccess and walking until we see
20 //    StartDef.
21 // 3. For each found CurrentDef, check that:
22 //   1. There are no barrier instructions between CurrentDef and StartDef (like
23 //       throws or stores with ordering constraints).
24 //   2. StartDef is executed whenever CurrentDef is executed.
25 //   3. StartDef completely overwrites CurrentDef.
26 // 4. Erase CurrentDef from the function and MemorySSA.
27 //
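// For illustration only (not from the original description): a minimal sketch
// of the kind of IR this handles, assuming %p is an alloca with no reads in
// between the two stores. The first store is completely overwritten and can
// be erased:
//
//   %p = alloca i32
//   store i32 1, i32* %p   ; earlier def, killed
//   store i32 2, i32* %p   ; killing def (StartDef)
//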
28 //===----------------------------------------------------------------------===//
29 
30 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/MapVector.h"
34 #include "llvm/ADT/PostOrderIterator.h"
35 #include "llvm/ADT/SetVector.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Statistic.h"
39 #include "llvm/ADT/StringRef.h"
40 #include "llvm/Analysis/AliasAnalysis.h"
41 #include "llvm/Analysis/CaptureTracking.h"
42 #include "llvm/Analysis/GlobalsModRef.h"
43 #include "llvm/Analysis/MemoryBuiltins.h"
44 #include "llvm/Analysis/MemoryLocation.h"
45 #include "llvm/Analysis/MemorySSA.h"
46 #include "llvm/Analysis/MemorySSAUpdater.h"
47 #include "llvm/Analysis/PostDominators.h"
48 #include "llvm/Analysis/TargetLibraryInfo.h"
49 #include "llvm/Analysis/ValueTracking.h"
50 #include "llvm/IR/Argument.h"
51 #include "llvm/IR/BasicBlock.h"
52 #include "llvm/IR/Constant.h"
53 #include "llvm/IR/Constants.h"
54 #include "llvm/IR/DataLayout.h"
55 #include "llvm/IR/Dominators.h"
56 #include "llvm/IR/Function.h"
57 #include "llvm/IR/InstIterator.h"
58 #include "llvm/IR/InstrTypes.h"
59 #include "llvm/IR/Instruction.h"
60 #include "llvm/IR/Instructions.h"
61 #include "llvm/IR/IntrinsicInst.h"
62 #include "llvm/IR/Intrinsics.h"
63 #include "llvm/IR/LLVMContext.h"
64 #include "llvm/IR/Module.h"
65 #include "llvm/IR/PassManager.h"
66 #include "llvm/IR/PatternMatch.h"
67 #include "llvm/IR/Value.h"
68 #include "llvm/InitializePasses.h"
69 #include "llvm/Pass.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CommandLine.h"
72 #include "llvm/Support/Debug.h"
73 #include "llvm/Support/DebugCounter.h"
74 #include "llvm/Support/ErrorHandling.h"
75 #include "llvm/Support/MathExtras.h"
76 #include "llvm/Support/raw_ostream.h"
77 #include "llvm/Transforms/Scalar.h"
78 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
79 #include "llvm/Transforms/Utils/Local.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <cstddef>
83 #include <cstdint>
84 #include <iterator>
85 #include <map>
86 #include <utility>
87 
88 using namespace llvm;
89 using namespace PatternMatch;
90 
91 #define DEBUG_TYPE "dse"
92 
93 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
94 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
95 STATISTIC(NumFastStores, "Number of stores deleted");
96 STATISTIC(NumFastOther, "Number of other instrs removed");
97 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
98 STATISTIC(NumModifiedStores, "Number of stores modified");
99 STATISTIC(NumCFGChecks, "Number of blocks checked for killing stores");
100 STATISTIC(NumCFGTries, "Number of CFG path checks attempted");
101 STATISTIC(NumCFGSuccess, "Number of CFG path checks that succeeded");
102 STATISTIC(NumGetDomMemoryDefPassed,
103           "Number of times a valid candidate is returned from getDomMemoryDef");
104 STATISTIC(NumDomMemDefChecks,
105           "Number of iterations checking for reads in getDomMemoryDef");
106 
107 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
108               "Controls which MemoryDefs are eliminated.");
109 
110 static cl::opt<bool>
111 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
112   cl::init(true), cl::Hidden,
113   cl::desc("Enable partial-overwrite tracking in DSE"));
114 
115 static cl::opt<bool>
116 EnablePartialStoreMerging("enable-dse-partial-store-merging",
117   cl::init(true), cl::Hidden,
118   cl::desc("Enable partial store merging in DSE"));
119 
120 static cl::opt<unsigned>
121     MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
122                        cl::desc("The number of memory instructions to scan for "
123                                 "dead store elimination (default = 150)"));
124 static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
125     "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
126     cl::desc("The maximum number of steps while walking upwards to find "
127              "MemoryDefs that may be killed (default = 90)"));
128 
129 static cl::opt<unsigned> MemorySSAPartialStoreLimit(
130     "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
131     cl::desc("The maximum number of candidates that only partially overwrite the "
132              "killing MemoryDef to consider"
133              " (default = 5)"));
134 
135 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
136     "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
137     cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
138              "other stores per basic block (default = 5000)"));
139 
140 static cl::opt<unsigned> MemorySSASameBBStepCost(
141     "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
142     cl::desc(
143         "The cost of a step in the same basic block as the killing MemoryDef "
144         "(default = 1)"));
145 
146 static cl::opt<unsigned>
147     MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
148                              cl::Hidden,
149                              cl::desc("The cost of a step in a different basic "
150                                       "block than the killing MemoryDef "
151                                       "(default = 5)"));
152 
153 static cl::opt<unsigned> MemorySSAPathCheckLimit(
154     "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
155     cl::desc("The maximum number of blocks to check when trying to prove that "
156              "all paths to an exit go through a killing block (default = 50)"));
157 
158 //===----------------------------------------------------------------------===//
159 // Helper functions
160 //===----------------------------------------------------------------------===//
161 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
162 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
163 
164 /// Does this instruction write some memory?  This only returns true for things
165 /// that we can analyze with other helpers below.
166 static bool hasAnalyzableMemoryWrite(Instruction *I,
167                                      const TargetLibraryInfo &TLI) {
168   if (isa<StoreInst>(I))
169     return true;
170   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
171     switch (II->getIntrinsicID()) {
172     default:
173       return false;
174     case Intrinsic::memset:
175     case Intrinsic::memmove:
176     case Intrinsic::memcpy:
177     case Intrinsic::memcpy_inline:
178     case Intrinsic::memcpy_element_unordered_atomic:
179     case Intrinsic::memmove_element_unordered_atomic:
180     case Intrinsic::memset_element_unordered_atomic:
181     case Intrinsic::init_trampoline:
182     case Intrinsic::lifetime_end:
183     case Intrinsic::masked_store:
184       return true;
185     }
186   }
187   if (auto *CB = dyn_cast<CallBase>(I)) {
188     LibFunc LF;
189     if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
190       switch (LF) {
191       case LibFunc_strcpy:
192       case LibFunc_strncpy:
193       case LibFunc_strcat:
194       case LibFunc_strncat:
195         return true;
196       default:
197         return false;
198       }
199     }
200   }
201   return false;
202 }
203 
204 /// Return a Location stored to by the specified instruction. If isRemovable
205 /// returns true, this function completely describes the memory operations for
206 /// this instruction.
207 static MemoryLocation getLocForWrite(Instruction *Inst,
208                                      const TargetLibraryInfo &TLI) {
209   if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
210     return MemoryLocation::get(SI);
211 
212   // memcpy/memmove/memset.
213   if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst))
214     return MemoryLocation::getForDest(MI);
215 
216   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
217     switch (II->getIntrinsicID()) {
218     default:
219       return MemoryLocation(); // Unhandled intrinsic.
220     case Intrinsic::init_trampoline:
221       return MemoryLocation::getAfter(II->getArgOperand(0));
222     case Intrinsic::masked_store:
223       return MemoryLocation::getForArgument(II, 1, TLI);
224     case Intrinsic::lifetime_end: {
225       uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
226       return MemoryLocation(II->getArgOperand(1), Len);
227     }
228     }
229   }
230   if (auto *CB = dyn_cast<CallBase>(Inst))
231     // All the supported TLI functions so far happen to have dest as their
232     // first argument.
233     return MemoryLocation::getAfter(CB->getArgOperand(0));
234   return MemoryLocation();
235 }
236 
237 /// If the value of this instruction and the memory it writes to is unused, may
238 /// we delete this instruction?
239 static bool isRemovable(Instruction *I) {
240   // Don't remove volatile/atomic stores.
241   if (StoreInst *SI = dyn_cast<StoreInst>(I))
242     return SI->isUnordered();
243 
244   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
245     switch (II->getIntrinsicID()) {
246     default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
247     case Intrinsic::lifetime_end:
248       // Never remove dead lifetime_end's, e.g. because it is followed by a
249       // free.
250       return false;
251     case Intrinsic::init_trampoline:
252       // Always safe to remove init_trampoline.
253       return true;
254     case Intrinsic::memset:
255     case Intrinsic::memmove:
256     case Intrinsic::memcpy:
257     case Intrinsic::memcpy_inline:
258       // Don't remove volatile memory intrinsics.
259       return !cast<MemIntrinsic>(II)->isVolatile();
260     case Intrinsic::memcpy_element_unordered_atomic:
261     case Intrinsic::memmove_element_unordered_atomic:
262     case Intrinsic::memset_element_unordered_atomic:
263     case Intrinsic::masked_store:
264       return true;
265     }
266   }
267 
268   // note: only get here for calls with analyzable writes - i.e. libcalls
269   if (auto *CB = dyn_cast<CallBase>(I))
270     return CB->use_empty();
271 
272   return false;
273 }
274 
275 /// Returns true if the end of this instruction can be safely shortened in
276 /// length.
277 static bool isShortenableAtTheEnd(Instruction *I) {
278   // Don't shorten stores for now
279   if (isa<StoreInst>(I))
280     return false;
281 
282   if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
283     switch (II->getIntrinsicID()) {
284       default: return false;
285       case Intrinsic::memset:
286       case Intrinsic::memcpy:
287       case Intrinsic::memcpy_element_unordered_atomic:
288       case Intrinsic::memset_element_unordered_atomic:
289         // Do shorten memory intrinsics.
290         // FIXME: Add memmove if it's also safe to transform.
291         return true;
292     }
293   }
294 
295   // Don't shorten library calls for now.
296 
297   return false;
298 }
299 
300 /// Returns true if the beginning of this instruction can be safely shortened
301 /// in length.
302 static bool isShortenableAtTheBeginning(Instruction *I) {
303   // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
304   // easily done by offsetting the source address.
305   return isa<AnyMemSetInst>(I);
306 }
307 
308 static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
309                                const TargetLibraryInfo &TLI,
310                                const Function *F) {
311   uint64_t Size;
312   ObjectSizeOpts Opts;
313   Opts.NullIsUnknownSize = NullPointerIsDefined(F);
314 
315   if (getObjectSize(V, Size, DL, &TLI, Opts))
316     return Size;
317   return MemoryLocation::UnknownSize;
318 }
319 
320 namespace {
321 
322 enum OverwriteResult {
323   OW_Begin,
324   OW_Complete,
325   OW_End,
326   OW_PartialEarlierWithFullLater,
327   OW_MaybePartial,
328   OW_Unknown
329 };
330 
331 } // end anonymous namespace
332 
333 /// Check if two instructions are masked stores that completely
334 /// overwrite one another. More specifically, \p Later has to
335 /// overwrite \p Earlier.
336 static OverwriteResult isMaskedStoreOverwrite(const Instruction *Later,
337                                               const Instruction *Earlier,
338                                               BatchAAResults &AA) {
339   const auto *IIL = dyn_cast<IntrinsicInst>(Later);
340   const auto *IIE = dyn_cast<IntrinsicInst>(Earlier);
341   if (IIL == nullptr || IIE == nullptr)
342     return OW_Unknown;
343   if (IIL->getIntrinsicID() != Intrinsic::masked_store ||
344       IIE->getIntrinsicID() != Intrinsic::masked_store)
345     return OW_Unknown;
346   // Pointers.
347   Value *LP = IIL->getArgOperand(1)->stripPointerCasts();
348   Value *EP = IIE->getArgOperand(1)->stripPointerCasts();
349   if (LP != EP && !AA.isMustAlias(LP, EP))
350     return OW_Unknown;
351   // Masks.
352   // TODO: check that Later's mask is a superset of the Earlier's mask.
353   if (IIL->getArgOperand(3) != IIE->getArgOperand(3))
354     return OW_Unknown;
355   return OW_Complete;
356 }
357 
358 /// Return 'OW_Complete' if a store to the 'Later' location completely
359 /// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
360 /// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
361 /// beginning of the 'Earlier' location is overwritten by 'Later'.
362 /// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
363 /// overwritten by a later (smaller) store which doesn't write outside the big
364 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
365 /// NOTE: This function must only be called if both \p Later and \p Earlier
366 /// write to the same underlying object with valid \p EarlierOff and \p
367 /// LaterOff.
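/// For example (hypothetical offsets, with partial-overwrite tracking
/// disabled): an earlier write of [0, 16) followed by a later write of
/// [8, 24) yields OW_End, while a later write of [-8, 8) over the same
/// earlier write yields OW_Begin.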
368 static OverwriteResult isPartialOverwrite(const MemoryLocation &Later,
369                                           const MemoryLocation &Earlier,
370                                           int64_t EarlierOff, int64_t LaterOff,
371                                           Instruction *DepWrite,
372                                           InstOverlapIntervalsTy &IOL) {
373   const uint64_t LaterSize = Later.Size.getValue();
374   const uint64_t EarlierSize = Earlier.Size.getValue();
375   // We may now overlap, although the overlap is not complete. There might also
376   // be other incomplete overlaps, and together, they might cover the complete
377   // earlier write.
378   // Note: The correctness of this logic depends on the fact that this function
379   // is never called with DepWrite when there are any intervening reads.
380   if (EnablePartialOverwriteTracking &&
381       LaterOff < int64_t(EarlierOff + EarlierSize) &&
382       int64_t(LaterOff + LaterSize) >= EarlierOff) {
383 
384     // Insert our part of the overlap into the map.
385     auto &IM = IOL[DepWrite];
386     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
387                       << ", " << int64_t(EarlierOff + EarlierSize)
388                       << ") Later [" << LaterOff << ", "
389                       << int64_t(LaterOff + LaterSize) << ")\n");
390 
391     // Make sure that we only insert non-overlapping intervals and combine
392     // adjacent intervals. The intervals are stored in the map with the ending
393     // offset as the key (in the half-open sense) and the starting offset as
394     // the value.
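    // For illustration (hypothetical offsets): a later write covering [4, 12)
    // is recorded as IM[12] = 4. If another write covering [12, 20) is seen
    // next, lower_bound(12) finds that entry and the two are merged into the
    // single interval IM[20] = 4.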
395     int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
396 
397     // Find any intervals ending at, or after, LaterIntStart which start
398     // before LaterIntEnd.
399     auto ILI = IM.lower_bound(LaterIntStart);
400     if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
401       // This existing interval is overlapped with the current store somewhere
402       // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
403       // intervals and adjusting our start and end.
404       LaterIntStart = std::min(LaterIntStart, ILI->second);
405       LaterIntEnd = std::max(LaterIntEnd, ILI->first);
406       ILI = IM.erase(ILI);
407 
408       // Continue erasing and adjusting our end in case other previous
409       // intervals are also overlapped with the current store.
410       //
410       // |--- earlier 1 ---|  |--- earlier 2 ---|
411       //      |--------- later ---------|
413       //
414       while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
415         assert(ILI->second > LaterIntStart && "Unexpected interval");
416         LaterIntEnd = std::max(LaterIntEnd, ILI->first);
417         ILI = IM.erase(ILI);
418       }
419     }
420 
421     IM[LaterIntEnd] = LaterIntStart;
422 
423     ILI = IM.begin();
424     if (ILI->second <= EarlierOff &&
425         ILI->first >= int64_t(EarlierOff + EarlierSize)) {
426       LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
427                         << EarlierOff << ", "
428                         << int64_t(EarlierOff + EarlierSize)
429                         << ") Composite Later [" << ILI->second << ", "
430                         << ILI->first << ")\n");
431       ++NumCompletePartials;
432       return OW_Complete;
433     }
434   }
435 
436   // Check for an earlier store which writes to all the memory locations that
437   // the later store writes to.
438   if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
439       int64_t(EarlierOff + EarlierSize) > LaterOff &&
440       uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
441     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite of an earlier store ["
442                       << EarlierOff << ", "
443                       << int64_t(EarlierOff + EarlierSize)
444                       << ") by a later store [" << LaterOff << ", "
445                       << int64_t(LaterOff + LaterSize) << ")\n");
446     // TODO: Maybe come up with a better name?
447     return OW_PartialEarlierWithFullLater;
448   }
449 
450   // Another interesting case is if the later store overwrites the end of the
451   // earlier store.
452   //
453   //      |--earlier--|
454   //                |--   later   --|
455   //
456   // In this case we may want to trim the size of earlier to avoid generating
457   // writes to addresses which will definitely be overwritten later
458   if (!EnablePartialOverwriteTracking &&
459       (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
460        int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
461     return OW_End;
462 
463   // Finally, we also need to check if the later store overwrites the beginning
464   // of the earlier store.
465   //
466   //                |--earlier--|
467   //      |--   later   --|
468   //
469   // In this case we may want to move the destination address and trim the size
470   // of earlier to avoid generating writes to addresses which will definitely
471   // be overwritten later.
472   if (!EnablePartialOverwriteTracking &&
473       (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
474     assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
475            "Expect to be handled as OW_Complete");
476     return OW_Begin;
477   }
478   // Otherwise, they don't completely overlap.
479   return OW_Unknown;
480 }
481 
482 /// Returns true if the memory which is accessed by the second instruction is not
483 /// modified between the first and the second instruction.
484 /// Precondition: Second instruction must be dominated by the first
485 /// instruction.
486 static bool
487 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
488                            BatchAAResults &AA, const DataLayout &DL,
489                            DominatorTree *DT) {
490   // Do a backwards scan through the CFG from SecondI to FirstI. Look for
491   // instructions which can modify the memory location accessed by SecondI.
492   //
493   // While doing the walk keep track of the address to check. It might be
494   // different in different basic blocks due to PHI translation.
495   using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
496   SmallVector<BlockAddressPair, 16> WorkList;
497   // Keep track of the address we visited each block with. Bail out if we
498   // visit a block with different addresses.
499   DenseMap<BasicBlock *, Value *> Visited;
500 
501   BasicBlock::iterator FirstBBI(FirstI);
502   ++FirstBBI;
503   BasicBlock::iterator SecondBBI(SecondI);
504   BasicBlock *FirstBB = FirstI->getParent();
505   BasicBlock *SecondBB = SecondI->getParent();
506   MemoryLocation MemLoc = MemoryLocation::get(SecondI);
507   auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
508 
509   // Start checking the SecondBB.
510   WorkList.push_back(
511       std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
512   bool isFirstBlock = true;
513 
514   // Check all blocks going backward until we reach the FirstBB.
515   while (!WorkList.empty()) {
516     BlockAddressPair Current = WorkList.pop_back_val();
517     BasicBlock *B = Current.first;
518     PHITransAddr &Addr = Current.second;
519     Value *Ptr = Addr.getAddr();
520 
521     // Ignore instructions before FirstI if this is the FirstBB.
522     BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
523 
524     BasicBlock::iterator EI;
525     if (isFirstBlock) {
526       // Ignore instructions after SecondI if this is the first visit of SecondBB.
527       assert(B == SecondBB && "first block is not the store block");
528       EI = SecondBBI;
529       isFirstBlock = false;
530     } else {
531       // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
532       // In this case we also have to look at instructions after SecondI.
533       EI = B->end();
534     }
535     for (; BI != EI; ++BI) {
536       Instruction *I = &*BI;
537       if (I->mayWriteToMemory() && I != SecondI)
538         if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
539           return false;
540     }
541     if (B != FirstBB) {
542       assert(B != &FirstBB->getParent()->getEntryBlock() &&
543           "Should not hit the entry block because SI must be dominated by LI");
544       for (BasicBlock *Pred : predecessors(B)) {
545         PHITransAddr PredAddr = Addr;
546         if (PredAddr.NeedsPHITranslationFromBlock(B)) {
547           if (!PredAddr.IsPotentiallyPHITranslatable())
548             return false;
549           if (PredAddr.PHITranslateValue(B, Pred, DT, false))
550             return false;
551         }
552         Value *TranslatedPtr = PredAddr.getAddr();
553         auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
554         if (!Inserted.second) {
555           // We already visited this block before. If it was with a different
556           // address - bail out!
557           if (TranslatedPtr != Inserted.first->second)
558             return false;
559           // ... otherwise just skip it.
560           continue;
561         }
562         WorkList.push_back(std::make_pair(Pred, PredAddr));
563       }
564     }
565   }
566   return true;
567 }
568 
569 static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierStart,
570                          uint64_t &EarlierSize, int64_t LaterStart,
571                          uint64_t LaterSize, bool IsOverwriteEnd) {
572   auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
573   Align PrefAlign = EarlierIntrinsic->getDestAlign().valueOrOne();
574 
575   // We assume that memset/memcpy operates in chunks of the "largest" native
576   // type size, aligned on the same value. That means the optimal start and size
577   // of a memset/memcpy should be a multiple of the preferred alignment of that
578   // type, so there is no point in trying to reduce the store size any further,
579   // since any "extra" stores come for free anyway.
580   // On the other hand, the maximum alignment we can achieve is limited by the
581   // alignment of the initial store.
582 
583   // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
584   // "largest" native type.
585   // Note: What is the proper way to get that value?
586   // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
587   // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
588 
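  // A worked example for the IsOverwriteEnd case below (hypothetical numbers):
  // with EarlierStart = 0, EarlierSize = 32, LaterStart = 17 and PrefAlign = 8,
  // offsetToAlignment(17, 8) == 7, so ToRemoveStart = 24 and ToRemoveSize = 8.
  // The earlier intrinsic is shrunk to 24 bytes and keeps its alignment, while
  // bytes [17, 24) are simply written twice.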
589   int64_t ToRemoveStart = 0;
590   uint64_t ToRemoveSize = 0;
591   // Compute start and size of the region to remove. Make sure 'PrefAlign' is
592   // maintained on the remaining store.
593   if (IsOverwriteEnd) {
594     // Calculate the required adjustment for 'LaterStart' in order to keep the
595     // remaining store size aligned on 'PrefAlign'.
596     uint64_t Off =
597         offsetToAlignment(uint64_t(LaterStart - EarlierStart), PrefAlign);
598     ToRemoveStart = LaterStart + Off;
599     if (EarlierSize <= uint64_t(ToRemoveStart - EarlierStart))
600       return false;
601     ToRemoveSize = EarlierSize - uint64_t(ToRemoveStart - EarlierStart);
602   } else {
603     ToRemoveStart = EarlierStart;
604     assert(LaterSize >= uint64_t(EarlierStart - LaterStart) &&
605            "Not overlapping accesses?");
606     ToRemoveSize = LaterSize - uint64_t(EarlierStart - LaterStart);
607     // Calculate the required adjustment for 'ToRemoveSize' in order to keep the
608     // start of the remaining store aligned on 'PrefAlign'.
609     uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
610     if (Off != 0) {
611       if (ToRemoveSize <= (PrefAlign.value() - Off))
612         return false;
613       ToRemoveSize -= PrefAlign.value() - Off;
614     }
615     assert(isAligned(PrefAlign, ToRemoveSize) &&
616            "Should preserve selected alignment");
617   }
618 
619   assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
620   assert(EarlierSize > ToRemoveSize && "Can't remove more than original size");
621 
622   uint64_t NewSize = EarlierSize - ToRemoveSize;
623   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
624     // When shortening an atomic memory intrinsic, the newly shortened
625     // length must remain an integer multiple of the element size.
626     const uint32_t ElementSize = AMI->getElementSizeInBytes();
627     if (0 != NewSize % ElementSize)
628       return false;
629   }
630 
631   LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
632                     << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
633                     << *EarlierWrite << "\n  KILLER [" << ToRemoveStart << ", "
634                     << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
635 
636   Value *EarlierWriteLength = EarlierIntrinsic->getLength();
637   Value *TrimmedLength =
638       ConstantInt::get(EarlierWriteLength->getType(), NewSize);
639   EarlierIntrinsic->setLength(TrimmedLength);
640   EarlierIntrinsic->setDestAlignment(PrefAlign);
641 
642   if (!IsOverwriteEnd) {
643     Value *Indices[1] = {
644         ConstantInt::get(EarlierWriteLength->getType(), ToRemoveSize)};
645     GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
646         EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
647         EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
648     NewDestGEP->setDebugLoc(EarlierIntrinsic->getDebugLoc());
649     EarlierIntrinsic->setDest(NewDestGEP);
650   }
651 
652   // Finally update start and size of earlier access.
653   if (!IsOverwriteEnd)
654     EarlierStart += ToRemoveSize;
655   EarlierSize = NewSize;
656 
657   return true;
658 }
659 
660 static bool tryToShortenEnd(Instruction *EarlierWrite,
661                             OverlapIntervalsTy &IntervalMap,
662                             int64_t &EarlierStart, uint64_t &EarlierSize) {
663   if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
664     return false;
665 
666   OverlapIntervalsTy::iterator OII = --IntervalMap.end();
667   int64_t LaterStart = OII->second;
668   uint64_t LaterSize = OII->first - LaterStart;
669 
670   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
671 
672   if (LaterStart > EarlierStart &&
673       // Note: "LaterStart - EarlierStart" is known to be positive due to
674       // preceding check.
675       (uint64_t)(LaterStart - EarlierStart) < EarlierSize &&
676       // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
677       // be non negative due to preceding checks.
678       LaterSize >= EarlierSize - (uint64_t)(LaterStart - EarlierStart)) {
679     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
680                      LaterSize, true)) {
681       IntervalMap.erase(OII);
682       return true;
683     }
684   }
685   return false;
686 }
687 
688 static bool tryToShortenBegin(Instruction *EarlierWrite,
689                               OverlapIntervalsTy &IntervalMap,
690                               int64_t &EarlierStart, uint64_t &EarlierSize) {
691   if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
692     return false;
693 
694   OverlapIntervalsTy::iterator OII = IntervalMap.begin();
695   int64_t LaterStart = OII->second;
696   uint64_t LaterSize = OII->first - LaterStart;
697 
698   assert(OII->first - LaterStart >= 0 && "Size expected to be positive");
699 
700   if (LaterStart <= EarlierStart &&
701       // Note: "EarlierStart - LaterStart" is known to be non negative due to
702       // preceding check.
703       LaterSize > (uint64_t)(EarlierStart - LaterStart)) {
704     // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
705     // positive due to preceding checks.
706     assert(LaterSize - (uint64_t)(EarlierStart - LaterStart) < EarlierSize &&
707            "Should have been handled as OW_Complete");
708     if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
709                      LaterSize, false)) {
710       IntervalMap.erase(OII);
711       return true;
712     }
713   }
714   return false;
715 }
716 
717 static bool removePartiallyOverlappedStores(const DataLayout &DL,
718                                             InstOverlapIntervalsTy &IOL,
719                                             const TargetLibraryInfo &TLI) {
720   bool Changed = false;
721   for (auto OI : IOL) {
722     Instruction *EarlierWrite = OI.first;
723     MemoryLocation Loc = getLocForWrite(EarlierWrite, TLI);
724     assert(isRemovable(EarlierWrite) && "Expect only removable instruction");
725 
726     const Value *Ptr = Loc.Ptr->stripPointerCasts();
727     int64_t EarlierStart = 0;
728     uint64_t EarlierSize = Loc.Size.getValue();
729     GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
730     OverlapIntervalsTy &IntervalMap = OI.second;
731     Changed |=
732         tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
733     if (IntervalMap.empty())
734       continue;
735     Changed |=
736         tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
737   }
738   return Changed;
739 }
740 
741 static Constant *tryToMergePartialOverlappingStores(
742     StoreInst *Earlier, StoreInst *Later, int64_t InstWriteOffset,
743     int64_t DepWriteOffset, const DataLayout &DL, BatchAAResults &AA,
744     DominatorTree *DT) {
745 
746   if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
747       DL.typeSizeEqualsStoreSize(Earlier->getValueOperand()->getType()) &&
748       Later && isa<ConstantInt>(Later->getValueOperand()) &&
749       DL.typeSizeEqualsStoreSize(Later->getValueOperand()->getType()) &&
750       memoryIsNotModifiedBetween(Earlier, Later, AA, DL, DT)) {
751     // If the store we find is:
752     //   a) partially overwritten by the store to 'Loc'
753     //   b) the later store is fully contained in the earlier one and
754     //   c) they both have a constant value
755     //   d) none of the two stores need padding
756     // Merge the two stores, replacing the earlier store's value with a
757     // merge of both values.
758     // TODO: Deal with other constant types (vectors, etc), and probably
759     // some mem intrinsics (if needed)
760 
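    // For illustration (hypothetical, little-endian): if Earlier stores
    // i32 0x11223344 and Later stores i8 0xAA one byte into it, then
    // BitOffsetDiff = 8, LShiftAmount = 8, the mask selects bits [8, 16),
    // and the merged constant is 0x1122AA44, matching the bytes in memory
    // after both stores.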
761     APInt EarlierValue =
762         cast<ConstantInt>(Earlier->getValueOperand())->getValue();
763     APInt LaterValue = cast<ConstantInt>(Later->getValueOperand())->getValue();
764     unsigned LaterBits = LaterValue.getBitWidth();
765     assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
766     LaterValue = LaterValue.zext(EarlierValue.getBitWidth());
767 
768     // Offset of the smaller store inside the larger store
769     unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
770     unsigned LShiftAmount = DL.isBigEndian() ? EarlierValue.getBitWidth() -
771                                                    BitOffsetDiff - LaterBits
772                                              : BitOffsetDiff;
773     APInt Mask = APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
774                                    LShiftAmount + LaterBits);
775     // Clear the bits we'll be replacing, then OR with the smaller
776     // store, shifted appropriately.
777     APInt Merged = (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
778     LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Earlier: " << *Earlier
779                       << "\n  Later: " << *Later
780                       << "\n  Merged Value: " << Merged << '\n');
781     return ConstantInt::get(Earlier->getValueOperand()->getType(), Merged);
782   }
783   return nullptr;
784 }
785 
786 namespace {
787 // Returns true if \p I is an intrinsic that does not read or write memory.
788 bool isNoopIntrinsic(Instruction *I) {
789   if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
790     switch (II->getIntrinsicID()) {
791     case Intrinsic::lifetime_start:
792     case Intrinsic::lifetime_end:
793     case Intrinsic::invariant_end:
794     case Intrinsic::launder_invariant_group:
795     case Intrinsic::assume:
796       return true;
797     case Intrinsic::dbg_addr:
798     case Intrinsic::dbg_declare:
799     case Intrinsic::dbg_label:
800     case Intrinsic::dbg_value:
801       llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
802     default:
803       return false;
804     }
805   }
806   return false;
807 }
808 
809 // Check if we can ignore \p D for DSE.
810 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
811   Instruction *DI = D->getMemoryInst();
812   // Calls that only access inaccessible memory cannot read or write any memory
813   // locations we consider for elimination.
814   if (auto *CB = dyn_cast<CallBase>(DI))
815     if (CB->onlyAccessesInaccessibleMemory())
816       return true;
817 
818   // We can eliminate stores to locations not visible to the caller across
819   // throwing instructions.
820   if (DI->mayThrow() && !DefVisibleToCaller)
821     return true;
822 
823   // We can remove the dead stores, irrespective of the fence and its ordering
824   // (release/acquire/seq_cst). Fences only constrain the ordering of
825   // already visible stores; they do not make a store visible to other
826   // threads. So, skipping over a fence does not change a store from being
827   // dead.
828   if (isa<FenceInst>(DI))
829     return true;
830 
831   // Skip intrinsics that do not really read or modify memory.
832   if (isNoopIntrinsic(D->getMemoryInst()))
833     return true;
834 
835   return false;
836 }
837 
838 struct DSEState {
839   Function &F;
840   AliasAnalysis &AA;
841 
842   /// The single BatchAA instance that is used to cache AA queries. It will
843   /// not be invalidated over the whole run. This is safe, because:
844   /// 1. Only memory writes are removed, so the alias cache for memory
845   ///    locations remains valid.
846   /// 2. No new instructions are added (only instructions removed), so cached
847   ///    information for a deleted value cannot be accessed by a re-used new
848   ///    value pointer.
849   BatchAAResults BatchAA;
850 
851   MemorySSA &MSSA;
852   DominatorTree &DT;
853   PostDominatorTree &PDT;
854   const TargetLibraryInfo &TLI;
855   const DataLayout &DL;
856 
857   // All MemoryDefs that potentially could kill other MemDefs.
858   SmallVector<MemoryDef *, 64> MemDefs;
859   // Any that should be skipped as they are already deleted
860   SmallPtrSet<MemoryAccess *, 4> SkipStores;
861   // Keep track of all of the objects that are invisible to the caller before
862   // the function returns.
863   // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
864   DenseMap<const Value *, bool> InvisibleToCallerBeforeRet;
865   // Keep track of all of the objects that are invisible to the caller after
866   // the function returns.
867   DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
868   // Keep track of blocks with throwing instructions not modeled in MemorySSA.
869   SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
870   // Post-order numbers for each basic block. Used to figure out if memory
871   // accesses are executed before another access.
872   DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
873 
874   /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
875   /// basic block.
876   DenseMap<BasicBlock *, InstOverlapIntervalsTy> IOLs;
877 
878   DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
879            PostDominatorTree &PDT, const TargetLibraryInfo &TLI)
880       : F(F), AA(AA), BatchAA(AA), MSSA(MSSA), DT(DT), PDT(PDT), TLI(TLI),
881         DL(F.getParent()->getDataLayout()) {}
882 
883   static DSEState get(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
884                       DominatorTree &DT, PostDominatorTree &PDT,
885                       const TargetLibraryInfo &TLI) {
886     DSEState State(F, AA, MSSA, DT, PDT, TLI);
887     // Collect blocks with throwing instructions not modeled in MemorySSA and
888     // alloc-like objects.
889     unsigned PO = 0;
890     for (BasicBlock *BB : post_order(&F)) {
891       State.PostOrderNumbers[BB] = PO++;
892       for (Instruction &I : *BB) {
893         MemoryAccess *MA = MSSA.getMemoryAccess(&I);
894         if (I.mayThrow() && !MA)
895           State.ThrowingBlocks.insert(I.getParent());
896 
897         auto *MD = dyn_cast_or_null<MemoryDef>(MA);
898         if (MD && State.MemDefs.size() < MemorySSADefsPerBlockLimit &&
899             (State.getLocForWriteEx(&I) || State.isMemTerminatorInst(&I)))
900           State.MemDefs.push_back(MD);
901       }
902     }
903 
904     // Treat byval or inalloca arguments the same as Allocas: stores to them are
905     // dead at the end of the function.
906     for (Argument &AI : F.args())
907       if (AI.hasPassPointeeByValueCopyAttr()) {
908         // For byval, the caller doesn't know the address of the allocation.
909         if (AI.hasByValAttr())
910           State.InvisibleToCallerBeforeRet.insert({&AI, true});
911         State.InvisibleToCallerAfterRet.insert({&AI, true});
912       }
913 
914     return State;
915   }
916 
917   /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
918   /// instruction) completely overwrites a store to the 'Earlier' location
919   /// (by \p EarlierI instruction).
920   /// Return OW_MaybePartial if \p Later does not completely overwrite
921   /// \p Earlier, but they both write to the same underlying object. In that
922   /// case, use isPartialOverwrite to check if \p Later partially overwrites
923   /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
924   OverwriteResult
925   isOverwrite(const Instruction *LaterI, const Instruction *EarlierI,
926               const MemoryLocation &Later, const MemoryLocation &Earlier,
927               int64_t &EarlierOff, int64_t &LaterOff) {
928     // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
929     // get imprecise values here, though (except for unknown sizes).
930     if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise()) {
931       // In case no constant size is known, try to use the IR values for the
932       // number of bytes written and check if they match.
933       const auto *LaterMemI = dyn_cast<MemIntrinsic>(LaterI);
934       const auto *EarlierMemI = dyn_cast<MemIntrinsic>(EarlierI);
935       if (LaterMemI && EarlierMemI) {
936         const Value *LaterV = LaterMemI->getLength();
937         const Value *EarlierV = EarlierMemI->getLength();
938         if (LaterV == EarlierV && BatchAA.isMustAlias(Earlier, Later))
939           return OW_Complete;
940       }
941 
942       // Masked stores have imprecise locations, but we can reason about them
943       // to some extent.
944       return isMaskedStoreOverwrite(LaterI, EarlierI, BatchAA);
945     }
946 
947     const uint64_t LaterSize = Later.Size.getValue();
948     const uint64_t EarlierSize = Earlier.Size.getValue();
949 
950     // Query the alias information
951     AliasResult AAR = BatchAA.alias(Later, Earlier);
952 
953     // If the start pointers are the same, we just have to compare sizes to see if
954     // the later store was larger than the earlier store.
955     if (AAR == AliasResult::MustAlias) {
956       // Make sure that the Later size is >= the Earlier size.
957       if (LaterSize >= EarlierSize)
958         return OW_Complete;
959     }
960 
961     // If we hit a partial alias we may have a full overwrite
962     if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
963       int32_t Off = AAR.getOffset();
964       if (Off >= 0 && (uint64_t)Off + EarlierSize <= LaterSize)
965         return OW_Complete;
966     }
967 
968     // Check to see if the later store is to the entire object (either a global,
969     // an alloca, or a byval/inalloca argument).  If so, then it clearly
970     // overwrites any other store to the same object.
971     const Value *P1 = Earlier.Ptr->stripPointerCasts();
972     const Value *P2 = Later.Ptr->stripPointerCasts();
973     const Value *UO1 = getUnderlyingObject(P1), *UO2 = getUnderlyingObject(P2);
974 
975     // If we can't resolve the same pointers to the same object, then we can't
976     // analyze them at all.
977     if (UO1 != UO2)
978       return OW_Unknown;
979 
980     // If the "Later" store is to a recognizable object, get its size.
981     uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, &F);
982     if (ObjectSize != MemoryLocation::UnknownSize)
983       if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
984         return OW_Complete;
985 
986     // Okay, we have stores to two completely different pointers.  Try to
987     // decompose the pointer into a "base + constant_offset" form.  If the base
988     // pointers are equal, then we can reason about the two stores.
989     EarlierOff = 0;
990     LaterOff = 0;
991     const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
992     const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);
993 
994     // If the base pointers still differ, we have two completely different stores.
995     if (BP1 != BP2)
996       return OW_Unknown;
997 
998     // The later access completely overlaps the earlier store if and only if
999     // both the start and the end of the earlier one are "inside" the later one:
1000     //    |<->|--earlier--|<->|
1001     //    |-------later-------|
1002     // Accesses may overlap if and only if start of one of them is "inside"
1003     // another one:
1004     //    |<->|--earlier--|<----->|
1005     //    |-------later-------|
1006     //           OR
1007     //    |----- earlier -----|
1008     //    |<->|---later---|<----->|
1009     //
1010     // We have to be careful here as *Off is signed while *.Size is unsigned.
1011 
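    // Two hypothetical examples of the checks below: with EarlierOff = 4,
    // EarlierSize = 4, LaterOff = 0 and LaterSize = 8 the earlier access ends
    // exactly at the later one's end, so the result is OW_Complete. With
    // EarlierSize = 8 instead, the earlier access extends past the later one
    // and the result is OW_MaybePartial.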
1012     // Check if the earlier access starts "not before" the later one.
1013     if (EarlierOff >= LaterOff) {
1014       // If the earlier access ends "not after" the later access then the earlier
1015       // one is completely overwritten by the later one.
1016       if (uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
1017         return OW_Complete;
1018       // If start of the earlier access is "before" end of the later access then
1019       // accesses overlap.
1020       else if ((uint64_t)(EarlierOff - LaterOff) < LaterSize)
1021         return OW_MaybePartial;
1022     }
1023     // If start of the later access is "before" end of the earlier access then
1024     // accesses overlap.
1025     else if ((uint64_t)(LaterOff - EarlierOff) < EarlierSize) {
1026       return OW_MaybePartial;
1027     }
1028 
1029     // Can reach here only if accesses are known not to overlap. There is no
1030     // dedicated code to indicate no overlap so signal "unknown".
1031     return OW_Unknown;
1032   }
1033 
1034   bool isInvisibleToCallerAfterRet(const Value *V) {
1035     if (isa<AllocaInst>(V))
1036       return true;
1037     auto I = InvisibleToCallerAfterRet.insert({V, false});
1038     if (I.second) {
1039       if (!isInvisibleToCallerBeforeRet(V)) {
1040         I.first->second = false;
1041       } else {
1042         auto *Inst = dyn_cast<Instruction>(V);
1043         if (Inst && isAllocLikeFn(Inst, &TLI))
1044           I.first->second = !PointerMayBeCaptured(V, true, false);
1045       }
1046     }
1047     return I.first->second;
1048   }
1049 
1050   bool isInvisibleToCallerBeforeRet(const Value *V) {
1051     if (isa<AllocaInst>(V))
1052       return true;
1053     auto I = InvisibleToCallerBeforeRet.insert({V, false});
1054     if (I.second) {
1055       auto *Inst = dyn_cast<Instruction>(V);
1056       if (Inst && isAllocLikeFn(Inst, &TLI))
1057         // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1058         // with the killing MemoryDef. But we refrain from doing so for now to
1059         // limit compile time, and this does not change the number
1060         // of stores removed on a large test set in practice.
1061         I.first->second = !PointerMayBeCaptured(V, false, true);
1062     }
1063     return I.first->second;
1064   }
1065 
1066   Optional<MemoryLocation> getLocForWriteEx(Instruction *I) const {
1067     if (!I->mayWriteToMemory())
1068       return None;
1069 
1070     if (auto *MTI = dyn_cast<AnyMemIntrinsic>(I))
1071       return {MemoryLocation::getForDest(MTI)};
1072 
1073     if (auto *CB = dyn_cast<CallBase>(I)) {
1074       // If the function may write to memory we do not know about, bail out.
1075       if (!CB->onlyAccessesArgMemory() &&
1076           !CB->onlyAccessesInaccessibleMemOrArgMem())
1077         return None;
1078 
1079       LibFunc LF;
1080       if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
1081         switch (LF) {
1082         case LibFunc_strcpy:
1083         case LibFunc_strncpy:
1084         case LibFunc_strcat:
1085         case LibFunc_strncat:
1086           return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1087         default:
1088           break;
1089         }
1090       }
1091       switch (CB->getIntrinsicID()) {
1092       case Intrinsic::init_trampoline:
1093         return {MemoryLocation::getAfter(CB->getArgOperand(0))};
1094       case Intrinsic::masked_store:
1095         return {MemoryLocation::getForArgument(CB, 1, TLI)};
1096       default:
1097         break;
1098       }
1099       return None;
1100     }
1101 
1102     return MemoryLocation::getOrNone(I);
1103   }
1104 
1105   /// Returns true if \p UseInst completely overwrites \p DefLoc
1106   /// (stored by \p DefInst).
1107   bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1108                            Instruction *UseInst) {
1109     // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1110     // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1111     // MemoryDef.
1112     if (!UseInst->mayWriteToMemory())
1113       return false;
1114 
1115     if (auto *CB = dyn_cast<CallBase>(UseInst))
1116       if (CB->onlyAccessesInaccessibleMemory())
1117         return false;
1118 
1119     int64_t InstWriteOffset, DepWriteOffset;
1120     if (auto CC = getLocForWriteEx(UseInst))
1121       return isOverwrite(UseInst, DefInst, *CC, DefLoc, DepWriteOffset,
1122                          InstWriteOffset) == OW_Complete;
1123     return false;
1124   }
1125 
1126   /// Returns true if \p Def is not read before returning from the function.
1127   bool isWriteAtEndOfFunction(MemoryDef *Def) {
1128     LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
1129                       << *Def->getMemoryInst()
1130                       << ") is at the end of the function\n");
1131 
1132     auto MaybeLoc = getLocForWriteEx(Def->getMemoryInst());
1133     if (!MaybeLoc) {
1134       LLVM_DEBUG(dbgs() << "  ... could not get location for write.\n");
1135       return false;
1136     }
1137 
1138     SmallVector<MemoryAccess *, 4> WorkList;
1139     SmallPtrSet<MemoryAccess *, 8> Visited;
1140     auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1141       if (!Visited.insert(Acc).second)
1142         return;
1143       for (Use &U : Acc->uses())
1144         WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1145     };
1146     PushMemUses(Def);
1147     for (unsigned I = 0; I < WorkList.size(); I++) {
1148       if (WorkList.size() >= MemorySSAScanLimit) {
1149         LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
1150         return false;
1151       }
1152 
1153       MemoryAccess *UseAccess = WorkList[I];
1154       // Simply adding the users of MemoryPhi to the worklist is not enough,
1155       // because we might miss read clobbers in different iterations of a loop,
1156       // for example.
1157       // TODO: Add support for phi translation to handle the loop case.
1158       if (isa<MemoryPhi>(UseAccess))
1159         return false;
1160 
1161       // TODO: Checking for aliasing is expensive. Consider reducing the amount
1162       // of times this is called and/or caching it.
1163       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1164       if (isReadClobber(*MaybeLoc, UseInst)) {
1165         LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
1166         return false;
1167       }
1168 
1169       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1170         PushMemUses(UseDef);
1171     }
1172     return true;
1173   }
1174 
1175   /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1176   /// pair with the MemoryLocation terminated by \p I and a boolean flag
1177   /// indicating whether \p I is a free-like call.
1178   Optional<std::pair<MemoryLocation, bool>>
1179   getLocForTerminator(Instruction *I) const {
1180     uint64_t Len;
1181     Value *Ptr;
1182     if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1183                                                       m_Value(Ptr))))
1184       return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1185 
1186     if (auto *CB = dyn_cast<CallBase>(I)) {
1187       if (isFreeCall(I, &TLI))
1188         return {std::make_pair(MemoryLocation::getAfter(CB->getArgOperand(0)),
1189                                true)};
1190     }
1191 
1192     return None;
1193   }
1194 
1195   /// Returns true if \p I is a memory terminator instruction like
1196   /// llvm.lifetime.end or free.
1197   bool isMemTerminatorInst(Instruction *I) const {
1198     IntrinsicInst *II = dyn_cast<IntrinsicInst>(I);
1199     return (II && II->getIntrinsicID() == Intrinsic::lifetime_end) ||
1200            isFreeCall(I, &TLI);
1201   }
1202 
1203   /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1204   /// instruction \p AccessI.
1205   bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1206                        Instruction *MaybeTerm) {
1207     Optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1208         getLocForTerminator(MaybeTerm);
1209 
1210     if (!MaybeTermLoc)
1211       return false;
1212 
1213     // If the terminator is a free-like call, all accesses to the underlying
1214     // object can be considered terminated.
1215     if (getUnderlyingObject(Loc.Ptr) !=
1216         getUnderlyingObject(MaybeTermLoc->first.Ptr))
1217       return false;
1218 
1219     auto TermLoc = MaybeTermLoc->first;
1220     if (MaybeTermLoc->second) {
1221       const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1222       return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1223     }
1224     int64_t InstWriteOffset, DepWriteOffset;
1225     return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, DepWriteOffset,
1226                        InstWriteOffset) == OW_Complete;
1227   }
1228 
1229   // Returns true if \p Use may read from \p DefLoc.
1230   bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1231     if (isNoopIntrinsic(UseInst))
1232       return false;
1233 
1234     // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1235     // treated as read clobbers.
1236     if (auto SI = dyn_cast<StoreInst>(UseInst))
1237       return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1238 
1239     if (!UseInst->mayReadFromMemory())
1240       return false;
1241 
1242     if (auto *CB = dyn_cast<CallBase>(UseInst))
1243       if (CB->onlyAccessesInaccessibleMemory())
1244         return false;
1245 
1246     // NOTE: For calls, the number of stores removed could be slightly improved
1247     // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that proved
1248     // too expensive compared to the benefits in practice. For now, avoid more
1249     // expensive analysis to limit compile-time.
1250     return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1251   }
1252 
1253   /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1254   /// loop. In particular, this guarantees that it only references a single
1255   /// MemoryLocation during execution of the containing function.
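       ///
       /// Sketch: allocas, allocation-like calls, non-instruction pointers
       /// (arguments, globals) and any pointer computed in the entry block
       /// (which executes exactly once) qualify, as do GEPs with all-constant
       /// indices whose base is an alloca, allocation call, argument or
       /// global. A GEP indexed by a loop induction variable, e.g.
       ///   %p = getelementptr i32, i32* %base, i64 %iv
       /// inside a loop, does not.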
1256   bool IsGuaranteedLoopInvariant(Value *Ptr) {
1257     auto IsGuaranteedLoopInvariantBase = [this](Value *Ptr) {
1258       Ptr = Ptr->stripPointerCasts();
1259       if (auto *I = dyn_cast<Instruction>(Ptr)) {
1260         if (isa<AllocaInst>(Ptr))
1261           return true;
1262 
1263         if (isAllocLikeFn(I, &TLI))
1264           return true;
1265 
1266         return false;
1267       }
1268       return true;
1269     };
1270 
1271     Ptr = Ptr->stripPointerCasts();
1272     if (auto *I = dyn_cast<Instruction>(Ptr)) {
1273       if (I->getParent()->isEntryBlock())
1274         return true;
1275     }
1276     if (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
1277       return IsGuaranteedLoopInvariantBase(GEP->getPointerOperand()) &&
1278              GEP->hasAllConstantIndices();
1279     }
1280     return IsGuaranteedLoopInvariantBase(Ptr);
1281   }
1282 
1283   // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1284   // no read access between them or on any other path to a function exit block
1285   // if \p DefLoc is not accessible after the function returns. If there is no
1286   // such MemoryDef, return None. The returned value may not (completely)
1287   // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1288   // MemoryUse (read).
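       //
       // Minimal sketch of the intended result (MemorySSA notation):
       //   1 = MemoryDef(liveOnEntry)   store i8 0, i8* %p   <- returned candidate
       //   2 = MemoryDef(1)             store i8 1, i8* %p   <- KillingDef
       // Starting from StartAccess (here 1, KillingDef's defining access), the
       // walk returns 1 provided no access in between, or on another path to a
       // function exit for locations visible after the return, may read %p.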
1289   Optional<MemoryAccess *>
1290   getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1291                   const MemoryLocation &DefLoc, const Value *DefUO,
1292                   unsigned &ScanLimit, unsigned &WalkerStepLimit,
1293                   bool IsMemTerm, unsigned &PartialLimit) {
1294     if (ScanLimit == 0 || WalkerStepLimit == 0) {
1295       LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1296       return None;
1297     }
1298 
1299     MemoryAccess *Current = StartAccess;
1300     Instruction *KillingI = KillingDef->getMemoryInst();
1301     bool StepAgain;
1302     LLVM_DEBUG(dbgs() << "  trying to get dominating access\n");
1303 
1304     // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1305     Optional<MemoryLocation> CurrentLoc;
1306     do {
1307       StepAgain = false;
1308       LLVM_DEBUG({
1309         dbgs() << "   visiting " << *Current;
1310         if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1311           dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1312                  << ")";
1313         dbgs() << "\n";
1314       });
1315 
1316       // Reached TOP.
1317       if (MSSA.isLiveOnEntryDef(Current)) {
1318         LLVM_DEBUG(dbgs() << "   ...  found LiveOnEntryDef\n");
1319         return None;
1320       }
1321 
1322       // Cost of a step. Accesses in the same block are more likely to be valid
1323       // candidates for elimination, hence consider them cheaper.
1324       unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1325                               ? MemorySSASameBBStepCost
1326                               : MemorySSAOtherBBStepCost;
1327       if (WalkerStepLimit <= StepCost) {
1328         LLVM_DEBUG(dbgs() << "   ...  hit walker step limit\n");
1329         return None;
1330       }
1331       WalkerStepLimit -= StepCost;
1332 
1333       // Return for MemoryPhis. They cannot be eliminated directly and the
1334       // caller is responsible for traversing them.
1335       if (isa<MemoryPhi>(Current)) {
1336         LLVM_DEBUG(dbgs() << "   ...  found MemoryPhi\n");
1337         return Current;
1338       }
1339 
1340       // Below, check if CurrentDef is a valid candidate to be eliminated by
1341       // KillingDef. If it is not, check the next candidate.
1342       MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1343       Instruction *CurrentI = CurrentDef->getMemoryInst();
1344 
1345       if (canSkipDef(CurrentDef, !isInvisibleToCallerBeforeRet(DefUO))) {
1346         StepAgain = true;
1347         Current = CurrentDef->getDefiningAccess();
1348         continue;
1349       }
1350 
1351       // Before we try to remove anything, check for any extra throwing
1352       // instructions that block us from DSEing
1353       if (mayThrowBetween(KillingI, CurrentI, DefUO)) {
1354         LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
1355         return None;
1356       }
1357 
1358       // Check for anything that looks like it will be a barrier to further
1359       // removal
1360       if (isDSEBarrier(DefUO, CurrentI)) {
1361         LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
1362         return None;
1363       }
1364 
1365       // If Current is known to be on path that reads DefLoc or is a read
1366       // clobber, bail out, as the path is not profitable. We skip this check
1367       // for intrinsic calls, because the code knows how to handle memcpy
1368       // intrinsics.
1369       if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(DefLoc, CurrentI))
1370         return None;
1371 
1372       // Quick check if there are direct uses that are read-clobbers.
1373       if (any_of(Current->uses(), [this, &DefLoc, StartAccess](Use &U) {
1374             if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1375               return !MSSA.dominates(StartAccess, UseOrDef) &&
1376                      isReadClobber(DefLoc, UseOrDef->getMemoryInst());
1377             return false;
1378           })) {
1379         LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
1380         return None;
1381       }
1382 
1383       // If Current cannot be analyzed or is not removable, check the next
1384       // candidate.
1385       if (!hasAnalyzableMemoryWrite(CurrentI, TLI) || !isRemovable(CurrentI)) {
1386         StepAgain = true;
1387         Current = CurrentDef->getDefiningAccess();
1388         continue;
1389       }
1390 
1391       // If Current does not have an analyzable write location, skip it
1392       CurrentLoc = getLocForWriteEx(CurrentI);
1393       if (!CurrentLoc) {
1394         StepAgain = true;
1395         Current = CurrentDef->getDefiningAccess();
1396         continue;
1397       }
1398 
1399       // AliasAnalysis does not account for loops. Limit elimination to
1400       // candidates for which we can guarantee they always store to the same
1401       // memory location and not multiple locations in a loop.
1402       if (Current->getBlock() != KillingDef->getBlock() &&
1403           !IsGuaranteedLoopInvariant(const_cast<Value *>(CurrentLoc->Ptr))) {
1404         StepAgain = true;
1405         Current = CurrentDef->getDefiningAccess();
1406         WalkerStepLimit -= 1;
1407         continue;
1408       }
1409 
1410       if (IsMemTerm) {
1411         // If the killing def is a memory terminator (e.g. lifetime.end), check
1412         // the next candidate if Current does not write to the same
1413         // underlying object as the terminator.
1414         if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
1415           StepAgain = true;
1416           Current = CurrentDef->getDefiningAccess();
1417         }
1418         continue;
1419       } else {
1420         int64_t InstWriteOffset, DepWriteOffset;
1421         auto OR = isOverwrite(KillingI, CurrentI, DefLoc, *CurrentLoc,
1422                               DepWriteOffset, InstWriteOffset);
1423         // If Current does not write to the same object as KillingDef, check
1424         // the next candidate.
1425         if (OR == OW_Unknown) {
1426           StepAgain = true;
1427           Current = CurrentDef->getDefiningAccess();
1428         } else if (OR == OW_MaybePartial) {
1429           // If KillingDef only partially overwrites Current, check the next
1430           // candidate if the partial step limit is exceeded. This aggressively
1431           // limits the number of candidates for partial store elimination,
1432           // which are less likely to be removable in the end.
1433           if (PartialLimit <= 1) {
1434             StepAgain = true;
1435             Current = CurrentDef->getDefiningAccess();
1436             WalkerStepLimit -= 1;
1437             continue;
1438           }
1439           PartialLimit -= 1;
1440         }
1441       }
1442     } while (StepAgain);
1443 
1444     // Accesses to objects accessible after the function returns can only be
1445     // eliminated if the access is killed along all paths to the exit. Collect
1446     // the blocks with killing (=completely overwriting MemoryDefs) and check if
1447     // they cover all paths from EarlierAccess to any function exit.
1448     SmallPtrSet<Instruction *, 16> KillingDefs;
1449     KillingDefs.insert(KillingDef->getMemoryInst());
1450     MemoryAccess *EarlierAccess = Current;
1451     Instruction *EarlierMemInst =
1452         cast<MemoryDef>(EarlierAccess)->getMemoryInst();
1453     LLVM_DEBUG(dbgs() << "  Checking for reads of " << *EarlierAccess << " ("
1454                       << *EarlierMemInst << ")\n");
1455 
1456     SmallSetVector<MemoryAccess *, 32> WorkList;
1457     auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1458       for (Use &U : Acc->uses())
1459         WorkList.insert(cast<MemoryAccess>(U.getUser()));
1460     };
1461     PushMemUses(EarlierAccess);
1462 
1463     // Optimistically collect all accesses for reads. If we do not find any
1464     // read clobbers, add them to the cache.
1465     SmallPtrSet<MemoryAccess *, 16> KnownNoReads;
1466     if (!EarlierMemInst->mayReadFromMemory())
1467       KnownNoReads.insert(EarlierAccess);
1468     // Check if EarlierDef may be read.
1469     for (unsigned I = 0; I < WorkList.size(); I++) {
1470       MemoryAccess *UseAccess = WorkList[I];
1471 
1472       LLVM_DEBUG(dbgs() << "   " << *UseAccess);
1473       // Bail out if the number of accesses to check exceeds the scan limit.
1474       if (ScanLimit < (WorkList.size() - I)) {
1475         LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
1476         return None;
1477       }
1478       --ScanLimit;
1479       NumDomMemDefChecks++;
1480       KnownNoReads.insert(UseAccess);
1481 
1482       if (isa<MemoryPhi>(UseAccess)) {
1483         if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1484               return DT.properlyDominates(KI->getParent(),
1485                                           UseAccess->getBlock());
1486             })) {
1487           LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1488           continue;
1489         }
1490         LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
1491         PushMemUses(UseAccess);
1492         continue;
1493       }
1494 
1495       Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1496       LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1497 
1498       if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1499             return DT.dominates(KI, UseInst);
1500           })) {
1501         LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1502         continue;
1503       }
1504 
1505       // A memory terminator kills all preceding MemoryDefs and all succeeding
1506       // MemoryAccesses. We do not have to check its users.
1507       if (isMemTerminator(*CurrentLoc, EarlierMemInst, UseInst)) {
1508         LLVM_DEBUG(
1509             dbgs()
1510             << " ... skipping, memterminator invalidates following accesses\n");
1511         continue;
1512       }
1513 
1514       if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1515         LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
1516         PushMemUses(UseAccess);
1517         continue;
1518       }
1519 
1520       if (UseInst->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO)) {
1521         LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
1522         return None;
1523       }
1524 
1525       // Uses which may read the original MemoryDef mean we cannot eliminate the
1526       // original MD. Stop walk.
1527       if (isReadClobber(*CurrentLoc, UseInst)) {
1528         LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
1529         return None;
1530       }
1531 
1532       // For the KillingDef and EarlierAccess we only have to check if it reads
1533       // the memory location.
1534       // TODO: It would probably be better to check for self-reads before
1535       // calling the function.
1536       if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
1537         LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
1538         continue;
1539       }
1540 
1541       // Check all uses for MemoryDefs, except for defs completely overwriting
1542       // the original location. Otherwise we have to check uses of *all*
1543       // MemoryDefs we discover, including non-aliasing ones, because we might
1544       // otherwise miss cases like the following
1545       //   1 = Def(LoE) ; <----- EarlierDef stores [0,1]
1546       //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
1547       //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
1548       //                  (The Use points to the *first* Def it may alias)
1549       //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
1550       //                  stores [0,1]
1551       if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1552         if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) {
1553           if (!isInvisibleToCallerAfterRet(DefUO) &&
1554               UseAccess != EarlierAccess) {
1555             BasicBlock *MaybeKillingBlock = UseInst->getParent();
1556             if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1557                 PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
1558 
1559               LLVM_DEBUG(dbgs()
1560                          << "    ... found killing def " << *UseInst << "\n");
1561               KillingDefs.insert(UseInst);
1562             }
1563           }
1564         } else
1565           PushMemUses(UseDef);
1566       }
1567     }
1568 
1569     // For accesses to locations visible after the function returns, make sure
1570     // that the location is killed (=overwritten) along all paths from
1571     // EarlierAccess to the exit.
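         // Sketched CFG (illustrative):
         //        entry (EarlierAccess)
         //        /                   \
         //   if.then (killing store)  if.else
         //        \                   /
         //                exit
         // A killing store only in if.then leaves the path through if.else
         // unkilled, so EarlierAccess must be kept; killing stores covering
         // all paths to the exit (or a killing common post-dominator) allow
         // elimination.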
1572     if (!isInvisibleToCallerAfterRet(DefUO)) {
1573       SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1574       for (Instruction *KD : KillingDefs)
1575         KillingBlocks.insert(KD->getParent());
1576       assert(!KillingBlocks.empty() &&
1577              "Expected at least a single killing block");
1578 
1579       // Find the common post-dominator of all killing blocks.
1580       BasicBlock *CommonPred = *KillingBlocks.begin();
1581       for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
1582            I != E; I++) {
1583         if (!CommonPred)
1584           break;
1585         CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);
1586       }
1587 
1588       // If CommonPred is in the set of killing blocks, just check if it
1589       // post-dominates EarlierAccess.
1590       if (KillingBlocks.count(CommonPred)) {
1591         if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
1592           return {EarlierAccess};
1593         return None;
1594       }
1595 
1596       // If the common post-dominator does not post-dominate EarlierAccess,
1597       // there is a path from EarlierAccess to an exit not going through a
1598       // killing block.
1599       if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
1600         SetVector<BasicBlock *> WorkList;
1601 
1602         // If CommonPred is null, there are multiple exits from the function.
1603         // They all have to be added to the worklist.
1604         if (CommonPred)
1605           WorkList.insert(CommonPred);
1606         else
1607           for (BasicBlock *R : PDT.roots())
1608             WorkList.insert(R);
1609 
1610         NumCFGTries++;
1611         // Check if all paths starting from an exit node go through one of the
1612         // killing blocks before reaching EarlierAccess.
1613         for (unsigned I = 0; I < WorkList.size(); I++) {
1614           NumCFGChecks++;
1615           BasicBlock *Current = WorkList[I];
1616           if (KillingBlocks.count(Current))
1617             continue;
1618           if (Current == EarlierAccess->getBlock())
1619             return None;
1620 
1621           // EarlierAccess is reachable from the entry, so we don't have to
1622           // explore unreachable blocks further.
1623           if (!DT.isReachableFromEntry(Current))
1624             continue;
1625 
1626           for (BasicBlock *Pred : predecessors(Current))
1627             WorkList.insert(Pred);
1628 
1629           if (WorkList.size() >= MemorySSAPathCheckLimit)
1630             return None;
1631         }
1632         NumCFGSuccess++;
1633         return {EarlierAccess};
1634       }
1635       return None;
1636     }
1637 
1638     // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is
1639     // potentially dead.
1640     return {EarlierAccess};
1641   }
1642 
1643   // Delete dead memory defs
1644   void deleteDeadInstruction(Instruction *SI) {
1645     MemorySSAUpdater Updater(&MSSA);
1646     SmallVector<Instruction *, 32> NowDeadInsts;
1647     NowDeadInsts.push_back(SI);
1648     --NumFastOther;
1649 
1650     while (!NowDeadInsts.empty()) {
1651       Instruction *DeadInst = NowDeadInsts.pop_back_val();
1652       ++NumFastOther;
1653 
1654       // Try to preserve debug information attached to the dead instruction.
1655       salvageDebugInfo(*DeadInst);
1656       salvageKnowledge(DeadInst);
1657 
1658       // Remove the Instruction from MSSA.
1659       if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
1660         if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
1661           SkipStores.insert(MD);
1662         }
1663         Updater.removeMemoryAccess(MA);
1664       }
1665 
1666       auto I = IOLs.find(DeadInst->getParent());
1667       if (I != IOLs.end())
1668         I->second.erase(DeadInst);
1669       // Remove its operands
1670       for (Use &O : DeadInst->operands())
1671         if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1672           O = nullptr;
1673           if (isInstructionTriviallyDead(OpI, &TLI))
1674             NowDeadInsts.push_back(OpI);
1675         }
1676 
1677       DeadInst->eraseFromParent();
1678     }
1679   }
1680 
1681   // Check for any extra throws between SI and NI that block DSE.  This only
1682   // checks extra maythrows (those that aren't MemoryDefs). MemoryDefs that may
1683   // throw are handled during the walk from one def to the next.
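       //
       // Sketch (@may_throw is a placeholder for any potentially throwing
       // callee): in
       //   store i32 0, i32* @g        ; NI, candidate for removal
       //   call void @may_throw()
       //   store i32 1, i32* @g        ; SI, killing store
       // removing NI would be observable if the call unwinds. The check is
       // skipped when SI stores to an object invisible to the caller before
       // the return (e.g. a non-escaping alloca).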
1684   bool mayThrowBetween(Instruction *SI, Instruction *NI,
1685                        const Value *SILocUnd) {
1686     // First see if we can ignore it by using the fact that SI is an
1687     // alloca/alloca like object that is not visible to the caller during
1688     // execution of the function.
1689     if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
1690       return false;
1691 
1692     if (SI->getParent() == NI->getParent())
1693       return ThrowingBlocks.count(SI->getParent());
1694     return !ThrowingBlocks.empty();
1695   }
1696 
1697   // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
1698   // act as barriers:
1699   //  * A memory instruction that may throw and \p SI accesses a non-stack
1700   //  object.
1701   //  * Atomic stores stronger than monotonic.
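  //
  // For illustration (assuming \p SI writes to an escaping/global object;
  // @may_throw is a placeholder):
  //   store atomic i32 0, i32* @g seq_cst, align 4    -> barrier
  //   store atomic i32 0, i32* @g monotonic, align 4  -> not a barrier
  //   call void @may_throw()                          -> barrier (may throw)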
1702   bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
1703     // If NI may throw it acts as a barrier, unless SILocUnd is an alloca or
1704     // alloca-like object that does not escape.
1705     if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
1706       return true;
1707 
1708     // If NI is an atomic load/store stronger than monotonic, do not try to
1709     // eliminate/reorder it.
1710     if (NI->isAtomic()) {
1711       if (auto *LI = dyn_cast<LoadInst>(NI))
1712         return isStrongerThanMonotonic(LI->getOrdering());
1713       if (auto *SI = dyn_cast<StoreInst>(NI))
1714         return isStrongerThanMonotonic(SI->getOrdering());
1715       if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
1716         return isStrongerThanMonotonic(ARMW->getOrdering());
1717       if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
1718         return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
1719                isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
1720       llvm_unreachable("other instructions should be skipped in MemorySSA");
1721     }
1722     return false;
1723   }
1724 
1725   /// Eliminate writes to objects that are not visible in the caller and are not
1726   /// accessed before returning from the function.
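       ///
       /// Minimal sketch: in
       ///   define void @f() {
       ///     %a = alloca i32
       ///     store i32 1, i32* %a
       ///     ret void
       ///   }
       /// %a is not visible to the caller and is never read before the return,
       /// so the store is dead and is removed here.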
1727   bool eliminateDeadWritesAtEndOfFunction() {
1728     bool MadeChange = false;
1729     LLVM_DEBUG(
1730         dbgs()
1731         << "Trying to eliminate MemoryDefs at the end of the function\n");
1732     for (int I = MemDefs.size() - 1; I >= 0; I--) {
1733       MemoryDef *Def = MemDefs[I];
1734       if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
1735         continue;
1736 
1737       Instruction *DefI = Def->getMemoryInst();
1738       SmallVector<const Value *, 4> Pointers;
1739       auto DefLoc = getLocForWriteEx(DefI);
1740       if (!DefLoc)
1741         continue;
1742 
1743       // NOTE: Currently eliminating writes at the end of a function is limited
1744       // to MemoryDefs with a single underlying object, to save compile-time. In
1745       // practice it appears the case with multiple underlying objects is very
1746       // uncommon. If it turns out to be important, we can use
1747       // getUnderlyingObjects here instead.
1748       const Value *UO = getUnderlyingObject(DefLoc->Ptr);
1749       if (!UO || !isInvisibleToCallerAfterRet(UO))
1750         continue;
1751 
1752       if (isWriteAtEndOfFunction(Def)) {
1753         // See through pointer-to-pointer bitcasts
1754         LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
1755                              "of the function\n");
1756         deleteDeadInstruction(DefI);
1757         ++NumFastStores;
1758         MadeChange = true;
1759       }
1760     }
1761     return MadeChange;
1762   }
1763 
1764   /// \returns true if \p Def is a no-op store, either because it
1765   /// directly stores back a loaded value or stores zero to a calloced object.
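       ///
       /// Sketch of the two patterns (illustrative IR):
       ///   %v = load i32, i32* %p
       ///   store i32 %v, i32* %p        ; stores back the value just loaded
       /// and
       ///   %m = call i8* @calloc(i64 1, i64 16)
       ///   call void @llvm.memset.p0i8.i64(i8* %m, i8 0, i64 16, i1 false)
       /// where the memset writes zero into already zero-initialized memory,
       /// provided nothing may modify it in between (checked via MemorySSA
       /// below).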
1766   bool storeIsNoop(MemoryDef *Def, const MemoryLocation &DefLoc,
1767                    const Value *DefUO) {
1768     StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
1769     MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst());
1770     Constant *StoredConstant = nullptr;
1771     if (Store)
1772       StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
1773     if (MemSet)
1774       StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1775 
1776     if (StoredConstant && StoredConstant->isNullValue()) {
1777       auto *DefUOInst = dyn_cast<Instruction>(DefUO);
1778       if (DefUOInst && isCallocLikeFn(DefUOInst, &TLI)) {
1779         auto *UnderlyingDef = cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
1780         // If UnderlyingDef is the clobbering access of Def, no instructions
1781         // between them can modify the memory location.
1782         auto *ClobberDef =
1783             MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
1784         return UnderlyingDef == ClobberDef;
1785       }
1786     }
1787 
1788     if (!Store)
1789       return false;
1790 
1791     if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
1792       if (LoadI->getPointerOperand() == Store->getOperand(1)) {
1793         // Get the defining access for the load.
1794         auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
1795         // Fast path: the defining accesses are the same.
1796         if (LoadAccess == Def->getDefiningAccess())
1797           return true;
1798 
1799         // Look through phi accesses. Recursively scan all phi accesses by
1800         // adding them to a worklist. Bail when we run into a memory def that
1801         // does not match LoadAccess.
1802         SetVector<MemoryAccess *> ToCheck;
1803         MemoryAccess *Current =
1804             MSSA.getWalker()->getClobberingMemoryAccess(Def);
1805         // We don't want to bail when we run into the store memory def. But,
1806         // the phi access may point to it. So, pretend like we've already
1807         // checked it.
1808         ToCheck.insert(Def);
1809         ToCheck.insert(Current);
1810         // Start at current (1) to simulate already having checked Def.
1811         for (unsigned I = 1; I < ToCheck.size(); ++I) {
1812           Current = ToCheck[I];
1813           if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
1814             // Check all the operands.
1815             for (auto &Use : PhiAccess->incoming_values())
1816               ToCheck.insert(cast<MemoryAccess>(&Use));
1817             continue;
1818           }
1819 
1820           // If we found a memory def, bail. This happens when we have an
1821           // unrelated write in between an otherwise noop store.
1822           assert(isa<MemoryDef>(Current) &&
1823                  "Only MemoryDefs should reach here.");
1824           // TODO: Skip no alias MemoryDefs that have no aliasing reads.
1825           // We are searching for the definition of the store's destination.
1826           // So, if that is the same definition as the load, then this is a
1827           // noop. Otherwise, fail.
1828           if (LoadAccess != Current)
1829             return false;
1830         }
1831         return true;
1832       }
1833     }
1834 
1835     return false;
1836   }
1837 };
1838 
1839 bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
1840                          DominatorTree &DT, PostDominatorTree &PDT,
1841                          const TargetLibraryInfo &TLI) {
1842   bool MadeChange = false;
1843 
1844   DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI);
1845   // For each store:
1846   for (unsigned I = 0; I < State.MemDefs.size(); I++) {
1847     MemoryDef *KillingDef = State.MemDefs[I];
1848     if (State.SkipStores.count(KillingDef))
1849       continue;
1850     Instruction *SI = KillingDef->getMemoryInst();
1851 
1852     Optional<MemoryLocation> MaybeSILoc;
1853     if (State.isMemTerminatorInst(SI))
1854       MaybeSILoc = State.getLocForTerminator(SI).map(
1855           [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
1856     else
1857       MaybeSILoc = State.getLocForWriteEx(SI);
1858 
1859     if (!MaybeSILoc) {
1860       LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
1861                         << *SI << "\n");
1862       continue;
1863     }
1864     MemoryLocation SILoc = *MaybeSILoc;
1865     assert(SILoc.Ptr && "SILoc should not be null");
1866     const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);
1867 
1868     MemoryAccess *Current = KillingDef;
1869     LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
1870                       << *Current << " (" << *SI << ")\n");
1871 
1872     unsigned ScanLimit = MemorySSAScanLimit;
1873     unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
1874     unsigned PartialLimit = MemorySSAPartialStoreLimit;
1875     // Worklist of MemoryAccesses that may be killed by KillingDef.
1876     SetVector<MemoryAccess *> ToCheck;
1877 
1878     if (SILocUnd)
1879       ToCheck.insert(KillingDef->getDefiningAccess());
1880 
1881     bool Shortend = false;
1882     bool IsMemTerm = State.isMemTerminatorInst(SI);
1883     // Check if MemoryAccesses in the worklist are killed by KillingDef.
1884     for (unsigned I = 0; I < ToCheck.size(); I++) {
1885       Current = ToCheck[I];
1886       if (State.SkipStores.count(Current))
1887         continue;
1888 
1889       Optional<MemoryAccess *> Next = State.getDomMemoryDef(
1890           KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit,
1891           IsMemTerm, PartialLimit);
1892 
1893       if (!Next) {
1894         LLVM_DEBUG(dbgs() << "  finished walk\n");
1895         continue;
1896       }
1897 
1898       MemoryAccess *EarlierAccess = *Next;
1899       LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess);
1900       if (isa<MemoryPhi>(EarlierAccess)) {
1901         LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
1902         for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
1903           MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
1904           BasicBlock *IncomingBlock = IncomingAccess->getBlock();
1905           BasicBlock *PhiBlock = EarlierAccess->getBlock();
1906 
1907           // We only consider incoming MemoryAccesses that come before the
1908           // MemoryPhi. Otherwise we could discover candidates that do not
1909           // strictly dominate our starting def.
1910           if (State.PostOrderNumbers[IncomingBlock] >
1911               State.PostOrderNumbers[PhiBlock])
1912             ToCheck.insert(IncomingAccess);
1913         }
1914         continue;
1915       }
1916       auto *NextDef = cast<MemoryDef>(EarlierAccess);
1917       Instruction *NI = NextDef->getMemoryInst();
1918       LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
1919       ToCheck.insert(NextDef->getDefiningAccess());
1920       NumGetDomMemoryDefPassed++;
1921 
1922       if (!DebugCounter::shouldExecute(MemorySSACounter))
1923         continue;
1924 
1925       MemoryLocation NILoc = *State.getLocForWriteEx(NI);
1926 
1927       if (IsMemTerm) {
1928         const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
1929         if (SILocUnd != NIUnd)
1930           continue;
1931         LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
1932                           << "\n  KILLER: " << *SI << '\n');
1933         State.deleteDeadInstruction(NI);
1934         ++NumFastStores;
1935         MadeChange = true;
1936       } else {
1937         // Check if NI overwrites SI.
1938         int64_t InstWriteOffset, DepWriteOffset;
1939         OverwriteResult OR = State.isOverwrite(SI, NI, SILoc, NILoc,
1940                                                DepWriteOffset, InstWriteOffset);
1941         if (OR == OW_MaybePartial) {
1942           auto Iter = State.IOLs.insert(
1943               std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
1944                   NI->getParent(), InstOverlapIntervalsTy()));
1945           auto &IOL = Iter.first->second;
1946           OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
1947                                   NI, IOL);
1948         }
1949 
1950         if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
1951           auto *Earlier = dyn_cast<StoreInst>(NI);
1952           auto *Later = dyn_cast<StoreInst>(SI);
1953           // We are re-using tryToMergePartialOverlappingStores, which requires
1954           // Earlier to dominate Later.
1955           // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
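               // For instance (sketch; %p8 is %p cast to i8*, and the merged
               // constant assumes a little-endian layout):
               //   store i32 0, i32* %p    ; Earlier
               //   store i8 1, i8* %p8     ; Later, overwrites the low byte
               // becomes a single `store i32 1, i32* %p`, and the later store
               // is deleted below.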
1956           if (Earlier && Later && DT.dominates(Earlier, Later)) {
1957             if (Constant *Merged = tryToMergePartialOverlappingStores(
1958                     Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
1959                     State.BatchAA, &DT)) {
1960 
1961               // Update stored value of earlier store to merged constant.
1962               Earlier->setOperand(0, Merged);
1963               ++NumModifiedStores;
1964               MadeChange = true;
1965 
1966               Shortend = true;
1967               // Remove later store and remove any outstanding overlap intervals
1968               // for the updated store.
1969               State.deleteDeadInstruction(Later);
1970               auto I = State.IOLs.find(Earlier->getParent());
1971               if (I != State.IOLs.end())
1972                 I->second.erase(Earlier);
1973               break;
1974             }
1975           }
1976         }
1977 
1978         if (OR == OW_Complete) {
1979           LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: " << *NI
1980                             << "\n  KILLER: " << *SI << '\n');
1981           State.deleteDeadInstruction(NI);
1982           ++NumFastStores;
1983           MadeChange = true;
1984         }
1985       }
1986     }
1987 
1988     // Check if the store is a no-op.
1989     if (!Shortend && isRemovable(SI) &&
1990         State.storeIsNoop(KillingDef, SILoc, SILocUnd)) {
1991       LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *SI << '\n');
1992       State.deleteDeadInstruction(SI);
1993       NumRedundantStores++;
1994       MadeChange = true;
1995       continue;
1996     }
1997   }
1998 
1999   if (EnablePartialOverwriteTracking)
2000     for (auto &KV : State.IOLs)
2001       MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);
2002 
2003   MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2004   return MadeChange;
2005 }
2006 } // end anonymous namespace
2007 
2008 //===----------------------------------------------------------------------===//
2009 // DSE Pass
2010 //===----------------------------------------------------------------------===//
2011 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2012   AliasAnalysis &AA = AM.getResult<AAManager>(F);
2013   const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2014   DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2015   MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2016   PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2017 
2018   bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI);
2019 
2020 #ifdef LLVM_ENABLE_STATS
2021   if (AreStatisticsEnabled())
2022     for (auto &I : instructions(F))
2023       NumRemainingStores += isa<StoreInst>(&I);
2024 #endif
2025 
2026   if (!Changed)
2027     return PreservedAnalyses::all();
2028 
2029   PreservedAnalyses PA;
2030   PA.preserveSet<CFGAnalyses>();
2031   PA.preserve<MemorySSAAnalysis>();
2032   return PA;
2033 }
2034 
2035 namespace {
2036 
2037 /// A legacy pass for the legacy pass manager that wraps \c DSEPass.
2038 class DSELegacyPass : public FunctionPass {
2039 public:
2040   static char ID; // Pass identification, replacement for typeid
2041 
2042   DSELegacyPass() : FunctionPass(ID) {
2043     initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
2044   }
2045 
2046   bool runOnFunction(Function &F) override {
2047     if (skipFunction(F))
2048       return false;
2049 
2050     AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2051     DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2052     const TargetLibraryInfo &TLI =
2053         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2054     MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
2055     PostDominatorTree &PDT =
2056         getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
2057 
2058     bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI);
2059 
2060 #ifdef LLVM_ENABLE_STATS
2061     if (AreStatisticsEnabled())
2062       for (auto &I : instructions(F))
2063         NumRemainingStores += isa<StoreInst>(&I);
2064 #endif
2065 
2066     return Changed;
2067   }
2068 
2069   void getAnalysisUsage(AnalysisUsage &AU) const override {
2070     AU.setPreservesCFG();
2071     AU.addRequired<AAResultsWrapperPass>();
2072     AU.addRequired<TargetLibraryInfoWrapperPass>();
2073     AU.addPreserved<GlobalsAAWrapperPass>();
2074     AU.addRequired<DominatorTreeWrapperPass>();
2075     AU.addPreserved<DominatorTreeWrapperPass>();
2076     AU.addRequired<PostDominatorTreeWrapperPass>();
2077     AU.addRequired<MemorySSAWrapperPass>();
2078     AU.addPreserved<PostDominatorTreeWrapperPass>();
2079     AU.addPreserved<MemorySSAWrapperPass>();
2080   }
2081 };
2082 
2083 } // end anonymous namespace
2084 
2085 char DSELegacyPass::ID = 0;
2086 
2087 INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
2088                       false)
2089 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
2090 INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
2091 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
2092 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
2093 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
2094 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
2095 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
2096 INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
2097                     false)
2098 
2099 FunctionPass *llvm::createDeadStoreEliminationPass() {
2100   return new DSELegacyPass();
2101 }
2102