xref: /netbsd-src/external/apache2/llvm/dist/llvm/lib/Transforms/Scalar/GVN.cpp (revision 82d56013d7b633d116a93943de88e08335357a7c)
1 //===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs global value numbering to eliminate fully redundant
10 // instructions.  It also performs simple dead load elimination.
11 //
12 // Note that this pass does the value numbering itself; it does not use the
13 // ValueNumbering analysis passes.
14 //
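// For example (illustrative IR; value names are hypothetical), GVN rewrites
//
//   %a = add i32 %x, %y
//   %b = add i32 %x, %y   ; fully redundant with %a
//
// so that all uses of %b become uses of %a and %b is deleted.
//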
15 //===----------------------------------------------------------------------===//
16 
17 #include "llvm/Transforms/Scalar/GVN.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/Hashing.h"
21 #include "llvm/ADT/MapVector.h"
22 #include "llvm/ADT/PointerIntPair.h"
23 #include "llvm/ADT/PostOrderIterator.h"
24 #include "llvm/ADT/STLExtras.h"
25 #include "llvm/ADT/SetVector.h"
26 #include "llvm/ADT/SmallPtrSet.h"
27 #include "llvm/ADT/SmallVector.h"
28 #include "llvm/ADT/Statistic.h"
29 #include "llvm/Analysis/AliasAnalysis.h"
30 #include "llvm/Analysis/AssumeBundleQueries.h"
31 #include "llvm/Analysis/AssumptionCache.h"
32 #include "llvm/Analysis/CFG.h"
33 #include "llvm/Analysis/DomTreeUpdater.h"
34 #include "llvm/Analysis/GlobalsModRef.h"
35 #include "llvm/Analysis/InstructionSimplify.h"
36 #include "llvm/Analysis/LoopInfo.h"
37 #include "llvm/Analysis/MemoryBuiltins.h"
38 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
39 #include "llvm/Analysis/MemorySSA.h"
40 #include "llvm/Analysis/MemorySSAUpdater.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/PHITransAddr.h"
43 #include "llvm/Analysis/TargetLibraryInfo.h"
44 #include "llvm/Analysis/ValueTracking.h"
45 #include "llvm/Config/llvm-config.h"
46 #include "llvm/IR/Attributes.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/Constant.h"
49 #include "llvm/IR/Constants.h"
50 #include "llvm/IR/DataLayout.h"
51 #include "llvm/IR/DebugLoc.h"
52 #include "llvm/IR/Dominators.h"
53 #include "llvm/IR/Function.h"
54 #include "llvm/IR/InstrTypes.h"
55 #include "llvm/IR/Instruction.h"
56 #include "llvm/IR/Instructions.h"
57 #include "llvm/IR/IntrinsicInst.h"
58 #include "llvm/IR/Intrinsics.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/Metadata.h"
61 #include "llvm/IR/Module.h"
62 #include "llvm/IR/Operator.h"
63 #include "llvm/IR/PassManager.h"
64 #include "llvm/IR/PatternMatch.h"
65 #include "llvm/IR/Type.h"
66 #include "llvm/IR/Use.h"
67 #include "llvm/IR/Value.h"
68 #include "llvm/InitializePasses.h"
69 #include "llvm/Pass.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CommandLine.h"
72 #include "llvm/Support/Compiler.h"
73 #include "llvm/Support/Debug.h"
74 #include "llvm/Support/raw_ostream.h"
75 #include "llvm/Transforms/Utils.h"
76 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
77 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
78 #include "llvm/Transforms/Utils/Local.h"
79 #include "llvm/Transforms/Utils/SSAUpdater.h"
80 #include "llvm/Transforms/Utils/VNCoercion.h"
81 #include <algorithm>
82 #include <cassert>
83 #include <cstdint>
84 #include <utility>
85 #include <vector>
86 
87 using namespace llvm;
88 using namespace llvm::gvn;
89 using namespace llvm::VNCoercion;
90 using namespace PatternMatch;
91 
92 #define DEBUG_TYPE "gvn"
93 
94 STATISTIC(NumGVNInstr, "Number of instructions deleted");
95 STATISTIC(NumGVNLoad, "Number of loads deleted");
96 STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
97 STATISTIC(NumGVNBlocks, "Number of blocks merged");
98 STATISTIC(NumGVNSimpl, "Number of instructions simplified");
99 STATISTIC(NumGVNEqProp, "Number of equalities propagated");
100 STATISTIC(NumPRELoad, "Number of loads PRE'd");
101 STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
102 
103 STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
104           "Number of blocks speculated as available in "
105           "IsValueFullyAvailableInBlock(), max");
106 STATISTIC(MaxBBSpeculationCutoffReachedTimes,
107           "Number of times we we reached gvn-max-block-speculations cut-off "
108           "preventing further exploration");
109 
110 static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
111 static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
112 static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
113                                             cl::init(true));
114 static cl::opt<bool>
115 GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
116                                 cl::init(true));
117 static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
118 
119 static cl::opt<uint32_t> MaxNumDeps(
120     "gvn-max-num-deps", cl::Hidden, cl::init(100), cl::ZeroOrMore,
121     cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));
122 
123 // This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
124 static cl::opt<uint32_t> MaxBBSpeculations(
125     "gvn-max-block-speculations", cl::Hidden, cl::init(600), cl::ZeroOrMore,
126     cl::desc("Max number of blocks we're willing to speculate on (and recurse "
127              "into) when deducing if a value is fully available or not in GVN "
128              "(default = 600)"));
129 
130 struct llvm::GVN::Expression {
131   uint32_t opcode;
132   bool commutative = false;
133   Type *type = nullptr;
134   SmallVector<uint32_t, 4> varargs;
135 
136   Expression(uint32_t o = ~2U) : opcode(o) {}
137 
138   bool operator==(const Expression &other) const {
139     if (opcode != other.opcode)
140       return false;
141     if (opcode == ~0U || opcode == ~1U)
142       return true;
143     if (type != other.type)
144       return false;
145     if (varargs != other.varargs)
146       return false;
147     return true;
148   }
149 
150   friend hash_code hash_value(const Expression &Value) {
151     return hash_combine(
152         Value.opcode, Value.type,
153         hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
154   }
155 };
156 
157 namespace llvm {
158 
159 template <> struct DenseMapInfo<GVN::Expression> {
160   static inline GVN::Expression getEmptyKey() { return ~0U; }
161   static inline GVN::Expression getTombstoneKey() { return ~1U; }
162 
163   static unsigned getHashValue(const GVN::Expression &e) {
164     using llvm::hash_value;
165 
166     return static_cast<unsigned>(hash_value(e));
167   }
168 
169   static bool isEqual(const GVN::Expression &LHS, const GVN::Expression &RHS) {
170     return LHS == RHS;
171   }
172 };
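// Note the interplay with Expression's default opcode of ~2U above: ~0U and
// ~1U are reserved for the empty and tombstone keys, so a default-constructed
// Expression can never collide with either DenseMap sentinel.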
173 
174 } // end namespace llvm
175 
176 /// Represents a particular available value that we know how to materialize.
177 /// Materialization of an AvailableValue never fails.  An AvailableValue is
178 /// implicitly associated with a rematerialization point which is the
179 /// location of the instruction from which it was formed.
180 struct llvm::gvn::AvailableValue {
181   enum ValType {
182     SimpleVal, // A simple offsetted value that is accessed.
183     LoadVal,   // A value produced by a load.
184     MemIntrin, // A memory intrinsic which is loaded from.
185     UndefVal   // A UndefValue representing a value from dead block (which
186                // is not yet physically removed from the CFG).
187   };
188 
189   /// V - The value that is live out of the block.
190   PointerIntPair<Value *, 2, ValType> Val;
191 
192   /// Offset - The byte offset in Val that is interesting for the load query.
193   unsigned Offset = 0;
194 
195   static AvailableValue get(Value *V, unsigned Offset = 0) {
196     AvailableValue Res;
197     Res.Val.setPointer(V);
198     Res.Val.setInt(SimpleVal);
199     Res.Offset = Offset;
200     return Res;
201   }
202 
203   static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
204     AvailableValue Res;
205     Res.Val.setPointer(MI);
206     Res.Val.setInt(MemIntrin);
207     Res.Offset = Offset;
208     return Res;
209   }
210 
211   static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
212     AvailableValue Res;
213     Res.Val.setPointer(Load);
214     Res.Val.setInt(LoadVal);
215     Res.Offset = Offset;
216     return Res;
217   }
218 
219   static AvailableValue getUndef() {
220     AvailableValue Res;
221     Res.Val.setPointer(nullptr);
222     Res.Val.setInt(UndefVal);
223     Res.Offset = 0;
224     return Res;
225   }
226 
227   bool isSimpleValue() const { return Val.getInt() == SimpleVal; }
228   bool isCoercedLoadValue() const { return Val.getInt() == LoadVal; }
229   bool isMemIntrinValue() const { return Val.getInt() == MemIntrin; }
230   bool isUndefValue() const { return Val.getInt() == UndefVal; }
231 
232   Value *getSimpleValue() const {
233     assert(isSimpleValue() && "Wrong accessor");
234     return Val.getPointer();
235   }
236 
237   LoadInst *getCoercedLoadValue() const {
238     assert(isCoercedLoadValue() && "Wrong accessor");
239     return cast<LoadInst>(Val.getPointer());
240   }
241 
242   MemIntrinsic *getMemIntrinValue() const {
243     assert(isMemIntrinValue() && "Wrong accessor");
244     return cast<MemIntrinsic>(Val.getPointer());
245   }
246 
247   /// Emit code at the specified insertion point to adjust the value defined
248   /// here to the specified type. This handles various coercion cases.
249   Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
250                                   GVN &gvn) const;
251 };
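// A minimal usage sketch (hypothetical values, not code from this file): if a
// load of i32 from %p+4 is covered by an earlier "store i64 %v, i64* %p", the
// forwarded value can be described as AvailableValue::get(%v, /*Offset=*/4);
// MaterializeAdjustedValue() then emits whatever shift/truncate/bitcast is
// needed to extract the loaded bytes from %v at the insertion point.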
252 
253 /// Represents an AvailableValue which can be rematerialized at the end of
254 /// the associated BasicBlock.
255 struct llvm::gvn::AvailableValueInBlock {
256   /// BB - The basic block in question.
257   BasicBlock *BB = nullptr;
258 
259   /// AV - The actual available value
260   AvailableValue AV;
261 
262   static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
263     AvailableValueInBlock Res;
264     Res.BB = BB;
265     Res.AV = std::move(AV);
266     return Res;
267   }
268 
269   static AvailableValueInBlock get(BasicBlock *BB, Value *V,
270                                    unsigned Offset = 0) {
271     return get(BB, AvailableValue::get(V, Offset));
272   }
273 
274   static AvailableValueInBlock getUndef(BasicBlock *BB) {
275     return get(BB, AvailableValue::getUndef());
276   }
277 
278   /// Emit code at the end of this block to adjust the value defined here to
279   /// the specified type. This handles various coercion cases.
280   Value *MaterializeAdjustedValue(LoadInst *Load, GVN &gvn) const {
281     return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
282   }
283 };
284 
285 //===----------------------------------------------------------------------===//
286 //                     ValueTable Internal Functions
287 //===----------------------------------------------------------------------===//
288 
289 GVN::Expression GVN::ValueTable::createExpr(Instruction *I) {
290   Expression e;
291   e.type = I->getType();
292   e.opcode = I->getOpcode();
293   if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
294   // gc.relocate is a 'special' call: its second and third operands are
295   // not real values, but indices into the statepoint's argument list.
296   // Use the referred-to values for purposes of identity.
297     e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
298     e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
299     e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
300   } else {
301     for (Use &Op : I->operands())
302       e.varargs.push_back(lookupOrAdd(Op));
303   }
304   if (I->isCommutative()) {
305     // Ensure that commutative instructions that only differ by a permutation
306     // of their operands get the same value number by sorting the operand value
307     // numbers.  Since the commutative operands are the first two operands, it
308     // is more efficient to sort them by hand rather than using, say, std::sort.
309     assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
310     if (e.varargs[0] > e.varargs[1])
311       std::swap(e.varargs[0], e.varargs[1]);
312     e.commutative = true;
313   }
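  // For example, "add i32 %x, %y" and "add i32 %y, %x" both end up with their
  // operand value numbers in sorted order and so map to the same Expression.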
314 
315   if (auto *C = dyn_cast<CmpInst>(I)) {
316     // Sort the operand value numbers so x<y and y>x get the same value number.
317     CmpInst::Predicate Predicate = C->getPredicate();
318     if (e.varargs[0] > e.varargs[1]) {
319       std::swap(e.varargs[0], e.varargs[1]);
320       Predicate = CmpInst::getSwappedPredicate(Predicate);
321     }
322     e.opcode = (C->getOpcode() << 8) | Predicate;
323     e.commutative = true;
324   } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
325     e.varargs.append(E->idx_begin(), E->idx_end());
326   } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
327     ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
328     e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
329   }
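  // Likewise, "icmp slt i32 %x, %y" and "icmp sgt i32 %y, %x" receive the
  // same expression above: the operands are sorted by value number and the
  // predicate is swapped to compensate.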
330 
331   return e;
332 }
333 
334 GVN::Expression GVN::ValueTable::createCmpExpr(unsigned Opcode,
335                                                CmpInst::Predicate Predicate,
336                                                Value *LHS, Value *RHS) {
337   assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
338          "Not a comparison!");
339   Expression e;
340   e.type = CmpInst::makeCmpResultType(LHS->getType());
341   e.varargs.push_back(lookupOrAdd(LHS));
342   e.varargs.push_back(lookupOrAdd(RHS));
343 
344   // Sort the operand value numbers so x<y and y>x get the same value number.
345   if (e.varargs[0] > e.varargs[1]) {
346     std::swap(e.varargs[0], e.varargs[1]);
347     Predicate = CmpInst::getSwappedPredicate(Predicate);
348   }
349   e.opcode = (Opcode << 8) | Predicate;
350   e.commutative = true;
351   return e;
352 }
353 
354 GVN::Expression GVN::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
355   assert(EI && "Not an ExtractValueInst?");
356   Expression e;
357   e.type = EI->getType();
358   e.opcode = 0;
359 
360   WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
361   if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
362     // EI is an extract from one of our with.overflow intrinsics. Synthesize
363     // a semantically equivalent expression instead of an extract value
364     // expression.
365     e.opcode = WO->getBinaryOp();
366     e.varargs.push_back(lookupOrAdd(WO->getLHS()));
367     e.varargs.push_back(lookupOrAdd(WO->getRHS()));
368     return e;
369   }
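  // For example, "extractvalue {i32, i1} %wo, 0", where %wo is a call to
  // @llvm.sadd.with.overflow.i32(i32 %a, i32 %b), is numbered as if it were
  // "add i32 %a, %b" and so unifies with a plain add of the same operands.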
370 
371   // Not a recognised intrinsic. Fall back to producing an extract value
372   // expression.
373   e.opcode = EI->getOpcode();
374   for (Use &Op : EI->operands())
375     e.varargs.push_back(lookupOrAdd(Op));
376 
377   append_range(e.varargs, EI->indices());
378 
379   return e;
380 }
381 
382 //===----------------------------------------------------------------------===//
383 //                     ValueTable External Functions
384 //===----------------------------------------------------------------------===//
385 
386 GVN::ValueTable::ValueTable() = default;
387 GVN::ValueTable::ValueTable(const ValueTable &) = default;
388 GVN::ValueTable::ValueTable(ValueTable &&) = default;
389 GVN::ValueTable::~ValueTable() = default;
390 GVN::ValueTable &GVN::ValueTable::operator=(const GVN::ValueTable &Arg) = default;
391 
392 /// add - Insert a value into the table with a specified value number.
393 void GVN::ValueTable::add(Value *V, uint32_t num) {
394   valueNumbering.insert(std::make_pair(V, num));
395   if (PHINode *PN = dyn_cast<PHINode>(V))
396     NumberingPhi[num] = PN;
397 }
398 
399 uint32_t GVN::ValueTable::lookupOrAddCall(CallInst *C) {
400   if (AA->doesNotAccessMemory(C)) {
401     Expression exp = createExpr(C);
402     uint32_t e = assignExpNewValueNum(exp).first;
403     valueNumbering[C] = e;
404     return e;
405   } else if (MD && AA->onlyReadsMemory(C)) {
406     Expression exp = createExpr(C);
407     auto ValNum = assignExpNewValueNum(exp);
408     if (ValNum.second) {
409       valueNumbering[C] = ValNum.first;
410       return ValNum.first;
411     }
412 
413     MemDepResult local_dep = MD->getDependency(C);
414 
415     if (!local_dep.isDef() && !local_dep.isNonLocal()) {
416       valueNumbering[C] = nextValueNumber;
417       return nextValueNumber++;
418     }
419 
420     if (local_dep.isDef()) {
421       // For masked load/store intrinsics, the local_dep may actually be
422       // a normal load or store instruction.
423       CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
424 
425       if (!local_cdep ||
426           local_cdep->getNumArgOperands() != C->getNumArgOperands()) {
427         valueNumbering[C] = nextValueNumber;
428         return nextValueNumber++;
429       }
430 
431       for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
432         uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
433         uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
434         if (c_vn != cd_vn) {
435           valueNumbering[C] = nextValueNumber;
436           return nextValueNumber++;
437         }
438       }
439 
440       uint32_t v = lookupOrAdd(local_cdep);
441       valueNumbering[C] = v;
442       return v;
443     }
444 
445     // Non-local case.
446     const MemoryDependenceResults::NonLocalDepInfo &deps =
447         MD->getNonLocalCallDependency(C);
448     // FIXME: Move the checking logic to MemDep!
449     CallInst* cdep = nullptr;
450 
451     // Check to see if we have a single dominating call instruction that is
452     // identical to C.
453     for (unsigned i = 0, e = deps.size(); i != e; ++i) {
454       const NonLocalDepEntry *I = &deps[i];
455       if (I->getResult().isNonLocal())
456         continue;
457 
458       // We don't handle non-definitions.  If we already have a call, reject
459       // instruction dependencies.
460       if (!I->getResult().isDef() || cdep != nullptr) {
461         cdep = nullptr;
462         break;
463       }
464 
465       CallInst *NonLocalDepCall = dyn_cast<CallInst>(I->getResult().getInst());
466       // FIXME: All duplicated with non-local case.
467       if (NonLocalDepCall && DT->properlyDominates(I->getBB(), C->getParent())){
468         cdep = NonLocalDepCall;
469         continue;
470       }
471 
472       cdep = nullptr;
473       break;
474     }
475 
476     if (!cdep) {
477       valueNumbering[C] = nextValueNumber;
478       return nextValueNumber++;
479     }
480 
481     if (cdep->getNumArgOperands() != C->getNumArgOperands()) {
482       valueNumbering[C] = nextValueNumber;
483       return nextValueNumber++;
484     }
485     for (unsigned i = 0, e = C->getNumArgOperands(); i < e; ++i) {
486       uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
487       uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
488       if (c_vn != cd_vn) {
489         valueNumbering[C] = nextValueNumber;
490         return nextValueNumber++;
491       }
492     }
493 
494     uint32_t v = lookupOrAdd(cdep);
495     valueNumbering[C] = v;
496     return v;
497   } else {
498     valueNumbering[C] = nextValueNumber;
499     return nextValueNumber++;
500   }
501 }
502 
503 /// Returns true if a value number exists for the specified value.
504 bool GVN::ValueTable::exists(Value *V) const { return valueNumbering.count(V) != 0; }
505 
506 /// lookupOrAdd - Returns the value number for the specified value, assigning
507 /// it a new number if it did not have one before.
508 uint32_t GVN::ValueTable::lookupOrAdd(Value *V) {
509   DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
510   if (VI != valueNumbering.end())
511     return VI->second;
512 
513   if (!isa<Instruction>(V)) {
514     valueNumbering[V] = nextValueNumber;
515     return nextValueNumber++;
516   }
517 
518   Instruction* I = cast<Instruction>(V);
519   Expression exp;
520   switch (I->getOpcode()) {
521     case Instruction::Call:
522       return lookupOrAddCall(cast<CallInst>(I));
523     case Instruction::FNeg:
524     case Instruction::Add:
525     case Instruction::FAdd:
526     case Instruction::Sub:
527     case Instruction::FSub:
528     case Instruction::Mul:
529     case Instruction::FMul:
530     case Instruction::UDiv:
531     case Instruction::SDiv:
532     case Instruction::FDiv:
533     case Instruction::URem:
534     case Instruction::SRem:
535     case Instruction::FRem:
536     case Instruction::Shl:
537     case Instruction::LShr:
538     case Instruction::AShr:
539     case Instruction::And:
540     case Instruction::Or:
541     case Instruction::Xor:
542     case Instruction::ICmp:
543     case Instruction::FCmp:
544     case Instruction::Trunc:
545     case Instruction::ZExt:
546     case Instruction::SExt:
547     case Instruction::FPToUI:
548     case Instruction::FPToSI:
549     case Instruction::UIToFP:
550     case Instruction::SIToFP:
551     case Instruction::FPTrunc:
552     case Instruction::FPExt:
553     case Instruction::PtrToInt:
554     case Instruction::IntToPtr:
555     case Instruction::AddrSpaceCast:
556     case Instruction::BitCast:
557     case Instruction::Select:
558     case Instruction::Freeze:
559     case Instruction::ExtractElement:
560     case Instruction::InsertElement:
561     case Instruction::ShuffleVector:
562     case Instruction::InsertValue:
563     case Instruction::GetElementPtr:
564       exp = createExpr(I);
565       break;
566     case Instruction::ExtractValue:
567       exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
568       break;
569     case Instruction::PHI:
570       valueNumbering[V] = nextValueNumber;
571       NumberingPhi[nextValueNumber] = cast<PHINode>(V);
572       return nextValueNumber++;
573     default:
574       valueNumbering[V] = nextValueNumber;
575       return nextValueNumber++;
576   }
577 
578   uint32_t e = assignExpNewValueNum(exp).first;
579   valueNumbering[V] = e;
580   return e;
581 }
582 
583 /// Returns the value number of the specified value. Asserts if Verify is
584 /// true and the value is unnumbered; otherwise unnumbered values yield 0.
585 uint32_t GVN::ValueTable::lookup(Value *V, bool Verify) const {
586   DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
587   if (Verify) {
588     assert(VI != valueNumbering.end() && "Value not numbered?");
589     return VI->second;
590   }
591   return (VI != valueNumbering.end()) ? VI->second : 0;
592 }
593 
594 /// Returns the value number of the given comparison,
595 /// assigning it a new number if it did not have one before.  Useful when
596 /// we deduced the result of a comparison, but don't immediately have an
597 /// instruction realizing that comparison to hand.
598 uint32_t GVN::ValueTable::lookupOrAddCmp(unsigned Opcode,
599                                          CmpInst::Predicate Predicate,
600                                          Value *LHS, Value *RHS) {
601   Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
602   return assignExpNewValueNum(exp).first;
603 }
604 
605 /// Remove all entries from the ValueTable.
606 void GVN::ValueTable::clear() {
607   valueNumbering.clear();
608   expressionNumbering.clear();
609   NumberingPhi.clear();
610   PhiTranslateTable.clear();
611   nextValueNumber = 1;
612   Expressions.clear();
613   ExprIdx.clear();
614   nextExprNumber = 0;
615 }
616 
617 /// Remove a value from the value numbering.
618 void GVN::ValueTable::erase(Value *V) {
619   uint32_t Num = valueNumbering.lookup(V);
620   valueNumbering.erase(V);
621   // If V is a PHINode, V <--> value number is a one-to-one mapping.
622   if (isa<PHINode>(V))
623     NumberingPhi.erase(Num);
624 }
625 
626 /// verifyRemoved - Verify that the value is removed from all internal data
627 /// structures.
628 void GVN::ValueTable::verifyRemoved(const Value *V) const {
629   for (DenseMap<Value*, uint32_t>::const_iterator
630          I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
631     assert(I->first != V && "Inst still occurs in value numbering map!");
632   }
633 }
634 
635 //===----------------------------------------------------------------------===//
636 //                                GVN Pass
637 //===----------------------------------------------------------------------===//
638 
639 bool GVN::isPREEnabled() const {
640   return Options.AllowPRE.getValueOr(GVNEnablePRE);
641 }
642 
643 bool GVN::isLoadPREEnabled() const {
644   return Options.AllowLoadPRE.getValueOr(GVNEnableLoadPRE);
645 }
646 
647 bool GVN::isLoadInLoopPREEnabled() const {
648   return Options.AllowLoadInLoopPRE.getValueOr(GVNEnableLoadInLoopPRE);
649 }
650 
651 bool GVN::isLoadPRESplitBackedgeEnabled() const {
652   return Options.AllowLoadPRESplitBackedge.getValueOr(
653       GVNEnableSplitBackedgeInLoadPRE);
654 }
655 
656 bool GVN::isMemDepEnabled() const {
657   return Options.AllowMemDep.getValueOr(GVNEnableMemDep);
658 }
659 
660 PreservedAnalyses GVN::run(Function &F, FunctionAnalysisManager &AM) {
661   // FIXME: The order of evaluation of these 'getResult' calls is very
662   // significant! Re-ordering these variables will cause GVN when run alone to
663   // be less effective! We should fix memdep and basic-aa to not exhibit this
664   // behavior, but until then don't change the order here.
665   auto &AC = AM.getResult<AssumptionAnalysis>(F);
666   auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
667   auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
668   auto &AA = AM.getResult<AAManager>(F);
669   auto *MemDep =
670       isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
671   auto *LI = AM.getCachedResult<LoopAnalysis>(F);
672   auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
673   auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
674   bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
675                          MSSA ? &MSSA->getMSSA() : nullptr);
676   if (!Changed)
677     return PreservedAnalyses::all();
678   PreservedAnalyses PA;
679   PA.preserve<DominatorTreeAnalysis>();
680   PA.preserve<TargetLibraryAnalysis>();
681   if (MSSA)
682     PA.preserve<MemorySSAAnalysis>();
683   if (LI)
684     PA.preserve<LoopAnalysis>();
685   return PA;
686 }
687 
688 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
689 LLVM_DUMP_METHOD void GVN::dump(DenseMap<uint32_t, Value*>& d) const {
690   errs() << "{\n";
691   for (auto &I : d) {
692     errs() << I.first << "\n";
693     I.second->dump();
694   }
695   errs() << "}\n";
696 }
697 #endif
698 
699 enum class AvailabilityState : char {
700   /// We know the block *is not* fully available. This is a fixpoint.
701   Unavailable = 0,
702   /// We know the block *is* fully available. This is a fixpoint.
703   Available = 1,
704   /// We do not know whether the block is fully available or not,
705   /// but we are currently speculating that it will be.
706   /// If it would have turned out that the block was, in fact, not fully
707   /// available, this would have been cleaned up into an Unavailable.
708   SpeculativelyAvailable = 2,
709 };
710 
711 /// Return true if we can prove that the value
712 /// we're analyzing is fully available in the specified block.  As we go, keep
713 /// track of which blocks we know are fully alive in FullyAvailableBlocks.  This
714 /// map is actually a tri-state map with the following values:
715 ///   0) we know the block *is not* fully available.
716 ///   1) we know the block *is* fully available.
717 ///   2) we do not know whether the block is fully available or not, but we are
718 ///      currently speculating that it will be.
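/// For example, in a diamond CFG where the value is available in only one of
/// the two predecessors of the join block, the speculative walk through the
/// other predecessor reaches the entry block without finding a definition, and
/// every block speculated along that path is then fixed to Unavailable.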
719 static bool IsValueFullyAvailableInBlock(
720     BasicBlock *BB,
721     DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
722   SmallVector<BasicBlock *, 32> Worklist;
723   Optional<BasicBlock *> UnavailableBB;
724 
725   // The number of times we didn't find an entry for a block in a map and
726   // optimistically inserted an entry marking the block as speculatively available.
727   unsigned NumNewNewSpeculativelyAvailableBBs = 0;
728 
729 #ifndef NDEBUG
730   SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
731   SmallVector<BasicBlock *, 32> AvailableBBs;
732 #endif
733 
734   Worklist.emplace_back(BB);
735   while (!Worklist.empty()) {
736     BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
737     // Optimistically assume that the block is Speculatively Available and check
738     // to see if we already know about this block in one lookup.
739     std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
740         FullyAvailableBlocks.try_emplace(
741             CurrBB, AvailabilityState::SpeculativelyAvailable);
742     AvailabilityState &State = IV.first->second;
743 
744     // Did the entry already exist for this block?
745     if (!IV.second) {
746       if (State == AvailabilityState::Unavailable) {
747         UnavailableBB = CurrBB;
748         break; // Backpropagate unavailability info.
749       }
750 
751 #ifndef NDEBUG
752       AvailableBBs.emplace_back(CurrBB);
753 #endif
754       continue; // Don't recurse further, but continue processing worklist.
755     }
756 
757     // No entry found for block.
758     ++NumNewNewSpeculativelyAvailableBBs;
759     bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;
760 
761     // If we have exhausted our budget, mark this block as unavailable.
762     // Also, if this block has no predecessors, the value isn't live-in here.
763     if (OutOfBudget || pred_empty(CurrBB)) {
764       MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
765       State = AvailabilityState::Unavailable;
766       UnavailableBB = CurrBB;
767       break; // Backpropagate unavailability info.
768     }
769 
770     // Tentatively consider this block as speculatively available.
771 #ifndef NDEBUG
772     NewSpeculativelyAvailableBBs.insert(CurrBB);
773 #endif
774     // And further recurse into block's predecessors, in depth-first order!
775     Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
776   }
777 
778 #if LLVM_ENABLE_STATS
779   IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
780       NumNewNewSpeculativelyAvailableBBs);
781 #endif
782 
783   // If the block isn't marked as a fixpoint yet (the Unavailable and
784   // Available states are fixpoints), fix it and enqueue its successors.
785   auto MarkAsFixpointAndEnqueueSuccessors =
786       [&](BasicBlock *BB, AvailabilityState FixpointState) {
787         auto It = FullyAvailableBlocks.find(BB);
788         if (It == FullyAvailableBlocks.end())
789           return; // Never queried this block, leave as-is.
790         switch (AvailabilityState &State = It->second) {
791         case AvailabilityState::Unavailable:
792         case AvailabilityState::Available:
793           return; // Don't backpropagate further, continue processing worklist.
794         case AvailabilityState::SpeculativelyAvailable: // Fix it!
795           State = FixpointState;
796 #ifndef NDEBUG
797           assert(NewSpeculativelyAvailableBBs.erase(BB) &&
798                  "Found a speculatively available successor leftover?");
799 #endif
800           // Queue successors for further processing.
801           Worklist.append(succ_begin(BB), succ_end(BB));
802           return;
803         }
804       };
805 
806   if (UnavailableBB) {
807     // Okay, we have encountered an unavailable block.
808     // Mark speculatively available blocks reachable from UnavailableBB as
809     // unavailable as well. Paths are terminated when they reach blocks not in
810     // FullyAvailableBlocks or blocks not marked as speculatively available.
811     Worklist.clear();
812     Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
813     while (!Worklist.empty())
814       MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
815                                          AvailabilityState::Unavailable);
816   }
817 
818 #ifndef NDEBUG
819   Worklist.clear();
820   for (BasicBlock *AvailableBB : AvailableBBs)
821     Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
822   while (!Worklist.empty())
823     MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
824                                        AvailabilityState::Available);
825 
826   assert(NewSpeculativelyAvailableBBs.empty() &&
827          "Must have fixed all the new speculatively available blocks.");
828 #endif
829 
830   return !UnavailableBB;
831 }
832 
833 /// Given a set of loads specified by ValuesPerBlock,
834 /// construct SSA form, allowing us to eliminate Load.  This returns the value
835 /// that should be used at Load's definition site.
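/// For example, if the value is available as %v1 in predecessor %BB1 and as
/// %v2 in predecessor %BB2, SSAUpdater produces "phi [ %v1, %BB1 ], [ %v2,
/// %BB2 ]" at the join; when only one distinct value actually reaches the
/// load, no phi is constructed and that value is returned directly.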
836 static Value *
837 ConstructSSAForLoadSet(LoadInst *Load,
838                        SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
839                        GVN &gvn) {
840   // Check for the fully redundant, dominating load case.  In this case, we can
841   // just use the dominating value directly.
842   if (ValuesPerBlock.size() == 1 &&
843       gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
844                                                Load->getParent())) {
845     assert(!ValuesPerBlock[0].AV.isUndefValue() &&
846            "Dead BB dominate this block");
847     return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
848   }
849 
850   // Otherwise, we have to construct SSA form.
851   SmallVector<PHINode*, 8> NewPHIs;
852   SSAUpdater SSAUpdate(&NewPHIs);
853   SSAUpdate.Initialize(Load->getType(), Load->getName());
854 
855   for (const AvailableValueInBlock &AV : ValuesPerBlock) {
856     BasicBlock *BB = AV.BB;
857 
858     if (AV.AV.isUndefValue())
859       continue;
860 
861     if (SSAUpdate.HasValueForBlock(BB))
862       continue;
863 
864     // If the value is the load that we will be eliminating, and the block it's
865     // available in is the block that the load is in, then don't add it, as
866     // SSAUpdater will resolve the value to the relevant phi, which may let it
867     // avoid phi construction entirely if there's actually only one value.
868     if (BB == Load->getParent() &&
869         ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
870          (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
871       continue;
872 
873     SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
874   }
875 
876   // Perform PHI construction.
877   return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
878 }
879 
880 Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
881                                                 Instruction *InsertPt,
882                                                 GVN &gvn) const {
883   Value *Res;
884   Type *LoadTy = Load->getType();
885   const DataLayout &DL = Load->getModule()->getDataLayout();
886   if (isSimpleValue()) {
887     Res = getSimpleValue();
888     if (Res->getType() != LoadTy) {
889       Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
890 
891       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
892                         << "  " << *getSimpleValue() << '\n'
893                         << *Res << '\n'
894                         << "\n\n\n");
895     }
896   } else if (isCoercedLoadValue()) {
897     LoadInst *Load = getCoercedLoadValue();
898     if (Load->getType() == LoadTy && Offset == 0) {
899       Res = Load;
900     } else {
901       Res = getLoadValueForLoad(Load, Offset, LoadTy, InsertPt, DL);
902       // We would like to use gvn.markInstructionForDeletion here, but we can't
903       // because the load is already memoized into the leader map table that GVN
904       // tracks.  It is potentially possible to remove the load from the table,
905       // but then all of the operations based on it would need to be
906       // rehashed.  Just leave the dead load around.
907       gvn.getMemDep().removeInstruction(Load);
908       LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
909                         << "  " << *getCoercedLoadValue() << '\n'
910                         << *Res << '\n'
911                         << "\n\n\n");
912     }
913   } else if (isMemIntrinValue()) {
914     Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
915                                  InsertPt, DL);
916     LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
917                       << "  " << *getMemIntrinValue() << '\n'
918                       << *Res << '\n'
919                       << "\n\n\n");
920   } else {
921     llvm_unreachable("Should not materialize value from dead block");
922   }
923   assert(Res && "failed to materialize?");
924   return Res;
925 }
926 
927 static bool isLifetimeStart(const Instruction *Inst) {
928   if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
929     return II->getIntrinsicID() == Intrinsic::lifetime_start;
930   return false;
931 }
932 
933 /// Assuming To can be reached from both From and Between, does Between lie on
934 /// every path from From to To?
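/// This is answered by checking reachability from From to To while excluding
/// Between's block: if To becomes unreachable under that exclusion, every
/// From->To path must pass through Between.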
935 static bool liesBetween(const Instruction *From, Instruction *Between,
936                         const Instruction *To, DominatorTree *DT) {
937   if (From->getParent() == Between->getParent())
938     return DT->dominates(From, Between);
939   SmallSet<BasicBlock *, 1> Exclusion;
940   Exclusion.insert(Between->getParent());
941   return !isPotentiallyReachable(From, To, &Exclusion, DT);
942 }
943 
944 /// Try to locate the three instructions involved in a missed
945 /// load-elimination case that is due to an intervening store.
946 static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
947                                    DominatorTree *DT,
948                                    OptimizationRemarkEmitter *ORE) {
949   using namespace ore;
950 
951   User *OtherAccess = nullptr;
952 
953   OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
954   R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
955     << setExtraArgs();
956 
957   for (auto *U : Load->getPointerOperand()->users()) {
958     if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
959         cast<Instruction>(U)->getFunction() == Load->getFunction() &&
960         DT->dominates(cast<Instruction>(U), Load)) {
961       // Use the most immediately dominating value
962       if (OtherAccess) {
963         if (DT->dominates(cast<Instruction>(OtherAccess), cast<Instruction>(U)))
964           OtherAccess = U;
965         else
966           assert(DT->dominates(cast<Instruction>(U),
967                                cast<Instruction>(OtherAccess)));
968       } else
969         OtherAccess = U;
970     }
971   }
972 
973   if (!OtherAccess) {
974     // There is no dominating use; check if we can find the closest non-dominating
975     // use that lies between any other potentially available use and Load.
976     for (auto *U : Load->getPointerOperand()->users()) {
977       if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U)) &&
978           cast<Instruction>(U)->getFunction() == Load->getFunction() &&
979           isPotentiallyReachable(cast<Instruction>(U), Load, nullptr, DT)) {
980         if (OtherAccess) {
981           if (liesBetween(cast<Instruction>(OtherAccess), cast<Instruction>(U),
982                           Load, DT)) {
983             OtherAccess = U;
984           } else if (!liesBetween(cast<Instruction>(U),
985                                   cast<Instruction>(OtherAccess), Load, DT)) {
986             // These uses are both partially available at Load were it not for
987             // the clobber, but neither lies strictly after the other.
988             OtherAccess = nullptr;
989             break;
990           } // else: keep current OtherAccess since it lies between U and Load
991         } else {
992           OtherAccess = U;
993         }
994       }
995     }
996   }
997 
998   if (OtherAccess)
999     R << " in favor of " << NV("OtherAccess", OtherAccess);
1000 
1001   R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
1002 
1003   ORE->emit(R);
1004 }
1005 
1006 bool GVN::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
1007                                   Value *Address, AvailableValue &Res) {
1008   assert((DepInfo.isDef() || DepInfo.isClobber()) &&
1009          "expected a local dependence");
1010   assert(Load->isUnordered() && "rules below are incorrect for ordered access");
1011 
1012   const DataLayout &DL = Load->getModule()->getDataLayout();
1013 
1014   Instruction *DepInst = DepInfo.getInst();
1015   if (DepInfo.isClobber()) {
1016     // If the dependence is to a store that writes to a superset of the bits
1017     // read by the load, we can extract the bits we need for the load from the
1018     // stored value.
1019     if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1020       // Can't forward from non-atomic to atomic without violating memory model.
1021       if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
1022         int Offset =
1023             analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
1024         if (Offset != -1) {
1025           Res = AvailableValue::get(DepSI->getValueOperand(), Offset);
1026           return true;
1027         }
1028       }
1029     }
1030 
1031     // Check to see if we have something like this:
1032     //    load i32* P
1033     //    load i8* (P+1)
1034     // if we have this, replace the later with an extraction from the former.
1035     if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
1036       // If this is a clobber and Load is the first instruction in its block, then
1037       // we have the first instruction in the entry block.
1038       // Can't forward from non-atomic to atomic without violating memory model.
1039       if (DepLoad != Load && Address &&
1040           Load->isAtomic() <= DepLoad->isAtomic()) {
1041         Type *LoadType = Load->getType();
1042         int Offset = -1;
1043 
1044         // If MD reported a clobber, check whether the clobber was nested.
1045         if (DepInfo.isClobber() &&
1046             canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
1047           const auto ClobberOff = MD->getClobberOffset(DepLoad);
1048           // GVN cannot handle a negative offset.
1049           Offset = (ClobberOff == None || ClobberOff.getValue() < 0)
1050                        ? -1
1051                        : ClobberOff.getValue();
1052         }
1053         if (Offset == -1)
1054           Offset =
1055               analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
1056         if (Offset != -1) {
1057           Res = AvailableValue::getLoad(DepLoad, Offset);
1058           return true;
1059         }
1060       }
1061     }
1062 
1063     // If the clobbering value is a memset/memcpy/memmove, see if we can
1064     // forward a value on from it.
1065     if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1066       if (Address && !Load->isAtomic()) {
1067         int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
1068                                                       DepMI, DL);
1069         if (Offset != -1) {
1070           Res = AvailableValue::getMI(DepMI, Offset);
1071           return true;
1072         }
1073       }
1074     }
1075     // Nothing is known about this clobber; we have to be conservative.
1076     LLVM_DEBUG(
1077         // fast print dep, using operator<< on instruction is too slow.
1078         dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1079         dbgs() << " is clobbered by " << *DepInst << '\n';);
1080     if (ORE->allowExtraAnalysis(DEBUG_TYPE))
1081       reportMayClobberedLoad(Load, DepInfo, DT, ORE);
1082 
1083     return false;
1084   }
1085   assert(DepInfo.isDef() && "follows from above");
1086 
1087   // Loading the allocation -> undef.
1088   if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
1089       isAlignedAllocLikeFn(DepInst, TLI) ||
1090       // Loading immediately after lifetime begin -> undef.
1091       isLifetimeStart(DepInst)) {
1092     Res = AvailableValue::get(UndefValue::get(Load->getType()));
1093     return true;
1094   }
1095 
1096   // Loading from calloc (which zero initializes memory) -> zero
1097   if (isCallocLikeFn(DepInst, TLI)) {
1098     Res = AvailableValue::get(Constant::getNullValue(Load->getType()));
1099     return true;
1100   }
1101 
1102   if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1103     // Reject loads and stores that are to the same address but are of
1104     // different types if we have to. If the stored value is convertible to
1105     // the loaded value, we can reuse it.
1106     if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
1107                                          DL))
1108       return false;
1109 
1110     // Can't forward from non-atomic to atomic without violating memory model.
1111     if (S->isAtomic() < Load->isAtomic())
1112       return false;
1113 
1114     Res = AvailableValue::get(S->getValueOperand());
1115     return true;
1116   }
1117 
1118   if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1119     // If the types mismatch and we can't handle it, reject reuse of the load.
1120     // If the stored value is larger or equal to the loaded value, we can reuse
1121     // it.
1122     if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
1123       return false;
1124 
1125     // Can't forward from non-atomic to atomic without violating memory model.
1126     if (LD->isAtomic() < Load->isAtomic())
1127       return false;
1128 
1129     Res = AvailableValue::getLoad(LD);
1130     return true;
1131   }
1132 
1133   // Unknown def - must be conservative
1134   LLVM_DEBUG(
1135       // fast print dep, using operator<< on instruction is too slow.
1136       dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1137       dbgs() << " has unknown def " << *DepInst << '\n';);
1138   return false;
1139 }
1140 
1141 void GVN::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
1142                                   AvailValInBlkVect &ValuesPerBlock,
1143                                   UnavailBlkVect &UnavailableBlocks) {
1144   // Filter out useless results (non-locals, etc).  Keep track of the blocks
1145   // where we have a value available, and also keep track of whether we see
1146   // dependencies that produce an unknown value for the load (such as a call
1147   // that could potentially clobber the load).
1148   unsigned NumDeps = Deps.size();
1149   for (unsigned i = 0, e = NumDeps; i != e; ++i) {
1150     BasicBlock *DepBB = Deps[i].getBB();
1151     MemDepResult DepInfo = Deps[i].getResult();
1152 
1153     if (DeadBlocks.count(DepBB)) {
1154       // A dead dependent mem-op is disguised as a load evaluating the same
1155       // value as the load in question.
1156       ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
1157       continue;
1158     }
1159 
1160     if (!DepInfo.isDef() && !DepInfo.isClobber()) {
1161       UnavailableBlocks.push_back(DepBB);
1162       continue;
1163     }
1164 
1165     // The address being loaded in this non-local block may not be the same as
1166     // the pointer operand of the load if PHI translation occurs.  Make sure
1167     // to consider the right address.
1168     Value *Address = Deps[i].getAddress();
1169 
1170     AvailableValue AV;
1171     if (AnalyzeLoadAvailability(Load, DepInfo, Address, AV)) {
1172       // Subtlety: because we know this was a non-local dependency, we know
1173       // it's safe to materialize anywhere between the instruction within
1174       // DepInfo and the end of its block.
1175       ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
1176                                                           std::move(AV)));
1177     } else {
1178       UnavailableBlocks.push_back(DepBB);
1179     }
1180   }
1181 
1182   assert(NumDeps == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1183          "post condition violation");
1184 }
1185 
1186 void GVN::eliminatePartiallyRedundantLoad(
1187     LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1188     MapVector<BasicBlock *, Value *> &AvailableLoads) {
1189   for (const auto &AvailableLoad : AvailableLoads) {
1190     BasicBlock *UnavailableBlock = AvailableLoad.first;
1191     Value *LoadPtr = AvailableLoad.second;
1192 
1193     auto *NewLoad =
1194         new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
1195                      Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
1196                      Load->getSyncScopeID(), UnavailableBlock->getTerminator());
1197     NewLoad->setDebugLoc(Load->getDebugLoc());
1198     if (MSSAU) {
1199       auto *MSSA = MSSAU->getMemorySSA();
1200       // Get the defining access of the original load or use the load if it is a
1201       // MemoryDef (e.g. because it is volatile). The inserted loads are
1202       // guaranteed to load from the same definition.
1203       auto *LoadAcc = MSSA->getMemoryAccess(Load);
1204       auto *DefiningAcc =
1205           isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
1206       auto *NewAccess = MSSAU->createMemoryAccessInBB(
1207           NewLoad, DefiningAcc, NewLoad->getParent(),
1208           MemorySSA::BeforeTerminator);
1209       if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1210         MSSAU->insertDef(NewDef, /*RenameUses=*/true);
1211       else
1212         MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
1213     }
1214 
1215     // Transfer the old load's AA tags to the new load.
1216     AAMDNodes Tags;
1217     Load->getAAMetadata(Tags);
1218     if (Tags)
1219       NewLoad->setAAMetadata(Tags);
1220 
1221     if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
1222       NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1223     if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
1224       NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1225     if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
1226       NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1227     if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
1228       if (LI &&
1229           LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
1230         NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1231 
1232     // We do not propagate the old load's debug location, because the new
1233     // load now lives in a different BB, and we want to avoid a jumpy line
1234     // table.
1235     // FIXME: How do we retain source locations without causing poor debugging
1236     // behavior?
1237 
1238     // Add the newly created load.
1239     ValuesPerBlock.push_back(
1240         AvailableValueInBlock::get(UnavailableBlock, NewLoad));
1241     MD->invalidateCachedPointerInfo(LoadPtr);
1242     LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1243   }
1244 
1245   // Perform PHI construction.
1246   Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1247   Load->replaceAllUsesWith(V);
1248   if (isa<PHINode>(V))
1249     V->takeName(Load);
1250   if (Instruction *I = dyn_cast<Instruction>(V))
1251     I->setDebugLoc(Load->getDebugLoc());
1252   if (V->getType()->isPtrOrPtrVectorTy())
1253     MD->invalidateCachedPointerInfo(V);
1254   markInstructionForDeletion(Load);
1255   ORE->emit([&]() {
1256     return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
1257            << "load eliminated by PRE";
1258   });
1259 }
1260 
1261 bool GVN::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1262                          UnavailBlkVect &UnavailableBlocks) {
1263   // Okay, we have *some* definitions of the value.  This means that the value
1264   // is available in some of our (transitive) predecessors.  Let's think about
1265   // doing PRE of this load.  This will involve inserting a new load into the
1266   // predecessor when it's not available.  We could do this in general, but
1267   // prefer to not increase code size.  As such, we only do this when we know
1268   // that we only have to insert *one* load (which means we're basically moving
1269   // the load, not inserting a new one).
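  // For example (illustrative CFG): if the loaded value is available coming
  // from %then (say, via a dominating store to the same pointer) but not from
  // %else, PRE inserts one copy of the load at the end of %else and the
  // original load becomes a phi of the two available values.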
1270 
1271   SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1272                                         UnavailableBlocks.end());
1273 
1274   // Let's find the first basic block with more than one predecessor.  Walk
1275   // backwards through predecessors if needed.
1276   BasicBlock *LoadBB = Load->getParent();
1277   BasicBlock *TmpBB = LoadBB;
1278 
1279   // Check that there are no implicit control flow instructions above our load in
1280   // its block. If there is an instruction that doesn't always pass the
1281   // execution to the following instruction, then moving through it may become
1282   // invalid. For example:
1283   //
1284   // int arr[LEN];
1285   // int index = ???;
1286   // ...
1287   // guard(0 <= index && index < LEN);
1288   // use(arr[index]);
1289   //
1290   // It is illegal to move the array access to any point above the guard,
1291   // because if the index is out of bounds we should deoptimize rather than
1292   // access the array.
1293   // Check that there is no guard in this block above our instruction.
1294   bool MustEnsureSafetyOfSpeculativeExecution =
1295       ICF->isDominatedByICFIFromSameBlock(Load);
1296 
1297   while (TmpBB->getSinglePredecessor()) {
1298     TmpBB = TmpBB->getSinglePredecessor();
1299     if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1300       return false;
1301     if (Blockers.count(TmpBB))
1302       return false;
1303 
1304     // If any of these blocks has more than one successor (i.e. if the edge we
1305     // just traversed was critical), then there are other paths through this
1306     // block along which the load may not be anticipated.  Hoisting the load
1307     // above this block would be adding the load to execution paths along
1308     // which it was not previously executed.
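         // For example (hypothetical), if TmpBB ends in
         //   br i1 %c, label %LoadBB, label %other
         // then hoisting the load into TmpBB would also execute it on the
         // path to %other, where it never ran before.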
1309     if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1310       return false;
1311 
1312     // Check that there is no implicit control flow in a block above.
1313     MustEnsureSafetyOfSpeculativeExecution =
1314         MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1315   }
1316 
1317   assert(TmpBB);
1318   LoadBB = TmpBB;
1319 
1320   // Check to see how many predecessors have the loaded value fully
1321   // available.
1322   MapVector<BasicBlock *, Value *> PredLoads;
1323   DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1324   for (const AvailableValueInBlock &AV : ValuesPerBlock)
1325     FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1326   for (BasicBlock *UnavailableBB : UnavailableBlocks)
1327     FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1328 
1329   SmallVector<BasicBlock *, 4> CriticalEdgePred;
1330   for (BasicBlock *Pred : predecessors(LoadBB)) {
1331     // If any predecessor block is an EH pad that does not allow non-PHI
1332     // instructions before the terminator, we can't PRE the load.
1333     if (Pred->getTerminator()->isEHPad()) {
1334       LLVM_DEBUG(
1335           dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1336                  << Pred->getName() << "': " << *Load << '\n');
1337       return false;
1338     }
1339 
1340     if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1341       continue;
1342     }
1343 
1344     if (Pred->getTerminator()->getNumSuccessors() != 1) {
1345       if (isa<IndirectBrInst>(Pred->getTerminator())) {
1346         LLVM_DEBUG(
1347             dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1348                    << Pred->getName() << "': " << *Load << '\n');
1349         return false;
1350       }
1351 
1352       // FIXME: Can we support the fallthrough edge?
1353       if (isa<CallBrInst>(Pred->getTerminator())) {
1354         LLVM_DEBUG(
1355             dbgs() << "COULD NOT PRE LOAD BECAUSE OF CALLBR CRITICAL EDGE '"
1356                    << Pred->getName() << "': " << *Load << '\n');
1357         return false;
1358       }
1359 
1360       if (LoadBB->isEHPad()) {
1361         LLVM_DEBUG(
1362             dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1363                    << Pred->getName() << "': " << *Load << '\n');
1364         return false;
1365       }
1366 
1367       // Do not split backedge as it will break the canonical loop form.
1368       if (!isLoadPRESplitBackedgeEnabled())
1369         if (DT->dominates(LoadBB, Pred)) {
1370           LLVM_DEBUG(
1371               dbgs()
1372               << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1373               << Pred->getName() << "': " << *Load << '\n');
1374           return false;
1375         }
1376 
1377       CriticalEdgePred.push_back(Pred);
1378     } else {
1379       // Only add the predecessors that will not be split for now.
1380       PredLoads[Pred] = nullptr;
1381     }
1382   }
1383 
1384   // Decide whether PRE is profitable for this load.
1385   unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
1386   assert(NumUnavailablePreds != 0 &&
1387          "Fully available value should already be eliminated!");
1388 
1389   // If this load is unavailable in multiple predecessors, reject it.
1390   // FIXME: If we could restructure the CFG, we could make a common pred with
1391   // all the preds that don't have an available Load and insert a new load into
1392   // that one block.
1393   if (NumUnavailablePreds != 1)
1394     return false;
1395 
1396   // Now we know where we will insert the load. We must ensure that it is safe
1397   // to speculatively execute the load at that point.
1398   if (MustEnsureSafetyOfSpeculativeExecution) {
1399     if (CriticalEdgePred.size())
1400       if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), DT))
1401         return false;
1402     for (auto &PL : PredLoads)
1403       if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), DT))
1404         return false;
1405   }
1406 
1407   // Split critical edges, and update the unavailable predecessors accordingly.
1408   for (BasicBlock *OrigPred : CriticalEdgePred) {
1409     BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1410     assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1411     PredLoads[NewPred] = nullptr;
1412     LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1413                       << LoadBB->getName() << '\n');
1414   }
1415 
1416   // Check if the load can safely be moved to all the unavailable predecessors.
1417   bool CanDoPRE = true;
1418   const DataLayout &DL = Load->getModule()->getDataLayout();
1419   SmallVector<Instruction*, 8> NewInsts;
1420   for (auto &PredLoad : PredLoads) {
1421     BasicBlock *UnavailablePred = PredLoad.first;
1422 
1423     // Do PHI translation to get its value in the predecessor if necessary.  The
1424     // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1425     // We do the translation for each edge we skipped by going from Load's block
1426     // to LoadBB, otherwise we might miss pieces needing translation.
1427 
1428     // If all preds have a single successor, then we know it is safe to insert
1429     // the load on the pred (?!?), so we can insert code to materialize the
1430     // pointer if it is not available.
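         // For illustration (hypothetical names): if LoadPtr is
         //   %p = phi i32* [ %a, %pred ], [ %b, %other ]
         // then in %pred the translated pointer is simply %a; for a GEP whose
         // base is such a phi, translation may insert a new GEP on %a into the
         // predecessor, and such insertions are collected in NewInsts.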
1431     Value *LoadPtr = Load->getPointerOperand();
1432     BasicBlock *Cur = Load->getParent();
1433     while (Cur != LoadBB) {
1434       PHITransAddr Address(LoadPtr, DL, AC);
1435       LoadPtr = Address.PHITranslateWithInsertion(
1436           Cur, Cur->getSinglePredecessor(), *DT, NewInsts);
1437       if (!LoadPtr) {
1438         CanDoPRE = false;
1439         break;
1440       }
1441       Cur = Cur->getSinglePredecessor();
1442     }
1443 
1444     if (LoadPtr) {
1445       PHITransAddr Address(LoadPtr, DL, AC);
1446       LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT,
1447                                                   NewInsts);
1448     }
1449     // If we couldn't find or insert a computation of this phi translated value,
1450     // we fail PRE.
1451     if (!LoadPtr) {
1452       LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1453                         << *Load->getPointerOperand() << "\n");
1454       CanDoPRE = false;
1455       break;
1456     }
1457 
1458     PredLoad.second = LoadPtr;
1459   }
1460 
1461   if (!CanDoPRE) {
1462     while (!NewInsts.empty()) {
1463       // Erase instructions generated by the failed PHI translation before
1464       // trying to number them. PHI translation might insert instructions
1465       // in basic blocks other than the current one, and we delete them
1466       // directly, as markInstructionForDeletion only allows removing from the
1467       // current basic block.
1468       NewInsts.pop_back_val()->eraseFromParent();
1469     }
1470     // HINT: Don't revert the edge-splitting, as the following transformation may
1471     // also need to split these critical edges.
1472     return !CriticalEdgePred.empty();
1473   }
1474 
1475   // Okay, we can eliminate this load by inserting a reload in the predecessor
1476   // and using PHI construction to get the value in the other predecessors, do
1477   // it.
1478   LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
1479   LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
1480                                            << " INSTS: " << *NewInsts.back()
1481                                            << '\n');
1482 
1483   // Assign value numbers to the new instructions.
1484   for (Instruction *I : NewInsts) {
1485     // Instructions that have been inserted in predecessor(s) to materialize
1486     // the load address do not retain their original debug locations. Doing
1487     // so could lead to confusing (but correct) source attributions.
1488     I->updateLocationAfterHoist();
1489 
1490     // FIXME: We really _ought_ to insert these value numbers into their
1491     // parent's availability map.  However, in doing so, we risk getting into
1492     // ordering issues.  If a block hasn't been processed yet, we would be
1493     // marking a value as AVAIL-IN, which isn't what we intend.
1494     VN.lookupOrAdd(I);
1495   }
1496 
1497   eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads);
1498   ++NumPRELoad;
1499   return true;
1500 }
1501 
1502 bool GVN::performLoopLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1503                              UnavailBlkVect &UnavailableBlocks) {
1504   if (!LI)
1505     return false;
1506 
1507   const Loop *L = LI->getLoopFor(Load->getParent());
1508   // TODO: Generalize to other loop blocks that dominate the latch.
1509   if (!L || L->getHeader() != Load->getParent())
1510     return false;
1511 
1512   BasicBlock *Preheader = L->getLoopPreheader();
1513   BasicBlock *Latch = L->getLoopLatch();
1514   if (!Preheader || !Latch)
1515     return false;
1516 
1517   Value *LoadPtr = Load->getPointerOperand();
1518   // Must be available in preheader.
1519   if (!L->isLoopInvariant(LoadPtr))
1520     return false;
1521 
1522   // We plan to hoist the load to the preheader without introducing a new fault.
1523   // In order to do that, we need to prove that the loop cannot be side-exited
1524   // between first entering the loop header and execution of the load.
1525   if (ICF->isDominatedByICFIFromSameBlock(Load))
1526     return false;
1527 
1528   BasicBlock *LoopBlock = nullptr;
1529   for (auto *Blocker : UnavailableBlocks) {
1530     // Blockers from outside the loop are handled in the preheader.
1531     if (!L->contains(Blocker))
1532       continue;
1533 
1534     // Only allow one loop block. The loop header is executed at least as
1535     // frequently as any loop block, and likely much more frequently. But
1536     // in case of multiple loop blocks, we need extra information (such as block
1537     // frequency info) to understand whether it is profitable to PRE into
1538     // multiple loop blocks.
1539     if (LoopBlock)
1540       return false;
1541 
1542     // Do not sink into inner loops. This may be unprofitable.
1543     if (L != LI->getLoopFor(Blocker))
1544       return false;
1545 
1546     // Blocks that dominate the latch execute on every single iteration, maybe
1547     // except the last one. So PREing into these blocks doesn't make much sense
1548     // in most cases. But the blocks that do not necessarily execute on each
1549     // iteration are sometimes much colder than the header, and this is when
1550     // PRE is potentially profitable.
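         // Illustrative shape (hypothetical): the header loads %p on every
         // iteration, and a rarely-taken block clobbers the memory at %p.
         // Hoisting the load to the preheader and reloading only in that cold
         // block pays off precisely because the block does not dominate the
         // latch.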
1551     if (DT->dominates(Blocker, Latch))
1552       return false;
1553 
1554     // Make sure that the terminator itself doesn't clobber.
1555     if (Blocker->getTerminator()->mayWriteToMemory())
1556       return false;
1557 
1558     LoopBlock = Blocker;
1559   }
1560 
1561   if (!LoopBlock)
1562     return false;
1563 
1564   // Make sure the memory at this pointer cannot be freed, so that we can
1565   // safely reload from it after a clobber.
1566   if (LoadPtr->canBeFreed())
1567     return false;
1568 
1569   // TODO: Support critical edge splitting if blocker has more than 1 successor.
1570   MapVector<BasicBlock *, Value *> AvailableLoads;
1571   AvailableLoads[LoopBlock] = LoadPtr;
1572   AvailableLoads[Preheader] = LoadPtr;
1573 
1574   LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
1575   eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads);
1576   ++NumPRELoopLoad;
1577   return true;
1578 }
1579 
1580 static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
1581                            OptimizationRemarkEmitter *ORE) {
1582   using namespace ore;
1583 
1584   ORE->emit([&]() {
1585     return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
1586            << "load of type " << NV("Type", Load->getType()) << " eliminated"
1587            << setExtraArgs() << " in favor of "
1588            << NV("InfavorOfValue", AvailableValue);
1589   });
1590 }
1591 
1592 /// Attempt to eliminate a load whose dependencies are
1593 /// non-local by performing PHI construction.
1594 bool GVN::processNonLocalLoad(LoadInst *Load) {
1595   // Non-local speculations are not allowed under ASan or HWASan.
1596   if (Load->getParent()->getParent()->hasFnAttribute(
1597           Attribute::SanitizeAddress) ||
1598       Load->getParent()->getParent()->hasFnAttribute(
1599           Attribute::SanitizeHWAddress))
1600     return false;
1601 
1602   // Step 1: Find the non-local dependencies of the load.
1603   LoadDepVect Deps;
1604   MD->getNonLocalPointerDependency(Load, Deps);
1605 
1606   // If we had to process more than one hundred blocks to find the
1607   // dependencies, this load isn't worth worrying about.  Optimizing
1608   // it will be too expensive.
1609   unsigned NumDeps = Deps.size();
1610   if (NumDeps > MaxNumDeps)
1611     return false;
1612 
1613   // If we had a phi translation failure, we'll have a single entry which is a
1614   // clobber in the current block.  Reject this early.
1615   if (NumDeps == 1 &&
1616       !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1617     LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
1618                dbgs() << " has unknown dependencies\n";);
1619     return false;
1620   }
1621 
1622   bool Changed = false;
1623   // If this load follows a GEP, see if we can PRE the indices before analyzing.
1624   if (GetElementPtrInst *GEP =
1625           dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
1626     for (GetElementPtrInst::op_iterator OI = GEP->idx_begin(),
1627                                         OE = GEP->idx_end();
1628          OI != OE; ++OI)
1629       if (Instruction *I = dyn_cast<Instruction>(OI->get()))
1630         Changed |= performScalarPRE(I);
1631   }
1632 
1633   // Step 2: Analyze the availability of the load
1634   AvailValInBlkVect ValuesPerBlock;
1635   UnavailBlkVect UnavailableBlocks;
1636   AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1637 
1638   // If we have no predecessors that produce a known value for this load, exit
1639   // early.
1640   if (ValuesPerBlock.empty())
1641     return Changed;
1642 
1643   // Step 3: Eliminate full redundancy.
1644   //
1645   // If all of the instructions we depend on produce a known value for this
1646   // load, then it is fully redundant and we can use PHI insertion to compute
1647   // its value.  Insert PHIs and remove the fully redundant value now.
1648   if (UnavailableBlocks.empty()) {
1649     LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');
1650 
1651     // Perform PHI construction.
1652     Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1653     Load->replaceAllUsesWith(V);
1654 
1655     if (isa<PHINode>(V))
1656       V->takeName(Load);
1657     if (Instruction *I = dyn_cast<Instruction>(V))
1658       // If instruction I has debug info, then we should not update it.
1659       // Also, if I has a null DebugLoc, then it is still potentially incorrect
1660       // to propagate Load's DebugLoc because Load may not post-dominate I.
1661       if (Load->getDebugLoc() && Load->getParent() == I->getParent())
1662         I->setDebugLoc(Load->getDebugLoc());
1663     if (V->getType()->isPtrOrPtrVectorTy())
1664       MD->invalidateCachedPointerInfo(V);
1665     markInstructionForDeletion(Load);
1666     ++NumGVNLoad;
1667     reportLoadElim(Load, V, ORE);
1668     return true;
1669   }
1670 
1671   // Step 4: Eliminate partial redundancy.
1672   if (!isPREEnabled() || !isLoadPREEnabled())
1673     return Changed;
1674   if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent()))
1675     return Changed;
1676 
1677   return Changed || PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
1678          performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks);
1679 }
1680 
1681 static bool impliesEquivalanceIfTrue(CmpInst* Cmp) {
1682   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
1683     return true;
1684 
1685   // Floating point comparisons can be equal, but not equivalent.  Cases:
1686   // NaNs for unordered operators
1687   // +0.0 vs -0.0 for all operators
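       // For example, "fcmp oeq double 0.0, -0.0" folds to true, yet +0.0 and
       // -0.0 are not interchangeable: 1.0/+0.0 is +inf while 1.0/-0.0 is
       // -inf. So oeq-equality alone must not license replacing one operand
       // with the other.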
1688   if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1689       (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1690        Cmp->getFastMathFlags().noNaNs())) {
1691       Value *LHS = Cmp->getOperand(0);
1692       Value *RHS = Cmp->getOperand(1);
1693       // If we can prove either side non-zero, then equality must imply
1694       // equivalence.
1695       // FIXME: We should do this optimization if 'no signed zeros' is
1696       // applicable via an instruction-level fast-math-flag or some other
1697       // indicator that relaxed FP semantics are being used.
1698       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1699         return true;
1700       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1701         return true;
1702       // TODO: Handle vector floating point constants
1703   }
1704   return false;
1705 }
1706 
1707 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) {
1708   if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1709     return true;
1710 
1711   // Floating point comparisons can be equal, but not equivalent.  Cases:
1712   // NaNs for unordered operators
1713   // +0.0 vs -0.0 for all operators
1714   if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
1715        Cmp->getFastMathFlags().noNaNs()) ||
1716       Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
1717       Value *LHS = Cmp->getOperand(0);
1718       Value *RHS = Cmp->getOperand(1);
1719       // If we can prove either side non-zero, then equality must imply
1720       // equivalence.
1721       // FIXME: We should do this optimization if 'no signed zeros' is
1722       // applicable via an instruction-level fast-math-flag or some other
1723       // indicator that relaxed FP semantics are being used.
1724       if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1725         return true;
1726       if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1727         return true;
1728       // TODO: Handle vector floating point constants
1729   }
1730   return false;
1731 }
1732 
1733 
1734 static bool hasUsersIn(Value *V, BasicBlock *BB) {
1735   for (User *U : V->users())
1736     if (isa<Instruction>(U) &&
1737         cast<Instruction>(U)->getParent() == BB)
1738       return true;
1739   return false;
1740 }
1741 
1742 bool GVN::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
1743   Value *V = IntrinsicI->getArgOperand(0);
1744 
1745   if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1746     if (Cond->isZero()) {
1747       Type *Int8Ty = Type::getInt8Ty(V->getContext());
1748       // Insert a new store-to-null instruction before the assume to indicate
1749       // that this code is not reachable.  FIXME: We could insert an unreachable
1750       // instruction directly because we can modify the CFG.
1751       auto *NewS = new StoreInst(UndefValue::get(Int8Ty),
1752                                  Constant::getNullValue(Int8Ty->getPointerTo()),
1753                                  IntrinsicI);
1754       if (MSSAU) {
1755         const MemoryUseOrDef *FirstNonDom = nullptr;
1756         const auto *AL =
1757             MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
1758 
1759         // If there are accesses in the current basic block, find the first one
1760         // that does not come before NewS. The new memory access is inserted
1761         // after the found access or before the terminator if no such access is
1762         // found.
1763         if (AL) {
1764           for (auto &Acc : *AL) {
1765             if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
1766               if (!Current->getMemoryInst()->comesBefore(NewS)) {
1767                 FirstNonDom = Current;
1768                 break;
1769               }
1770           }
1771         }
1772 
1773         // This added store is to null, so it will never be executed and we can
1774         // just use the LiveOnEntry def as the defining access.
1775         auto *NewDef =
1776             FirstNonDom ? MSSAU->createMemoryAccessBefore(
1777                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1778                               const_cast<MemoryUseOrDef *>(FirstNonDom))
1779                         : MSSAU->createMemoryAccessInBB(
1780                               NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1781                               NewS->getParent(), MemorySSA::BeforeTerminator);
1782 
1783         MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
1784       }
1785     }
1786     if (isAssumeWithEmptyBundle(*IntrinsicI))
1787       markInstructionForDeletion(IntrinsicI);
1788     return false;
1789   } else if (isa<Constant>(V)) {
1790     // If it's constant but not false, it must evaluate to true. This means our
1791     // assume is assume(true) and is thus pointless, and we don't want to do
1792     // anything more here.
1793     return false;
1794   }
1795 
1796   Constant *True = ConstantInt::getTrue(V->getContext());
1797   bool Changed = false;
1798 
1799   for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
1800     BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
1801 
1802     // This property only holds in dominated successors; propagateEquality
1803     // will check dominance for us.
1804     Changed |= propagateEquality(V, True, Edge, false);
1805   }
1806 
1807   // We can replace the assume value with true, which covers cases like this:
1808   // call void @llvm.assume(i1 %cmp)
1809   // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
1810   ReplaceOperandsWithMap[V] = True;
1811 
1812   // Similarly, after assume(!NotV) we know that NotV == false.
1813   Value *NotV;
1814   if (match(V, m_Not(m_Value(NotV))))
1815     ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
1816 
1817   // If we find an equality fact, canonicalize all dominated uses in this block
1818   // to one of the two values.  We heuristically choose the "oldest" of the
1819   // two where age is determined by value number. (Note that propagateEquality
1820   // above handles the cross block case.)
1821   //
1822   // Key cases to cover are:
1823   // 1)
1824   // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
1825   // call void @llvm.assume(i1 %cmp)
1826   // ret float %0 ; will change it to ret float 3.000000e+00
1827   // 2)
1828   // %load = load float, float* %addr
1829   // %cmp = fcmp oeq float %load, %0
1830   // call void @llvm.assume(i1 %cmp)
1831   // ret float %load ; will change it to ret float %0
1832   if (auto *CmpI = dyn_cast<CmpInst>(V)) {
1833     if (impliesEquivalanceIfTrue(CmpI)) {
1834       Value *CmpLHS = CmpI->getOperand(0);
1835       Value *CmpRHS = CmpI->getOperand(1);
1836       // Heuristically pick the better replacement -- the choice of heuristic
1837       // isn't terribly important here, but the fact we canonicalize on some
1838       // replacement is for exposing other simplifications.
1839       // TODO: pull this out as a helper function and reuse w/existing
1840       // (slightly different) logic.
1841       if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
1842         std::swap(CmpLHS, CmpRHS);
1843       if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
1844         std::swap(CmpLHS, CmpRHS);
1845       if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
1846           (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
1847         // Move the 'oldest' value to the right-hand side, using the value
1848         // number as a proxy for age.
1849         uint32_t LVN = VN.lookupOrAdd(CmpLHS);
1850         uint32_t RVN = VN.lookupOrAdd(CmpRHS);
1851         if (LVN < RVN)
1852           std::swap(CmpLHS, CmpRHS);
1853       }
1854 
1855       // Handle the degenerate case where we either haven't pruned a dead path
1856       // or removed a trivial assume yet.
1857       if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
1858         return Changed;
1859 
1860       LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
1861                  << *CmpLHS << " with "
1862                  << *CmpRHS << " in block "
1863                  << IntrinsicI->getParent()->getName() << "\n");
1864 
1865 
1866       // Set up the replacement map - this handles uses within the same block.
1867       if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
1868         ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
1869 
1870       // NOTE: The non-block local cases are handled by the call to
1871       // propagateEquality above; this block is just about handling the block
1872       // local cases.  TODO: There's a bunch of logic in propagateEqualiy which
1873       // local cases.  TODO: There's a bunch of logic in propagateEquality which
1874     }
1875   }
1876   return Changed;
1877 }
1878 
1879 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
1880   patchReplacementInstruction(I, Repl);
1881   I->replaceAllUsesWith(Repl);
1882 }
1883 
1884 /// Attempt to eliminate a load, first by eliminating it
1885 /// locally, and then attempting non-local elimination if that fails.
1886 bool GVN::processLoad(LoadInst *L) {
1887   if (!MD)
1888     return false;
1889 
1890   // This code hasn't been audited for ordered or volatile memory accesses.
1891   if (!L->isUnordered())
1892     return false;
1893 
1894   if (L->use_empty()) {
1895     markInstructionForDeletion(L);
1896     return true;
1897   }
1898 
1899   // ... to a pointer that has been loaded from before...
1900   MemDepResult Dep = MD->getDependency(L);
1901 
1902   // If it is defined in another block, try harder.
1903   if (Dep.isNonLocal())
1904     return processNonLocalLoad(L);
1905 
1906   // Only handle the local case below
1907   if (!Dep.isDef() && !Dep.isClobber()) {
1908     // This might be a NonFuncLocal or an Unknown
1909     LLVM_DEBUG(
1910         // fast print dep, using operator<< on instruction is too slow.
1911         dbgs() << "GVN: load "; L->printAsOperand(dbgs());
1912         dbgs() << " has unknown dependence\n";);
1913     return false;
1914   }
1915 
1916   AvailableValue AV;
1917   if (AnalyzeLoadAvailability(L, Dep, L->getPointerOperand(), AV)) {
1918     Value *AvailableValue = AV.MaterializeAdjustedValue(L, L, *this);
1919 
1920     // Replace the load!
1921     patchAndReplaceAllUsesWith(L, AvailableValue);
1922     markInstructionForDeletion(L);
1923     if (MSSAU)
1924       MSSAU->removeMemoryAccess(L);
1925     ++NumGVNLoad;
1926     reportLoadElim(L, AvailableValue, ORE);
1927     // Tell MDA to reexamine the reused pointer since we might have more
1928     // information after forwarding it.
1929     if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
1930       MD->invalidateCachedPointerInfo(AvailableValue);
1931     return true;
1932   }
1933 
1934   return false;
1935 }
1936 
1937 /// Return a pair: the first field is the value number of \p Exp, and the
1938 /// second field indicates whether that value number is newly created.
1939 std::pair<uint32_t, bool>
1940 GVN::ValueTable::assignExpNewValueNum(Expression &Exp) {
1941   uint32_t &e = expressionNumbering[Exp];
1942   bool CreateNewValNum = !e;
1943   if (CreateNewValNum) {
1944     Expressions.push_back(Exp);
1945     if (ExprIdx.size() < nextValueNumber + 1)
1946       ExprIdx.resize(nextValueNumber * 2);
1947     e = nextValueNumber;
1948     ExprIdx[nextValueNumber++] = nextExprNumber++;
1949   }
1950   return {e, CreateNewValNum};
1951 }
1952 
1953 /// Return whether all the values related with the same \p Num are
1954 /// defined in \p BB.
1955 bool GVN::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
1956                                      GVN &Gvn) {
1957   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
1958   while (Vals && Vals->BB == BB)
1959     Vals = Vals->Next;
1960   return !Vals;
1961 }
1962 
1963 /// Wrap phiTranslateImpl to provide caching functionality.
1964 uint32_t GVN::ValueTable::phiTranslate(const BasicBlock *Pred,
1965                                        const BasicBlock *PhiBlock, uint32_t Num,
1966                                        GVN &Gvn) {
1967   auto FindRes = PhiTranslateTable.find({Num, Pred});
1968   if (FindRes != PhiTranslateTable.end())
1969     return FindRes->second;
1970   uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
1971   PhiTranslateTable.insert({{Num, Pred}, NewNum});
1972   return NewNum;
1973 }
1974 
1975 // Return true if the value numbers \p Num and \p NewNum represent equal
1976 // values. Return false if the result is unknown.
1977 bool GVN::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
1978                                        const BasicBlock *Pred,
1979                                        const BasicBlock *PhiBlock, GVN &Gvn) {
1980   CallInst *Call = nullptr;
1981   LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
1982   while (Vals) {
1983     Call = dyn_cast<CallInst>(Vals->Val);
1984     if (Call && Call->getParent() == PhiBlock)
1985       break;
1986     Vals = Vals->Next;
1987   }
1988 
1989   if (AA->doesNotAccessMemory(Call))
1990     return true;
1991 
1992   if (!MD || !AA->onlyReadsMemory(Call))
1993     return false;
1994 
1995   MemDepResult local_dep = MD->getDependency(Call);
1996   if (!local_dep.isNonLocal())
1997     return false;
1998 
1999   const MemoryDependenceResults::NonLocalDepInfo &deps =
2000       MD->getNonLocalCallDependency(Call);
2001 
2002   // Check to see if the Call has no function local clobber.
2003   for (const NonLocalDepEntry &D : deps) {
2004     if (D.getResult().isNonFuncLocal())
2005       return true;
2006   }
2007   return false;
2008 }
2009 
2010 /// Translate value number \p Num using phis, so that it has the values of
2011 /// the phis in BB.
2012 uint32_t GVN::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
2013                                            const BasicBlock *PhiBlock,
2014                                            uint32_t Num, GVN &Gvn) {
2015   if (PHINode *PN = NumberingPhi[Num]) {
2016     for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2017       if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2018         if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
2019           return TransVal;
2020     }
2021     return Num;
2022   }
2023 
2024   // If any value related with Num is defined in a BB other than
2025   // PhiBlock, it cannot depend on a phi in PhiBlock without going through
2026   // a backedge. We can do an early exit in that case to save compile time.
2027   if (!areAllValsInBB(Num, PhiBlock, Gvn))
2028     return Num;
2029 
2030   if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2031     return Num;
2032   Expression Exp = Expressions[ExprIdx[Num]];
2033 
2034   for (unsigned i = 0; i < Exp.varargs.size(); i++) {
2035     // For InsertValue and ExtractValue, some varargs are index numbers
2036     // instead of value numbers. Those index numbers should not be
2037     // translated.
2038     if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
2039         (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
2040         (i > 1 && Exp.opcode == Instruction::ShuffleVector))
2041       continue;
2042     Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
2043   }
2044 
2045   if (Exp.commutative) {
2046     assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
2047     if (Exp.varargs[0] > Exp.varargs[1]) {
2048       std::swap(Exp.varargs[0], Exp.varargs[1]);
2049       uint32_t Opcode = Exp.opcode >> 8;
2050       if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2051         Exp.opcode = (Opcode << 8) |
2052                      CmpInst::getSwappedPredicate(
2053                          static_cast<CmpInst::Predicate>(Exp.opcode & 255));
2054     }
2055   }
2056 
2057   if (uint32_t NewNum = expressionNumbering[Exp]) {
2058     if (Exp.opcode == Instruction::Call && NewNum != Num)
2059       return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2060     return NewNum;
2061   }
2062   return Num;
2063 }
2064 
2065 /// Erase stale entry from phiTranslate cache so phiTranslate can be computed
2066 /// again.
2067 void GVN::ValueTable::eraseTranslateCacheEntry(uint32_t Num,
2068                                                const BasicBlock &CurrBlock) {
2069   for (const BasicBlock *Pred : predecessors(&CurrBlock))
2070     PhiTranslateTable.erase({Num, Pred});
2071 }
2072 
2073 // In order to find a leader for a given value number at a
2074 // specific basic block, we first obtain the list of all Values for that number,
2075 // and then scan the list to find one whose block dominates the block in
2076 // question.  This is fast because dominator tree queries consist of only
2077 // a few comparisons of DFS numbers.
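     // For example (hypothetical), if a value number has leader entries
     // {%a in BB1, %b in BB2} and the query block is dominated by BB2 but not
     // by BB1, the scan returns %b; a dominating constant, if present, is
     // returned immediately.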
2078 Value *GVN::findLeader(const BasicBlock *BB, uint32_t num) {
2079   LeaderTableEntry Vals = LeaderTable[num];
2080   if (!Vals.Val) return nullptr;
2081 
2082   Value *Val = nullptr;
2083   if (DT->dominates(Vals.BB, BB)) {
2084     Val = Vals.Val;
2085     if (isa<Constant>(Val)) return Val;
2086   }
2087 
2088   LeaderTableEntry* Next = Vals.Next;
2089   while (Next) {
2090     if (DT->dominates(Next->BB, BB)) {
2091       if (isa<Constant>(Next->Val)) return Next->Val;
2092       if (!Val) Val = Next->Val;
2093     }
2094 
2095     Next = Next->Next;
2096   }
2097 
2098   return Val;
2099 }
2100 
2101 /// There is an edge from 'Src' to 'Dst'.  Return
2102 /// true if every path from the entry block to 'Dst' passes via this edge.  In
2103 /// particular 'Dst' must not be reachable via another edge from 'Src'.
2104 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
2105                                        DominatorTree *DT) {
2106   // While in theory it is interesting to consider the case in which Dst has
2107   // more than one predecessor, because Dst might be part of a loop which is
2108   // only reachable from Src, in practice it is pointless since at the time
2109   // GVN runs all such loops have preheaders, which means that Dst will have
2110   // been changed to have only one predecessor, namely Src.
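       // For example (hypothetical), "br i1 %c, label %Dst, label %Dst" gives
       // Dst two incoming edges from Src, so getSinglePredecessor() returns
       // null and we conservatively answer false.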
2111   const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
2112   assert((!Pred || Pred == E.getStart()) &&
2113          "No edge between these basic blocks!");
2114   return Pred != nullptr;
2115 }
2116 
2117 void GVN::assignBlockRPONumber(Function &F) {
2118   BlockRPONumber.clear();
2119   uint32_t NextBlockNumber = 1;
2120   ReversePostOrderTraversal<Function *> RPOT(&F);
2121   for (BasicBlock *BB : RPOT)
2122     BlockRPONumber[BB] = NextBlockNumber++;
2123   InvalidBlockRPONumbers = false;
2124 }
2125 
2126 bool GVN::replaceOperandsForInBlockEquality(Instruction *Instr) const {
2127   bool Changed = false;
2128   for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
2129     Value *Operand = Instr->getOperand(OpNum);
2130     auto it = ReplaceOperandsWithMap.find(Operand);
2131     if (it != ReplaceOperandsWithMap.end()) {
2132       LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
2133                         << *it->second << " in instruction " << *Instr << '\n');
2134       Instr->setOperand(OpNum, it->second);
2135       Changed = true;
2136     }
2137   }
2138   return Changed;
2139 }
2140 
2141 /// The given values are known to be equal in every block
2142 /// dominated by 'Root'.  Exploit this, for example by replacing 'LHS' with
2143 /// 'RHS' everywhere in the scope.  Returns whether a change was made.
2144 /// If DominatesByEdge is false, then it means that we will propagate the RHS
2145 /// value starting from the end of Root.Start.
2146 bool GVN::propagateEquality(Value *LHS, Value *RHS, const BasicBlockEdge &Root,
2147                             bool DominatesByEdge) {
2148   SmallVector<std::pair<Value*, Value*>, 4> Worklist;
2149   Worklist.push_back(std::make_pair(LHS, RHS));
2150   bool Changed = false;
2151   // For speed, compute a conservative fast approximation to
2152   // DT->dominates(Root, Root.getEnd());
2153   const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2154 
2155   while (!Worklist.empty()) {
2156     std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2157     LHS = Item.first; RHS = Item.second;
2158 
2159     if (LHS == RHS)
2160       continue;
2161     assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2162 
2163     // Don't try to propagate equalities between constants.
2164     if (isa<Constant>(LHS) && isa<Constant>(RHS))
2165       continue;
2166 
2167     // Prefer a constant on the right-hand side, or an Argument if no constants.
2168     if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2169       std::swap(LHS, RHS);
2170     assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2171 
2172     // If there is no obvious reason to prefer the left-hand side over the
2173     // right-hand side, ensure the longest lived term is on the right-hand side,
2174     // so the shortest lived term will be replaced by the longest lived.
2175     // This tends to expose more simplifications.
2176     uint32_t LVN = VN.lookupOrAdd(LHS);
2177     if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2178         (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2179       // Move the 'oldest' value to the right-hand side, using the value number
2180       // as a proxy for age.
2181       uint32_t RVN = VN.lookupOrAdd(RHS);
2182       if (LVN < RVN) {
2183         std::swap(LHS, RHS);
2184         LVN = RVN;
2185       }
2186     }
2187 
2188     // If value numbering later sees that an instruction in the scope is equal
2189     // to 'LHS' then ensure it will be turned into 'RHS'.  In order to preserve
2190     // the invariant that instructions only occur in the leader table for their
2191     // own value number (this is used by removeFromLeaderTable), do not do this
2192     // if RHS is an instruction (if an instruction in the scope is morphed into
2193     // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2194     // using the leader table is about compiling faster, not optimizing better).
2195     // The leader table only tracks basic blocks, not edges. Only add if we
2196     // have the simple case where the edge dominates the end.
2197     if (RootDominatesEnd && !isa<Instruction>(RHS))
2198       addToLeaderTable(LVN, RHS, Root.getEnd());
2199 
2200     // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope.  As
2201     // LHS always has at least one use that is not dominated by Root, this will
2202     // never do anything if LHS has only one use.
2203     if (!LHS->hasOneUse()) {
2204       unsigned NumReplacements =
2205           DominatesByEdge
2206               ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
2207               : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2208 
2209       Changed |= NumReplacements > 0;
2210       NumGVNEqProp += NumReplacements;
2211       // Cached information for anything that uses LHS will be invalid.
2212       if (MD)
2213         MD->invalidateCachedPointerInfo(LHS);
2214     }
2215 
2216     // Now try to deduce additional equalities from this one. For example, if
2217     // the known equality was "(A != B)" == "false" then it follows that A and B
2218     // are equal in the scope. Only boolean equalities with an explicit true or
2219     // false RHS are currently supported.
2220     if (!RHS->getType()->isIntegerTy(1))
2221       // Not a boolean equality - bail out.
2222       continue;
2223     ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2224     if (!CI)
2225       // RHS neither 'true' nor 'false' - bail out.
2226       continue;
2227     // Whether RHS equals 'true'.  Otherwise it equals 'false'.
2228     bool isKnownTrue = CI->isMinusOne();
2229     bool isKnownFalse = !isKnownTrue;
2230 
2231     // If "A && B" is known true then both A and B are known true.  If "A || B"
2232     // is known false then both A and B are known false.
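         // For example (hypothetical), if "%t = and i1 %a, %b" is known true
         // on this edge, both %a and %b are enqueued as equal to true and
         // propagated the same way.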
2233     Value *A, *B;
2234     if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2235         (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2236       Worklist.push_back(std::make_pair(A, RHS));
2237       Worklist.push_back(std::make_pair(B, RHS));
2238       continue;
2239     }
2240 
2241     // If we are propagating an equality like "(A == B)" == "true" then also
2242     // propagate the equality A == B.  When propagating a comparison such as
2243     // "(A >= B)" == "true", replace all instances of "A < B" with "false".
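         // For example (hypothetical), from "icmp sge i32 %x, %y" == true we
         // can fold a dominated "icmp slt i32 %x, %y" to false, found via its
         // would-be value number rather than by scanning for the instruction.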
2244     if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2245       Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2246 
2247       // If "A == B" is known true, or "A != B" is known false, then replace
2248       // A with B everywhere in the scope.  For floating point operations, we
2249       // have to be careful since equality does not always imply equivalance.
2250       if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
2251           (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
2252         Worklist.push_back(std::make_pair(Op0, Op1));
2253 
2254       // If "A >= B" is known true, replace "A < B" with false everywhere.
2255       CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2256       Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2257       // Since we don't have the instruction "A < B" immediately to hand, work
2258       // out the value number that it would have and use that to find an
2259       // appropriate instruction (if any).
2260       uint32_t NextNum = VN.getNextUnusedValueNumber();
2261       uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2262       // If the number we were assigned was brand new then there is no point in
2263       // looking for an instruction realizing it: there cannot be one!
2264       if (Num < NextNum) {
2265         Value *NotCmp = findLeader(Root.getEnd(), Num);
2266         if (NotCmp && isa<Instruction>(NotCmp)) {
2267           unsigned NumReplacements =
2268               DominatesByEdge
2269                   ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2270                   : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2271                                              Root.getStart());
2272           Changed |= NumReplacements > 0;
2273           NumGVNEqProp += NumReplacements;
2274           // Cached information for anything that uses NotCmp will be invalid.
2275           if (MD)
2276             MD->invalidateCachedPointerInfo(NotCmp);
2277         }
2278       }
2279       // Ensure that any instruction in scope that gets the "A < B" value number
2280       // is replaced with false.
2281       // The leader table only tracks basic blocks, not edges. Only add if we
2282       // have the simple case where the edge dominates the end.
2283       if (RootDominatesEnd)
2284         addToLeaderTable(Num, NotVal, Root.getEnd());
2285 
2286       continue;
2287     }
2288   }
2289 
2290   return Changed;
2291 }
2292 
2293 /// When calculating availability, handle an instruction
2294 /// by inserting it into the appropriate sets
2295 bool GVN::processInstruction(Instruction *I) {
2296   // Ignore dbg info intrinsics.
2297   if (isa<DbgInfoIntrinsic>(I))
2298     return false;
2299 
2300   // If the instruction can be easily simplified then do so now in preference
2301   // to value numbering it.  Value numbering often exposes redundancies, for
2302   // example if it determines that %y is equal to %x then the instruction
2303   // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2304   const DataLayout &DL = I->getModule()->getDataLayout();
2305   if (Value *V = SimplifyInstruction(I, {DL, TLI, DT, AC})) {
2306     bool Changed = false;
2307     if (!I->use_empty()) {
2308       // Simplification can cause a special instruction to become not special.
2309       // For example, devirtualization to a willreturn function.
2310       ICF->removeUsersOf(I);
2311       I->replaceAllUsesWith(V);
2312       Changed = true;
2313     }
2314     if (isInstructionTriviallyDead(I, TLI)) {
2315       markInstructionForDeletion(I);
2316       Changed = true;
2317     }
2318     if (Changed) {
2319       if (MD && V->getType()->isPtrOrPtrVectorTy())
2320         MD->invalidateCachedPointerInfo(V);
2321       ++NumGVNSimpl;
2322       return true;
2323     }
2324   }
2325 
2326   if (auto *Assume = dyn_cast<AssumeInst>(I))
2327     return processAssumeIntrinsic(Assume);
2328 
2329   if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
2330     if (processLoad(Load))
2331       return true;
2332 
2333     unsigned Num = VN.lookupOrAdd(Load);
2334     addToLeaderTable(Num, Load, Load->getParent());
2335     return false;
2336   }
2337 
2338   // For conditional branches, we can perform simple conditional propagation on
2339   // the condition value itself.
2340   if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2341     if (!BI->isConditional())
2342       return false;
2343 
2344     if (isa<Constant>(BI->getCondition()))
2345       return processFoldableCondBr(BI);
2346 
2347     Value *BranchCond = BI->getCondition();
2348     BasicBlock *TrueSucc = BI->getSuccessor(0);
2349     BasicBlock *FalseSucc = BI->getSuccessor(1);
2350     // Avoid multiple edges early.
2351     if (TrueSucc == FalseSucc)
2352       return false;
2353 
2354     BasicBlock *Parent = BI->getParent();
2355     bool Changed = false;
2356 
2357     Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2358     BasicBlockEdge TrueE(Parent, TrueSucc);
2359     Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2360 
2361     Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2362     BasicBlockEdge FalseE(Parent, FalseSucc);
2363     Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2364 
2365     return Changed;
2366   }
2367 
2368   // For switches, propagate the case values into the case destinations.
2369   if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2370     Value *SwitchCond = SI->getCondition();
2371     BasicBlock *Parent = SI->getParent();
2372     bool Changed = false;
2373 
2374     // Remember how many outgoing edges there are to every successor.
2375     SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2376     for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
2377       ++SwitchEdges[SI->getSuccessor(i)];
2378 
2379     for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2380          i != e; ++i) {
2381       BasicBlock *Dst = i->getCaseSuccessor();
2382       // If there is only a single edge, propagate the case value into it.
2383       if (SwitchEdges.lookup(Dst) == 1) {
2384         BasicBlockEdge E(Parent, Dst);
2385         Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2386       }
2387     }
2388     return Changed;
2389   }
2390 
2391   // Instructions with void type don't return a value, so there's
2392   // no point in trying to find redundancies in them.
2393   if (I->getType()->isVoidTy())
2394     return false;
2395 
2396   uint32_t NextNum = VN.getNextUnusedValueNumber();
2397   unsigned Num = VN.lookupOrAdd(I);
2398 
2399   // Allocations are always uniquely numbered, so we can save time and memory
2400   // by fast failing them.
2401   if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2402     addToLeaderTable(Num, I, I->getParent());
2403     return false;
2404   }
2405 
2406   // If the number we were assigned was a brand new VN, then we don't
2407   // need to do a lookup to see if the number already exists
2408   // somewhere in the domtree: it can't!
2409   if (Num >= NextNum) {
2410     addToLeaderTable(Num, I, I->getParent());
2411     return false;
2412   }
2413 
2414   // Perform fast-path value-number based elimination of values inherited from
2415   // dominators.
2416   Value *Repl = findLeader(I->getParent(), Num);
2417   if (!Repl) {
2418     // Failure, just remember this instance for future use.
2419     addToLeaderTable(Num, I, I->getParent());
2420     return false;
2421   } else if (Repl == I) {
2422     // If I was the result of a shortcut PRE, it might already be in the table
2423     // and the best replacement for itself. Nothing to do.
2424     return false;
2425   }
2426 
2427   // Remove it!
2428   patchAndReplaceAllUsesWith(I, Repl);
2429   if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2430     MD->invalidateCachedPointerInfo(Repl);
2431   markInstructionForDeletion(I);
2432   return true;
2433 }
2434 
2435 /// runImpl - This is the main transformation entry point for a function.
2436 bool GVN::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2437                   const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2438                   MemoryDependenceResults *RunMD, LoopInfo *LI,
2439                   OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2440   AC = &RunAC;
2441   DT = &RunDT;
2442   VN.setDomTree(DT);
2443   TLI = &RunTLI;
2444   VN.setAliasAnalysis(&RunAA);
2445   MD = RunMD;
2446   ImplicitControlFlowTracking ImplicitCFT;
2447   ICF = &ImplicitCFT;
2448   this->LI = LI;
2449   VN.setMemDep(MD);
2450   ORE = RunORE;
2451   InvalidBlockRPONumbers = true;
2452   MemorySSAUpdater Updater(MSSA);
2453   MSSAU = MSSA ? &Updater : nullptr;
2454 
2455   bool Changed = false;
2456   bool ShouldContinue = true;
2457 
2458   DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2459   // Merge unconditional branches, allowing PRE to catch more
2460   // optimization opportunities.
2461   for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ) {
2462     BasicBlock *BB = &*FI++;
2463 
2464     bool removedBlock = MergeBlockIntoPredecessor(BB, &DTU, LI, MSSAU, MD);
2465     if (removedBlock)
2466       ++NumGVNBlocks;
2467 
2468     Changed |= removedBlock;
2469   }
2470 
2471   unsigned Iteration = 0;
2472   while (ShouldContinue) {
2473     LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2474     ShouldContinue = iterateOnFunction(F);
2475     Changed |= ShouldContinue;
2476     ++Iteration;
2477   }
2478 
2479   if (isPREEnabled()) {
2480     // Fabricate val-num for dead-code in order to suppress assertion in
2481     // performPRE().
2482     assignValNumForDeadCode();
2483     bool PREChanged = true;
2484     while (PREChanged) {
2485       PREChanged = performPRE(F);
2486       Changed |= PREChanged;
2487     }
2488   }
2489 
2490   // FIXME: Should perform GVN again after PRE does something.  PRE can move
2491   // computations into blocks where they become fully redundant.  Note that
2492   // we can't do this until PRE's critical edge splitting updates memdep.
2493   // Actually, when this happens, we should just fully integrate PRE into GVN.
2494 
2495   cleanupGlobalSets();
2496   // Do not clean up DeadBlocks in cleanupGlobalSets() as it's called for each
2497   // iteration.
2498   DeadBlocks.clear();
2499 
2500   if (MSSA && VerifyMemorySSA)
2501     MSSA->verifyMemorySSA();
2502 
2503   return Changed;
2504 }
2505 
2506 bool GVN::processBlock(BasicBlock *BB) {
2507   // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2508   // (and incrementing BI before processing an instruction).
2509   assert(InstrsToErase.empty() &&
2510          "We expect InstrsToErase to be empty across iterations");
2511   if (DeadBlocks.count(BB))
2512     return false;
2513 
2514   // Clear the map before every BB because it is only valid for a single BB.
2515   ReplaceOperandsWithMap.clear();
2516   bool ChangedFunction = false;
2517 
2518   // Since we may not have visited the input blocks of the phis, we can't
2519   // use our normal hash approach for phis.  Instead, simply look for
2520   // obvious duplicates.  The first pass of GVN will tend to create
2521   // identical phis, and the second or later passes can eliminate them.
2522   ChangedFunction |= EliminateDuplicatePHINodes(BB);
2523 
2524   for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2525        BI != BE;) {
2526     if (!ReplaceOperandsWithMap.empty())
2527       ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2528     ChangedFunction |= processInstruction(&*BI);
2529 
2530     if (InstrsToErase.empty()) {
2531       ++BI;
2532       continue;
2533     }
2534 
2535     // If we need some instructions deleted, do it now.
2536     NumGVNInstr += InstrsToErase.size();
2537 
2538     // Avoid iterator invalidation.
2539     bool AtStart = BI == BB->begin();
2540     if (!AtStart)
2541       --BI;
2542 
2543     for (auto *I : InstrsToErase) {
2544       assert(I->getParent() == BB && "Removing instruction from wrong block?");
2545       LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2546       salvageKnowledge(I, AC);
2547       salvageDebugInfo(*I);
2548       if (MD) MD->removeInstruction(I);
2549       if (MSSAU)
2550         MSSAU->removeMemoryAccess(I);
2551       LLVM_DEBUG(verifyRemoved(I));
2552       ICF->removeInstruction(I);
2553       I->eraseFromParent();
2554     }
2555     InstrsToErase.clear();
2556 
2557     if (AtStart)
2558       BI = BB->begin();
2559     else
2560       ++BI;
2561   }
2562 
2563   return ChangedFunction;
2564 }
2565 
2566 // Instantiate an expression in a predecessor that lacked it.
2567 bool GVN::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2568                                     BasicBlock *Curr, unsigned int ValNo) {
2569   // Because we are going top-down through the block, all value numbers
2570   // will be available in the predecessor by the time we need them.  Any
2571   // that weren't originally present will have been instantiated earlier
2572   // in this loop.
2573   bool success = true;
2574   for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2575     Value *Op = Instr->getOperand(i);
2576     if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2577       continue;
2578     // This could be a newly inserted instruction, in which case, we won't
2579     // find a value number, and should give up before we hurt ourselves.
2580     // FIXME: Rewrite the infrastructure to make it easier to value number
2581     // and process newly inserted instructions.
2582     if (!VN.exists(Op)) {
2583       success = false;
2584       break;
2585     }
2586     uint32_t TValNo =
2587         VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2588     if (Value *V = findLeader(Pred, TValNo)) {
2589       Instr->setOperand(i, V);
2590     } else {
2591       success = false;
2592       break;
2593     }
2594   }
2595 
2596   // Fail out if we encounter an operand that is not available in
2597   // the PRE predecessor.  This is typically because of loads which
2598   // are not value numbered precisely.
2599   if (!success)
2600     return false;
2601 
2602   Instr->insertBefore(Pred->getTerminator());
2603   Instr->setName(Instr->getName() + ".pre");
2604   Instr->setDebugLoc(Instr->getDebugLoc());
2605 
2606   ICF->insertInstructionTo(Instr, Pred);
2607 
2608   unsigned Num = VN.lookupOrAdd(Instr);
2609   VN.add(Instr, Num);
2610 
2611   // Update the availability map to include the new instruction.
2612   addToLeaderTable(Num, Instr, Pred);
2613   return true;
2614 }
2615 
2616 bool GVN::performScalarPRE(Instruction *CurInst) {
2617   if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2618       isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2619       CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2620       isa<DbgInfoIntrinsic>(CurInst))
2621     return false;
2622 
2623   // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2624   // sinking the compare again, and it would force the code generator to
2625   // move the i1 from processor flags or predicate registers into a general
2626   // purpose register.
2627   if (isa<CmpInst>(CurInst))
2628     return false;
2629 
2630   // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2631   // sinking the addressing mode computation back to its uses. Extending the
2632   // GEP's live range increases the register pressure, and therefore it can
2633   // introduce unnecessary spills.
2634   //
2635   // This doesn't prevent Load PRE. PHI translation will make the GEP available
2636   // to the load by moving it to the predecessor block if necessary.
2637   if (isa<GetElementPtrInst>(CurInst))
2638     return false;
2639 
2640   if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2641     // We don't currently value number ANY inline asm calls.
2642     if (CallB->isInlineAsm())
2643       return false;
2644     // Don't do PRE on convergent calls.
2645     if (CallB->isConvergent())
2646       return false;
2647   }
2648 
2649   uint32_t ValNo = VN.lookup(CurInst);
2650 
2651   // Scan the predecessors for PRE opportunities.  We're
2652   // only trying to solve the basic diamond case, where
2653   // a value is computed in the successor and one predecessor,
2654   // but not the other.  We also explicitly disallow cases
2655   // where the successor is its own predecessor, because they're
2656   // more complicated to get right.
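  // A minimal sketch of that diamond (hypothetical IR; names are examples
  // only):
  //
  //   bb1:   %v1 = add i32 %a, %b    ; value available here
  //          br label %join
  //   bb2:   br label %join          ; value not available here
  //   join:  %v2 = add i32 %a, %b    ; partially redundant
  //
  // PRE inserts "%v1.pre = add i32 %a, %b" into bb2 and replaces %v2 with
  // "%v2.pre-phi = phi i32 [ %v1, %bb1 ], [ %v1.pre, %bb2 ]".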
2657   unsigned NumWith = 0;
2658   unsigned NumWithout = 0;
2659   BasicBlock *PREPred = nullptr;
2660   BasicBlock *CurrentBlock = CurInst->getParent();
2661 
2662   // Update the RPO numbers for this function.
2663   if (InvalidBlockRPONumbers)
2664     assignBlockRPONumber(*CurrentBlock->getParent());
2665 
2666   SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2667   for (BasicBlock *P : predecessors(CurrentBlock)) {
2668     // We're not interested in PRE where the predecessor block is not
2669     // reachable.
2670     if (!DT->isReachableFromEntry(P)) {
2671       NumWithout = 2;
2672       break;
2673     }
2674     // It is not safe to do PRE when P->CurrentBlock is a loop backedge and
2675     // CurInst has an operand defined in CurrentBlock (so it may be defined
2676     // by a phi in the loop header).
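    // A sketch of the unsafe case (hypothetical IR): with CurrentBlock as the
    // loop header and P as the latch,
    //
    //   header:  %x = phi i32 [ 0, %entry ], [ %x.next, %latch ]
    //            %y = add i32 %x, 1          ; CurInst
    //
    // cloning %y into the latch would make it compute the next iteration's
    // value of %x rather than the one CurInst actually uses.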
2677     assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2678            "Invalid BlockRPONumber map.");
2679     if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock] &&
2680         llvm::any_of(CurInst->operands(), [&](const Use &U) {
2681           if (auto *Inst = dyn_cast<Instruction>(U.get()))
2682             return Inst->getParent() == CurrentBlock;
2683           return false;
2684         })) {
2685       NumWithout = 2;
2686       break;
2687     }
2688 
2689     uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2690     Value *predV = findLeader(P, TValNo);
2691     if (!predV) {
2692       predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2693       PREPred = P;
2694       ++NumWithout;
2695     } else if (predV == CurInst) {
2696       /* CurInst dominates this predecessor. */
2697       NumWithout = 2;
2698       break;
2699     } else {
2700       predMap.push_back(std::make_pair(predV, P));
2701       ++NumWith;
2702     }
2703   }
2704 
2705   // Don't do PRE when it might increase code size, i.e. when
2706   // we would need to insert instructions in more than one pred.
2707   if (NumWithout > 1 || NumWith == 0)
2708     return false;
2709 
2710   // We may have a case where all predecessors have the instruction,
2711   // and we just need to insert a phi node. Otherwise, perform
2712   // insertion.
2713   Instruction *PREInstr = nullptr;
2714 
2715   if (NumWithout != 0) {
2716     if (!isSafeToSpeculativelyExecute(CurInst)) {
2717       // It is only valid to insert a new instruction if the current instruction
2718       // is always executed. An instruction with implicit control flow could
2719       // prevent us from doing it. If we cannot speculate the execution, then
2720       // PRE should be prohibited.
2721       if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2722         return false;
2723     }
2724 
2725     // Don't do PRE across indirect branch.
2726     if (isa<IndirectBrInst>(PREPred->getTerminator()))
2727       return false;
2728 
2729     // Don't do PRE across callbr.
2730     // FIXME: Can we do this across the fallthrough edge?
2731     if (isa<CallBrInst>(PREPred->getTerminator()))
2732       return false;
2733 
2734     // We can't do PRE safely on a critical edge, so instead we schedule
2735     // the edge to be split and perform the PRE the next time we iterate
2736     // on the function.
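    // (An edge is critical when its source block has multiple successors and
    // its target block has multiple predecessors; inserting into PREPred
    // directly would then execute the expression on paths that never reach
    // CurrentBlock.)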
2737     unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2738     if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2739       toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2740       return false;
2741     }
2742     // We need to insert somewhere, so let's give it a shot.
2743     PREInstr = CurInst->clone();
2744     if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2745       // If we failed insertion, make sure we remove the instruction.
2746       LLVM_DEBUG(verifyRemoved(PREInstr));
2747       PREInstr->deleteValue();
2748       return false;
2749     }
2750   }
2751 
2752   // Either we should have filled in the PRE instruction, or we should
2753   // not have needed insertions.
2754   assert(PREInstr != nullptr || NumWithout == 0);
2755 
2756   ++NumGVNPRE;
2757 
2758   // Create a PHI to make the value available in this block.
2759   PHINode *Phi =
2760       PHINode::Create(CurInst->getType(), predMap.size(),
2761                       CurInst->getName() + ".pre-phi", &CurrentBlock->front());
2762   for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
2763     if (Value *V = predMap[i].first) {
2764       // If we use an existing value in this phi, we have to patch the original
2765       // value because the phi will be used to replace a later value.
2766       patchReplacementInstruction(CurInst, V);
2767       Phi->addIncoming(V, predMap[i].second);
2768     } else
2769       Phi->addIncoming(PREInstr, PREPred);
2770   }
2771 
2772   VN.add(Phi, ValNo);
2773   // After creating a new PHI for ValNo, the phi translate result for ValNo will
2774   // be changed, so erase the related stale entries in phi translate cache.
2775   VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
2776   addToLeaderTable(ValNo, Phi, CurrentBlock);
2777   Phi->setDebugLoc(CurInst->getDebugLoc());
2778   CurInst->replaceAllUsesWith(Phi);
2779   if (MD && Phi->getType()->isPtrOrPtrVectorTy())
2780     MD->invalidateCachedPointerInfo(Phi);
2781   VN.erase(CurInst);
2782   removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2783 
2784   LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2785   if (MD)
2786     MD->removeInstruction(CurInst);
2787   if (MSSAU)
2788     MSSAU->removeMemoryAccess(CurInst);
2789   LLVM_DEBUG(verifyRemoved(CurInst));
2790   // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
2791   // some assertion failures.
2792   ICF->removeInstruction(CurInst);
2793   CurInst->eraseFromParent();
2794   ++NumGVNInstr;
2795 
2796   return true;
2797 }
2798 
2799 /// Perform a purely local form of PRE that looks for diamond
2800 /// control flow patterns and attempts to perform simple PRE at the join point.
2801 bool GVN::performPRE(Function &F) {
2802   bool Changed = false;
2803   for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
2804     // Nothing to PRE in the entry block.
2805     if (CurrentBlock == &F.getEntryBlock())
2806       continue;
2807 
2808     // Don't perform PRE on an EH pad.
2809     if (CurrentBlock->isEHPad())
2810       continue;
2811 
2812     for (BasicBlock::iterator BI = CurrentBlock->begin(),
2813                               BE = CurrentBlock->end();
2814          BI != BE;) {
2815       Instruction *CurInst = &*BI++;
2816       Changed |= performScalarPRE(CurInst);
2817     }
2818   }
2819 
2820   if (splitCriticalEdges())
2821     Changed = true;
2822 
2823   return Changed;
2824 }
2825 
2826 /// Split the critical edge connecting the given two blocks, and return
2827 /// the block inserted on the critical edge.
2828 BasicBlock *GVN::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
2829   // GVN does not require loop-simplify; do not try to preserve it if that is
2830   // not possible.
2831   BasicBlock *BB = SplitCriticalEdge(
2832       Pred, Succ,
2833       CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
2834   if (BB) {
2835     if (MD)
2836       MD->invalidateCachedPredecessors();
2837     InvalidBlockRPONumbers = true;
2838   }
2839   return BB;
2840 }
2841 
2842 /// Split critical edges found during the previous
2843 /// iteration that may enable further optimization.
2844 bool GVN::splitCriticalEdges() {
2845   if (toSplit.empty())
2846     return false;
2847 
2848   bool Changed = false;
2849   do {
2850     std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
2851     Changed |= SplitCriticalEdge(Edge.first, Edge.second,
2852                                  CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
2853                nullptr;
2854   } while (!toSplit.empty());
2855   if (Changed) {
2856     if (MD)
2857       MD->invalidateCachedPredecessors();
2858     InvalidBlockRPONumbers = true;
2859   }
2860   return Changed;
2861 }
2862 
2863 /// Executes one iteration of GVN
2864 bool GVN::iterateOnFunction(Function &F) {
2865   cleanupGlobalSets();
2866 
2867   // Top-down walk of the dominator tree
2868   bool Changed = false;
2869   // Needed for value numbering with phi construction to work.
2870   // RPOT walks the graph in its constructor and will not be invalidated during
2871   // processBlock.
2872   ReversePostOrderTraversal<Function *> RPOT(&F);
2873 
2874   for (BasicBlock *BB : RPOT)
2875     Changed |= processBlock(BB);
2876 
2877   return Changed;
2878 }
2879 
2880 void GVN::cleanupGlobalSets() {
2881   VN.clear();
2882   LeaderTable.clear();
2883   BlockRPONumber.clear();
2884   TableAllocator.Reset();
2885   ICF->clear();
2886   InvalidBlockRPONumbers = true;
2887 }
2888 
2889 /// Verify that the specified instruction does not occur in our
2890 /// internal data structures.
2891 void GVN::verifyRemoved(const Instruction *Inst) const {
2892   VN.verifyRemoved(Inst);
2893 
2894   // Walk through the value number scope to make sure the instruction isn't
2895   // ferreted away in it.
2896   for (const auto &I : LeaderTable) {
2897     const LeaderTableEntry *Node = &I.second;
2898     assert(Node->Val != Inst && "Inst still in value numbering scope!");
2899 
2900     while (Node->Next) {
2901       Node = Node->Next;
2902       assert(Node->Val != Inst && "Inst still in value numbering scope!");
2903     }
2904   }
2905 }
2906 
2907 /// BB is declared dead, which implies that other blocks become dead as well.
2908 /// This function adds all such blocks to "DeadBlocks". For the dead blocks'
2909 /// live successors, it updates their phi nodes by replacing the operands
2910 /// corresponding to dead blocks with UndefVal.
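/// A minimal sketch (hypothetical IR): if a live successor S has a live
/// predecessor L and a newly dead predecessor D, a phi in S such as
///
///   %p = phi i32 [ %v, %L ], [ %w, %D ]
///
/// becomes
///
///   %p = phi i32 [ %v, %L ], [ undef, %D ]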
2911 void GVN::addDeadBlock(BasicBlock *BB) {
2912   SmallVector<BasicBlock *, 4> NewDead;
2913   SmallSetVector<BasicBlock *, 4> DF;
2914 
2915   NewDead.push_back(BB);
2916   while (!NewDead.empty()) {
2917     BasicBlock *D = NewDead.pop_back_val();
2918     if (DeadBlocks.count(D))
2919       continue;
2920 
2921     // All blocks dominated by D are dead.
2922     SmallVector<BasicBlock *, 8> Dom;
2923     DT->getDescendants(D, Dom);
2924     DeadBlocks.insert(Dom.begin(), Dom.end());
2925 
2926     // Figure out the dominance-frontier(D).
2927     for (BasicBlock *B : Dom) {
2928       for (BasicBlock *S : successors(B)) {
2929         if (DeadBlocks.count(S))
2930           continue;
2931 
2932         bool AllPredDead = true;
2933         for (BasicBlock *P : predecessors(S))
2934           if (!DeadBlocks.count(P)) {
2935             AllPredDead = false;
2936             break;
2937           }
2938 
2939         if (!AllPredDead) {
2940           // S could be proved dead later on. That is why we don't update phi
2941           // operands at this moment.
2942           DF.insert(S);
2943         } else {
2944           // Although S is not dominated by D, it is dead by now. This can
2945           // happen if S already had a dead predecessor before D was declared
2946           // dead.
2947           NewDead.push_back(S);
2948         }
2949       }
2950     }
2951   }
2952 
2953   // For the dead blocks' live successors, update their phi nodes by replacing
2954   // the operands corresponding to dead blocks with UndefVal.
2955   for (BasicBlock *B : DF) {
2956     if (DeadBlocks.count(B))
2957       continue;
2958 
2959     // First, split the critical edges. This might also create additional blocks
2960     // to preserve LoopSimplify form and adjust edges accordingly.
2961     SmallVector<BasicBlock *, 4> Preds(predecessors(B));
2962     for (BasicBlock *P : Preds) {
2963       if (!DeadBlocks.count(P))
2964         continue;
2965 
2966       if (llvm::is_contained(successors(P), B) &&
2967           isCriticalEdge(P->getTerminator(), B)) {
2968         if (BasicBlock *S = splitCriticalEdges(P, B))
2969           DeadBlocks.insert(P = S);
2970       }
2971     }
2972 
2973     // Now undef the incoming values from the dead predecessors.
2974     for (BasicBlock *P : predecessors(B)) {
2975       if (!DeadBlocks.count(P))
2976         continue;
2977       for (PHINode &Phi : B->phis()) {
2978         Phi.setIncomingValueForBlock(P, UndefValue::get(Phi.getType()));
2979         if (MD)
2980           MD->invalidateCachedPointerInfo(&Phi);
2981       }
2982     }
2983   }
2984 }
2985 
2986 // If the given branch is recognized as a foldable branch (i.e. a conditional
2987 // branch with a constant condition), it performs the following analyses and
2988 // transformations.
2989 //  1) If the dead outgoing edge is a critical edge, split it. Let
2990 //     R be the target of the dead outgoing edge.
2991 //  2) Identify the set of dead blocks implied by the branch's dead outgoing
2992 //     edge. The result of this step will be {X | X is dominated by R}.
2993 //  3) Identify those blocks which have at least one dead predecessor. The
2994 //     result of this step will be dominance-frontier(R).
2995 //  4) Update the PHIs in DF(R) by replacing the operands corresponding to
2996 //     dead blocks with "UndefVal", hoping these PHIs will be optimized away.
2997 //
2998 // Return true iff *NEW* dead code is found.
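// A minimal sketch (hypothetical IR): given
//
//   br i1 true, label %live, label %dead
//
// the %dead successor becomes the dead root R; every block dominated by it is
// added to DeadBlocks, and phis in DF(R) have their incoming values from dead
// blocks replaced with undef, as described above.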
2999 bool GVN::processFoldableCondBr(BranchInst *BI) {
3000   if (!BI || BI->isUnconditional())
3001     return false;
3002 
3003   // If a branch has two identical successors, we cannot declare either dead.
3004   if (BI->getSuccessor(0) == BI->getSuccessor(1))
3005     return false;
3006 
3007   ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
3008   if (!Cond)
3009     return false;
3010 
3011   BasicBlock *DeadRoot =
3012       Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
3013   if (DeadBlocks.count(DeadRoot))
3014     return false;
3015 
3016   if (!DeadRoot->getSinglePredecessor())
3017     DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
3018 
3019   addDeadBlock(DeadRoot);
3020   return true;
3021 }
3022 
3023 // performPRE() will trigger an assert if it comes across an instruction
3024 // without an associated val-num. As there are normally far more live
3025 // instructions than dead ones, it makes more sense just to "fabricate" a
3026 // val-number for dead code than to check whether each instruction is dead.
3027 void GVN::assignValNumForDeadCode() {
3028   for (BasicBlock *BB : DeadBlocks) {
3029     for (Instruction &Inst : *BB) {
3030       unsigned ValNum = VN.lookupOrAdd(&Inst);
3031       addToLeaderTable(ValNum, &Inst, BB);
3032     }
3033   }
3034 }
3035 
3036 class llvm::gvn::GVNLegacyPass : public FunctionPass {
3037 public:
3038   static char ID; // Pass identification, replacement for typeid
3039 
3040   explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
3041       : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
3042     initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
3043   }
3044 
3045   bool runOnFunction(Function &F) override {
3046     if (skipFunction(F))
3047       return false;
3048 
3049     auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
3050 
3051     auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3052     return Impl.runImpl(
3053         F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
3054         getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3055         getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
3056         getAnalysis<AAResultsWrapperPass>().getAAResults(),
3057         Impl.isMemDepEnabled()
3058             ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3059             : nullptr,
3060         LIWP ? &LIWP->getLoopInfo() : nullptr,
3061         &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3062         MSSAWP ? &MSSAWP->getMSSA() : nullptr);
3063   }
3064 
3065   void getAnalysisUsage(AnalysisUsage &AU) const override {
3066     AU.addRequired<AssumptionCacheTracker>();
3067     AU.addRequired<DominatorTreeWrapperPass>();
3068     AU.addRequired<TargetLibraryInfoWrapperPass>();
3069     AU.addRequired<LoopInfoWrapperPass>();
3070     if (Impl.isMemDepEnabled())
3071       AU.addRequired<MemoryDependenceWrapperPass>();
3072     AU.addRequired<AAResultsWrapperPass>();
3073     AU.addPreserved<DominatorTreeWrapperPass>();
3074     AU.addPreserved<GlobalsAAWrapperPass>();
3075     AU.addPreserved<TargetLibraryInfoWrapperPass>();
3076     AU.addPreserved<LoopInfoWrapperPass>();
3077     AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
3078     AU.addPreserved<MemorySSAWrapperPass>();
3079   }
3080 
3081 private:
3082   GVN Impl;
3083 };
3084 
3085 char GVNLegacyPass::ID = 0;
3086 
3087 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3088 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
3089 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
3090 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3091 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3092 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
3093 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
3094 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
3095 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3096 
3097 // The public interface to this file...
3098 FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
3099   return new GVNLegacyPass(NoMemDepAnalysis);
3100 }
3101
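// A minimal usage sketch, assuming the legacy pass manager (Module M and the
// surrounding setup are hypothetical):
//
//   #include "llvm/IR/LegacyPassManager.h"
//
//   legacy::PassManager PM;
//   PM.add(createGVNPass());
//   PM.run(M);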