1 //===- GVN.cpp - Eliminate redundant values and loads ---------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs global value numbering to eliminate fully redundant
10 // instructions. It also performs simple dead load elimination.
11 //
12 // Note that this pass does the value numbering itself; it does not use the
13 // ValueNumbering analysis passes.
14 //
15 //===----------------------------------------------------------------------===//
16
17 #include "llvm/Transforms/Scalar/GVN.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DepthFirstIterator.h"
20 #include "llvm/ADT/Hashing.h"
21 #include "llvm/ADT/MapVector.h"
22 #include "llvm/ADT/PostOrderIterator.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/ADT/SetVector.h"
25 #include "llvm/ADT/SmallPtrSet.h"
26 #include "llvm/ADT/SmallVector.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/AliasAnalysis.h"
29 #include "llvm/Analysis/AssumeBundleQueries.h"
30 #include "llvm/Analysis/AssumptionCache.h"
31 #include "llvm/Analysis/CFG.h"
32 #include "llvm/Analysis/DomTreeUpdater.h"
33 #include "llvm/Analysis/GlobalsModRef.h"
34 #include "llvm/Analysis/InstructionPrecedenceTracking.h"
35 #include "llvm/Analysis/InstructionSimplify.h"
36 #include "llvm/Analysis/LoopInfo.h"
37 #include "llvm/Analysis/MemoryBuiltins.h"
38 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
39 #include "llvm/Analysis/MemorySSA.h"
40 #include "llvm/Analysis/MemorySSAUpdater.h"
41 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
42 #include "llvm/Analysis/PHITransAddr.h"
43 #include "llvm/Analysis/TargetLibraryInfo.h"
44 #include "llvm/Analysis/ValueTracking.h"
45 #include "llvm/IR/Attributes.h"
46 #include "llvm/IR/BasicBlock.h"
47 #include "llvm/IR/Constant.h"
48 #include "llvm/IR/Constants.h"
49 #include "llvm/IR/DebugLoc.h"
50 #include "llvm/IR/Dominators.h"
51 #include "llvm/IR/Function.h"
52 #include "llvm/IR/InstrTypes.h"
53 #include "llvm/IR/Instruction.h"
54 #include "llvm/IR/Instructions.h"
55 #include "llvm/IR/IntrinsicInst.h"
56 #include "llvm/IR/LLVMContext.h"
57 #include "llvm/IR/Metadata.h"
58 #include "llvm/IR/Module.h"
59 #include "llvm/IR/PassManager.h"
60 #include "llvm/IR/PatternMatch.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/Use.h"
63 #include "llvm/IR/Value.h"
64 #include "llvm/InitializePasses.h"
65 #include "llvm/Pass.h"
66 #include "llvm/Support/Casting.h"
67 #include "llvm/Support/CommandLine.h"
68 #include "llvm/Support/Compiler.h"
69 #include "llvm/Support/Debug.h"
70 #include "llvm/Support/raw_ostream.h"
71 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
72 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
73 #include "llvm/Transforms/Utils/Local.h"
74 #include "llvm/Transforms/Utils/SSAUpdater.h"
75 #include "llvm/Transforms/Utils/VNCoercion.h"
76 #include <algorithm>
77 #include <cassert>
78 #include <cstdint>
79 #include <optional>
80 #include <utility>
81
82 using namespace llvm;
83 using namespace llvm::gvn;
84 using namespace llvm::VNCoercion;
85 using namespace PatternMatch;
86
87 #define DEBUG_TYPE "gvn"
88
89 STATISTIC(NumGVNInstr, "Number of instructions deleted");
90 STATISTIC(NumGVNLoad, "Number of loads deleted");
91 STATISTIC(NumGVNPRE, "Number of instructions PRE'd");
92 STATISTIC(NumGVNBlocks, "Number of blocks merged");
93 STATISTIC(NumGVNSimpl, "Number of instructions simplified");
94 STATISTIC(NumGVNEqProp, "Number of equalities propagated");
95 STATISTIC(NumPRELoad, "Number of loads PRE'd");
96 STATISTIC(NumPRELoopLoad, "Number of loop loads PRE'd");
97
98 STATISTIC(IsValueFullyAvailableInBlockNumSpeculationsMax,
99 "Number of blocks speculated as available in "
100 "IsValueFullyAvailableInBlock(), max");
101 STATISTIC(MaxBBSpeculationCutoffReachedTimes,
102 "Number of times we we reached gvn-max-block-speculations cut-off "
103 "preventing further exploration");
104
105 static cl::opt<bool> GVNEnablePRE("enable-pre", cl::init(true), cl::Hidden);
106 static cl::opt<bool> GVNEnableLoadPRE("enable-load-pre", cl::init(true));
107 static cl::opt<bool> GVNEnableLoadInLoopPRE("enable-load-in-loop-pre",
108 cl::init(true));
109 static cl::opt<bool>
110 GVNEnableSplitBackedgeInLoadPRE("enable-split-backedge-in-load-pre",
111 cl::init(false));
112 static cl::opt<bool> GVNEnableMemDep("enable-gvn-memdep", cl::init(true));
113
114 static cl::opt<uint32_t> MaxNumDeps(
115 "gvn-max-num-deps", cl::Hidden, cl::init(100),
116 cl::desc("Max number of dependences to attempt Load PRE (default = 100)"));
117
118 // This is based on IsValueFullyAvailableInBlockNumSpeculationsMax stat.
119 static cl::opt<uint32_t> MaxBBSpeculations(
120 "gvn-max-block-speculations", cl::Hidden, cl::init(600),
121 cl::desc("Max number of blocks we're willing to speculate on (and recurse "
122 "into) when deducing if a value is fully available or not in GVN "
123 "(default = 600)"));
124
125 static cl::opt<uint32_t> MaxNumVisitedInsts(
126 "gvn-max-num-visited-insts", cl::Hidden, cl::init(100),
127 cl::desc("Max number of visited instructions when trying to find "
128 "dominating value of select dependency (default = 100)"));
129
130 struct llvm::GVNPass::Expression {
131 uint32_t opcode;
132 bool commutative = false;
133   // The type is not necessarily the result type of the expression; it may be
134 // any additional type needed to disambiguate the expression.
135 Type *type = nullptr;
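  // Value numbers of the operands, plus any extra distinguishing data
  // (e.g. insertvalue/extractvalue indices or a shufflevector mask).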
136 SmallVector<uint32_t, 4> varargs;
137
138   Expression(uint32_t o = ~2U) : opcode(o) {}
139
140   bool operator==(const Expression &other) const {
141 if (opcode != other.opcode)
142 return false;
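    // Opcodes ~0U and ~1U are reserved for the DenseMap empty and tombstone
    // keys (see DenseMapInfo<GVNPass::Expression> below); two such keys
    // compare equal on the opcode alone.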
143 if (opcode == ~0U || opcode == ~1U)
144 return true;
145 if (type != other.type)
146 return false;
147 if (varargs != other.varargs)
148 return false;
149 return true;
150 }
151
152   friend hash_code hash_value(const Expression &Value) {
153 return hash_combine(
154 Value.opcode, Value.type,
155 hash_combine_range(Value.varargs.begin(), Value.varargs.end()));
156 }
157 };
158
159 namespace llvm {
160
161 template <> struct DenseMapInfo<GVNPass::Expression> {
162   static inline GVNPass::Expression getEmptyKey() { return ~0U; }
163   static inline GVNPass::Expression getTombstoneKey() { return ~1U; }
164
165   static unsigned getHashValue(const GVNPass::Expression &e) {
166 using llvm::hash_value;
167
168 return static_cast<unsigned>(hash_value(e));
169 }
170
171   static bool isEqual(const GVNPass::Expression &LHS,
172 const GVNPass::Expression &RHS) {
173 return LHS == RHS;
174 }
175 };
176
177 } // end namespace llvm
178
179 /// Represents a particular available value that we know how to materialize.
180 /// Materialization of an AvailableValue never fails. An AvailableValue is
181 /// implicitly associated with a rematerialization point which is the
182 /// location of the instruction from which it was formed.
183 struct llvm::gvn::AvailableValue {
184 enum class ValType {
185 SimpleVal, // A simple offsetted value that is accessed.
186 LoadVal, // A value produced by a load.
187 MemIntrin, // A memory intrinsic which is loaded from.
188     UndefVal,  // An UndefValue representing a value from a dead block (which
189 // is not yet physically removed from the CFG).
190 SelectVal, // A pointer select which is loaded from and for which the load
191                // can be replaced by a value select.
192 };
193
194 /// Val - The value that is live out of the block.
195 Value *Val;
196 /// Kind of the live-out value.
197 ValType Kind;
198
199 /// Offset - The byte offset in Val that is interesting for the load query.
200 unsigned Offset = 0;
201 /// V1, V2 - The dominating non-clobbered values of SelectVal.
202 Value *V1 = nullptr, *V2 = nullptr;
203
204   static AvailableValue get(Value *V, unsigned Offset = 0) {
205 AvailableValue Res;
206 Res.Val = V;
207 Res.Kind = ValType::SimpleVal;
208 Res.Offset = Offset;
209 return Res;
210 }
211
212   static AvailableValue getMI(MemIntrinsic *MI, unsigned Offset = 0) {
213 AvailableValue Res;
214 Res.Val = MI;
215 Res.Kind = ValType::MemIntrin;
216 Res.Offset = Offset;
217 return Res;
218 }
219
220   static AvailableValue getLoad(LoadInst *Load, unsigned Offset = 0) {
221 AvailableValue Res;
222 Res.Val = Load;
223 Res.Kind = ValType::LoadVal;
224 Res.Offset = Offset;
225 return Res;
226 }
227
228   static AvailableValue getUndef() {
229 AvailableValue Res;
230 Res.Val = nullptr;
231 Res.Kind = ValType::UndefVal;
232 Res.Offset = 0;
233 return Res;
234 }
235
236   static AvailableValue getSelect(SelectInst *Sel, Value *V1, Value *V2) {
237 AvailableValue Res;
238 Res.Val = Sel;
239 Res.Kind = ValType::SelectVal;
240 Res.Offset = 0;
241 Res.V1 = V1;
242 Res.V2 = V2;
243 return Res;
244 }
245
246   bool isSimpleValue() const { return Kind == ValType::SimpleVal; }
247   bool isCoercedLoadValue() const { return Kind == ValType::LoadVal; }
248   bool isMemIntrinValue() const { return Kind == ValType::MemIntrin; }
249   bool isUndefValue() const { return Kind == ValType::UndefVal; }
250   bool isSelectValue() const { return Kind == ValType::SelectVal; }
251
252   Value *getSimpleValue() const {
253 assert(isSimpleValue() && "Wrong accessor");
254 return Val;
255 }
256
257   LoadInst *getCoercedLoadValue() const {
258 assert(isCoercedLoadValue() && "Wrong accessor");
259 return cast<LoadInst>(Val);
260 }
261
262   MemIntrinsic *getMemIntrinValue() const {
263 assert(isMemIntrinValue() && "Wrong accessor");
264 return cast<MemIntrinsic>(Val);
265 }
266
267   SelectInst *getSelectValue() const {
268 assert(isSelectValue() && "Wrong accessor");
269 return cast<SelectInst>(Val);
270 }
271
272 /// Emit code at the specified insertion point to adjust the value defined
273 /// here to the specified type. This handles various coercion cases.
274 Value *MaterializeAdjustedValue(LoadInst *Load, Instruction *InsertPt,
275 GVNPass &gvn) const;
276 };
277
278 /// Represents an AvailableValue which can be rematerialized at the end of
279 /// the associated BasicBlock.
280 struct llvm::gvn::AvailableValueInBlock {
281 /// BB - The basic block in question.
282 BasicBlock *BB = nullptr;
283
284 /// AV - The actual available value
285 AvailableValue AV;
286
287   static AvailableValueInBlock get(BasicBlock *BB, AvailableValue &&AV) {
288 AvailableValueInBlock Res;
289 Res.BB = BB;
290 Res.AV = std::move(AV);
291 return Res;
292 }
293
294   static AvailableValueInBlock get(BasicBlock *BB, Value *V,
295 unsigned Offset = 0) {
296 return get(BB, AvailableValue::get(V, Offset));
297 }
298
299   static AvailableValueInBlock getUndef(BasicBlock *BB) {
300 return get(BB, AvailableValue::getUndef());
301 }
302
303   static AvailableValueInBlock getSelect(BasicBlock *BB, SelectInst *Sel,
304 Value *V1, Value *V2) {
305 return get(BB, AvailableValue::getSelect(Sel, V1, V2));
306 }
307
308 /// Emit code at the end of this block to adjust the value defined here to
309 /// the specified type. This handles various coercion cases.
310   Value *MaterializeAdjustedValue(LoadInst *Load, GVNPass &gvn) const {
311 return AV.MaterializeAdjustedValue(Load, BB->getTerminator(), gvn);
312 }
313 };
314
315 //===----------------------------------------------------------------------===//
316 // ValueTable Internal Functions
317 //===----------------------------------------------------------------------===//
318
319 GVNPass::Expression GVNPass::ValueTable::createExpr(Instruction *I) {
320 Expression e;
321 e.type = I->getType();
322 e.opcode = I->getOpcode();
323 if (const GCRelocateInst *GCR = dyn_cast<GCRelocateInst>(I)) {
324     // gc.relocate is a 'special' call: its second and third operands are
325 // not real values, but indices into statepoint's argument list.
326     // Use the referred-to values for purposes of identity.
327 e.varargs.push_back(lookupOrAdd(GCR->getOperand(0)));
328 e.varargs.push_back(lookupOrAdd(GCR->getBasePtr()));
329 e.varargs.push_back(lookupOrAdd(GCR->getDerivedPtr()));
330 } else {
331 for (Use &Op : I->operands())
332 e.varargs.push_back(lookupOrAdd(Op));
333 }
334 if (I->isCommutative()) {
335 // Ensure that commutative instructions that only differ by a permutation
336 // of their operands get the same value number by sorting the operand value
337     // numbers. Since the commutative operands are the first two, it is more
338 // efficient to sort by hand rather than using, say, std::sort.
339 assert(I->getNumOperands() >= 2 && "Unsupported commutative instruction!");
340 if (e.varargs[0] > e.varargs[1])
341 std::swap(e.varargs[0], e.varargs[1]);
342 e.commutative = true;
343 }
344
345 if (auto *C = dyn_cast<CmpInst>(I)) {
346 // Sort the operand value numbers so x<y and y>x get the same value number.
347 CmpInst::Predicate Predicate = C->getPredicate();
348 if (e.varargs[0] > e.varargs[1]) {
349 std::swap(e.varargs[0], e.varargs[1]);
350 Predicate = CmpInst::getSwappedPredicate(Predicate);
351 }
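    // Pack the (possibly swapped) predicate into the low bits of the opcode
    // so that compares with different predicates get distinct expressions.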
352 e.opcode = (C->getOpcode() << 8) | Predicate;
353 e.commutative = true;
354 } else if (auto *E = dyn_cast<InsertValueInst>(I)) {
355 e.varargs.append(E->idx_begin(), E->idx_end());
356 } else if (auto *SVI = dyn_cast<ShuffleVectorInst>(I)) {
357 ArrayRef<int> ShuffleMask = SVI->getShuffleMask();
358 e.varargs.append(ShuffleMask.begin(), ShuffleMask.end());
359 }
360
361 return e;
362 }
363
364 GVNPass::Expression GVNPass::ValueTable::createCmpExpr(
365 unsigned Opcode, CmpInst::Predicate Predicate, Value *LHS, Value *RHS) {
366 assert((Opcode == Instruction::ICmp || Opcode == Instruction::FCmp) &&
367 "Not a comparison!");
368 Expression e;
369 e.type = CmpInst::makeCmpResultType(LHS->getType());
370 e.varargs.push_back(lookupOrAdd(LHS));
371 e.varargs.push_back(lookupOrAdd(RHS));
372
373 // Sort the operand value numbers so x<y and y>x get the same value number.
374 if (e.varargs[0] > e.varargs[1]) {
375 std::swap(e.varargs[0], e.varargs[1]);
376 Predicate = CmpInst::getSwappedPredicate(Predicate);
377 }
378 e.opcode = (Opcode << 8) | Predicate;
379 e.commutative = true;
380 return e;
381 }
382
383 GVNPass::Expression
384 GVNPass::ValueTable::createExtractvalueExpr(ExtractValueInst *EI) {
385 assert(EI && "Not an ExtractValueInst?");
386 Expression e;
387 e.type = EI->getType();
388 e.opcode = 0;
389
390 WithOverflowInst *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
391 if (WO != nullptr && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) {
392 // EI is an extract from one of our with.overflow intrinsics. Synthesize
393 // a semantically equivalent expression instead of an extract value
394 // expression.
395 e.opcode = WO->getBinaryOp();
396 e.varargs.push_back(lookupOrAdd(WO->getLHS()));
397 e.varargs.push_back(lookupOrAdd(WO->getRHS()));
398 return e;
399 }
400
401 // Not a recognised intrinsic. Fall back to producing an extract value
402 // expression.
403 e.opcode = EI->getOpcode();
404 for (Use &Op : EI->operands())
405 e.varargs.push_back(lookupOrAdd(Op));
406
407 append_range(e.varargs, EI->indices());
408
409 return e;
410 }
411
412 GVNPass::Expression GVNPass::ValueTable::createGEPExpr(GetElementPtrInst *GEP) {
413 Expression E;
414 Type *PtrTy = GEP->getType()->getScalarType();
415 const DataLayout &DL = GEP->getModule()->getDataLayout();
416 unsigned BitWidth = DL.getIndexTypeSizeInBits(PtrTy);
417 MapVector<Value *, APInt> VariableOffsets;
418 APInt ConstantOffset(BitWidth, 0);
419 if (PtrTy->isOpaquePointerTy() &&
420 GEP->collectOffset(DL, BitWidth, VariableOffsets, ConstantOffset)) {
421 // For opaque pointers, convert into offset representation, to recognize
422 // equivalent address calculations that use different type encoding.
423 LLVMContext &Context = GEP->getContext();
424 E.opcode = GEP->getOpcode();
425 E.type = nullptr;
426 E.varargs.push_back(lookupOrAdd(GEP->getPointerOperand()));
427 for (const auto &Pair : VariableOffsets) {
428 E.varargs.push_back(lookupOrAdd(Pair.first));
429 E.varargs.push_back(lookupOrAdd(ConstantInt::get(Context, Pair.second)));
430 }
431 if (!ConstantOffset.isZero())
432 E.varargs.push_back(
433 lookupOrAdd(ConstantInt::get(Context, ConstantOffset)));
434 } else {
435 // If converting to offset representation fails (for typed pointers and
436 // scalable vectors), fall back to type-based implementation:
437 E.opcode = GEP->getOpcode();
438 E.type = GEP->getSourceElementType();
439 for (Use &Op : GEP->operands())
440 E.varargs.push_back(lookupOrAdd(Op));
441 }
442 return E;
443 }
444
445 //===----------------------------------------------------------------------===//
446 // ValueTable External Functions
447 //===----------------------------------------------------------------------===//
448
449 GVNPass::ValueTable::ValueTable() = default;
450 GVNPass::ValueTable::ValueTable(const ValueTable &) = default;
451 GVNPass::ValueTable::ValueTable(ValueTable &&) = default;
452 GVNPass::ValueTable::~ValueTable() = default;
453 GVNPass::ValueTable &
454 GVNPass::ValueTable::operator=(const GVNPass::ValueTable &Arg) = default;
455
456 /// add - Insert a value into the table with a specified value number.
457 void GVNPass::ValueTable::add(Value *V, uint32_t num) {
458 valueNumbering.insert(std::make_pair(V, num));
459 if (PHINode *PN = dyn_cast<PHINode>(V))
460 NumberingPhi[num] = PN;
461 }
462
463 uint32_t GVNPass::ValueTable::lookupOrAddCall(CallInst *C) {
464 if (AA->doesNotAccessMemory(C) &&
465 // FIXME: Currently the calls which may access the thread id may
466 // be considered as not accessing the memory. But this is
467 // problematic for coroutines, since coroutines may resume in a
468 // different thread. So we disable the optimization here for the
469 // correctness. However, it may block many other correct
470 // optimizations. Revert this one when we detect the memory
471 // accessing kind more precisely.
472 !C->getFunction()->isPresplitCoroutine()) {
473 Expression exp = createExpr(C);
474 uint32_t e = assignExpNewValueNum(exp).first;
475 valueNumbering[C] = e;
476 return e;
477 } else if (MD && AA->onlyReadsMemory(C) &&
478 // FIXME: Currently the calls which may access the thread id may
479 // be considered as not accessing the memory. But this is
480 // problematic for coroutines, since coroutines may resume in a
481 // different thread. So we disable the optimization here for the
482 // correctness. However, it may block many other correct
483 // optimizations. Revert this one when we detect the memory
484 // accessing kind more precisely.
485 !C->getFunction()->isPresplitCoroutine()) {
486 Expression exp = createExpr(C);
487 auto ValNum = assignExpNewValueNum(exp);
488 if (ValNum.second) {
489 valueNumbering[C] = ValNum.first;
490 return ValNum.first;
491 }
492
493 MemDepResult local_dep = MD->getDependency(C);
494
495 if (!local_dep.isDef() && !local_dep.isNonLocal()) {
496 valueNumbering[C] = nextValueNumber;
497 return nextValueNumber++;
498 }
499
500 if (local_dep.isDef()) {
501 // For masked load/store intrinsics, the local_dep may actually be
502 // a normal load or store instruction.
503 CallInst *local_cdep = dyn_cast<CallInst>(local_dep.getInst());
504
505 if (!local_cdep || local_cdep->arg_size() != C->arg_size()) {
506 valueNumbering[C] = nextValueNumber;
507 return nextValueNumber++;
508 }
509
510 for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
511 uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
512 uint32_t cd_vn = lookupOrAdd(local_cdep->getArgOperand(i));
513 if (c_vn != cd_vn) {
514 valueNumbering[C] = nextValueNumber;
515 return nextValueNumber++;
516 }
517 }
518
519 uint32_t v = lookupOrAdd(local_cdep);
520 valueNumbering[C] = v;
521 return v;
522 }
523
524 // Non-local case.
525 const MemoryDependenceResults::NonLocalDepInfo &deps =
526 MD->getNonLocalCallDependency(C);
527 // FIXME: Move the checking logic to MemDep!
528 CallInst* cdep = nullptr;
529
530 // Check to see if we have a single dominating call instruction that is
531 // identical to C.
532 for (const NonLocalDepEntry &I : deps) {
533 if (I.getResult().isNonLocal())
534 continue;
535
536 // We don't handle non-definitions. If we already have a call, reject
537 // instruction dependencies.
538 if (!I.getResult().isDef() || cdep != nullptr) {
539 cdep = nullptr;
540 break;
541 }
542
543 CallInst *NonLocalDepCall = dyn_cast<CallInst>(I.getResult().getInst());
544 // FIXME: All duplicated with non-local case.
545 if (NonLocalDepCall && DT->properlyDominates(I.getBB(), C->getParent())) {
546 cdep = NonLocalDepCall;
547 continue;
548 }
549
550 cdep = nullptr;
551 break;
552 }
553
554 if (!cdep) {
555 valueNumbering[C] = nextValueNumber;
556 return nextValueNumber++;
557 }
558
559 if (cdep->arg_size() != C->arg_size()) {
560 valueNumbering[C] = nextValueNumber;
561 return nextValueNumber++;
562 }
563 for (unsigned i = 0, e = C->arg_size(); i < e; ++i) {
564 uint32_t c_vn = lookupOrAdd(C->getArgOperand(i));
565 uint32_t cd_vn = lookupOrAdd(cdep->getArgOperand(i));
566 if (c_vn != cd_vn) {
567 valueNumbering[C] = nextValueNumber;
568 return nextValueNumber++;
569 }
570 }
571
572 uint32_t v = lookupOrAdd(cdep);
573 valueNumbering[C] = v;
574 return v;
575 } else {
576 valueNumbering[C] = nextValueNumber;
577 return nextValueNumber++;
578 }
579 }
580
581 /// Returns true if a value number exists for the specified value.
582 bool GVNPass::ValueTable::exists(Value *V) const {
583 return valueNumbering.count(V) != 0;
584 }
585
586 /// lookup_or_add - Returns the value number for the specified value, assigning
587 /// it a new number if it did not have one before.
588 uint32_t GVNPass::ValueTable::lookupOrAdd(Value *V) {
589 DenseMap<Value*, uint32_t>::iterator VI = valueNumbering.find(V);
590 if (VI != valueNumbering.end())
591 return VI->second;
592
593 auto *I = dyn_cast<Instruction>(V);
594 if (!I) {
595 valueNumbering[V] = nextValueNumber;
596 return nextValueNumber++;
597 }
598
599 Expression exp;
600 switch (I->getOpcode()) {
601 case Instruction::Call:
602 return lookupOrAddCall(cast<CallInst>(I));
603 case Instruction::FNeg:
604 case Instruction::Add:
605 case Instruction::FAdd:
606 case Instruction::Sub:
607 case Instruction::FSub:
608 case Instruction::Mul:
609 case Instruction::FMul:
610 case Instruction::UDiv:
611 case Instruction::SDiv:
612 case Instruction::FDiv:
613 case Instruction::URem:
614 case Instruction::SRem:
615 case Instruction::FRem:
616 case Instruction::Shl:
617 case Instruction::LShr:
618 case Instruction::AShr:
619 case Instruction::And:
620 case Instruction::Or:
621 case Instruction::Xor:
622 case Instruction::ICmp:
623 case Instruction::FCmp:
624 case Instruction::Trunc:
625 case Instruction::ZExt:
626 case Instruction::SExt:
627 case Instruction::FPToUI:
628 case Instruction::FPToSI:
629 case Instruction::UIToFP:
630 case Instruction::SIToFP:
631 case Instruction::FPTrunc:
632 case Instruction::FPExt:
633 case Instruction::PtrToInt:
634 case Instruction::IntToPtr:
635 case Instruction::AddrSpaceCast:
636 case Instruction::BitCast:
637 case Instruction::Select:
638 case Instruction::Freeze:
639 case Instruction::ExtractElement:
640 case Instruction::InsertElement:
641 case Instruction::ShuffleVector:
642 case Instruction::InsertValue:
643 exp = createExpr(I);
644 break;
645 case Instruction::GetElementPtr:
646 exp = createGEPExpr(cast<GetElementPtrInst>(I));
647 break;
648 case Instruction::ExtractValue:
649 exp = createExtractvalueExpr(cast<ExtractValueInst>(I));
650 break;
651 case Instruction::PHI:
652 valueNumbering[V] = nextValueNumber;
653 NumberingPhi[nextValueNumber] = cast<PHINode>(V);
654 return nextValueNumber++;
655 default:
656 valueNumbering[V] = nextValueNumber;
657 return nextValueNumber++;
658 }
659
660 uint32_t e = assignExpNewValueNum(exp).first;
661 valueNumbering[V] = e;
662 return e;
663 }
664
665 /// Returns the value number of the specified value. Fails if
666 /// the value has not yet been numbered.
667 uint32_t GVNPass::ValueTable::lookup(Value *V, bool Verify) const {
668 DenseMap<Value*, uint32_t>::const_iterator VI = valueNumbering.find(V);
669 if (Verify) {
670 assert(VI != valueNumbering.end() && "Value not numbered?");
671 return VI->second;
672 }
673 return (VI != valueNumbering.end()) ? VI->second : 0;
674 }
675
676 /// Returns the value number of the given comparison,
677 /// assigning it a new number if it did not have one before. Useful when
678 /// we deduced the result of a comparison, but don't immediately have an
679 /// instruction realizing that comparison to hand.
680 uint32_t GVNPass::ValueTable::lookupOrAddCmp(unsigned Opcode,
681 CmpInst::Predicate Predicate,
682 Value *LHS, Value *RHS) {
683 Expression exp = createCmpExpr(Opcode, Predicate, LHS, RHS);
684 return assignExpNewValueNum(exp).first;
685 }
686
687 /// Remove all entries from the ValueTable.
688 void GVNPass::ValueTable::clear() {
689 valueNumbering.clear();
690 expressionNumbering.clear();
691 NumberingPhi.clear();
692 PhiTranslateTable.clear();
693 nextValueNumber = 1;
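  // Value numbers start at 1; lookup() without verification returns 0 for
  // values that have not been numbered.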
694 Expressions.clear();
695 ExprIdx.clear();
696 nextExprNumber = 0;
697 }
698
699 /// Remove a value from the value numbering.
700 void GVNPass::ValueTable::erase(Value *V) {
701 uint32_t Num = valueNumbering.lookup(V);
702 valueNumbering.erase(V);
703   // If V is a PHINode, V <--> value number is a one-to-one mapping.
704 if (isa<PHINode>(V))
705 NumberingPhi.erase(Num);
706 }
707
708 /// verifyRemoved - Verify that the value is removed from all internal data
709 /// structures.
710 void GVNPass::ValueTable::verifyRemoved(const Value *V) const {
711 for (DenseMap<Value*, uint32_t>::const_iterator
712 I = valueNumbering.begin(), E = valueNumbering.end(); I != E; ++I) {
713 assert(I->first != V && "Inst still occurs in value numbering map!");
714 }
715 }
716
717 //===----------------------------------------------------------------------===//
718 // GVN Pass
719 //===----------------------------------------------------------------------===//
720
721 bool GVNPass::isPREEnabled() const {
722 return Options.AllowPRE.value_or(GVNEnablePRE);
723 }
724
725 bool GVNPass::isLoadPREEnabled() const {
726 return Options.AllowLoadPRE.value_or(GVNEnableLoadPRE);
727 }
728
729 bool GVNPass::isLoadInLoopPREEnabled() const {
730 return Options.AllowLoadInLoopPRE.value_or(GVNEnableLoadInLoopPRE);
731 }
732
733 bool GVNPass::isLoadPRESplitBackedgeEnabled() const {
734 return Options.AllowLoadPRESplitBackedge.value_or(
735 GVNEnableSplitBackedgeInLoadPRE);
736 }
737
738 bool GVNPass::isMemDepEnabled() const {
739 return Options.AllowMemDep.value_or(GVNEnableMemDep);
740 }
741
742 PreservedAnalyses GVNPass::run(Function &F, FunctionAnalysisManager &AM) {
743 // FIXME: The order of evaluation of these 'getResult' calls is very
744 // significant! Re-ordering these variables will cause GVN when run alone to
745 // be less effective! We should fix memdep and basic-aa to not exhibit this
746 // behavior, but until then don't change the order here.
747 auto &AC = AM.getResult<AssumptionAnalysis>(F);
748 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
749 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
750 auto &AA = AM.getResult<AAManager>(F);
751 auto *MemDep =
752 isMemDepEnabled() ? &AM.getResult<MemoryDependenceAnalysis>(F) : nullptr;
753 auto *LI = AM.getCachedResult<LoopAnalysis>(F);
754 auto *MSSA = AM.getCachedResult<MemorySSAAnalysis>(F);
755 auto &ORE = AM.getResult<OptimizationRemarkEmitterAnalysis>(F);
756 bool Changed = runImpl(F, AC, DT, TLI, AA, MemDep, LI, &ORE,
757 MSSA ? &MSSA->getMSSA() : nullptr);
758 if (!Changed)
759 return PreservedAnalyses::all();
760 PreservedAnalyses PA;
761 PA.preserve<DominatorTreeAnalysis>();
762 PA.preserve<TargetLibraryAnalysis>();
763 if (MSSA)
764 PA.preserve<MemorySSAAnalysis>();
765 if (LI)
766 PA.preserve<LoopAnalysis>();
767 return PA;
768 }
769
770 void GVNPass::printPipeline(
771 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
772 static_cast<PassInfoMixin<GVNPass> *>(this)->printPipeline(
773 OS, MapClassName2PassName);
774
775 OS << "<";
776 if (Options.AllowPRE != std::nullopt)
777 OS << (*Options.AllowPRE ? "" : "no-") << "pre;";
778 if (Options.AllowLoadPRE != std::nullopt)
779 OS << (*Options.AllowLoadPRE ? "" : "no-") << "load-pre;";
780 if (Options.AllowLoadPRESplitBackedge != std::nullopt)
781 OS << (*Options.AllowLoadPRESplitBackedge ? "" : "no-")
782 << "split-backedge-load-pre;";
783 if (Options.AllowMemDep != std::nullopt)
784 OS << (*Options.AllowMemDep ? "" : "no-") << "memdep";
785 OS << ">";
786 }
787
788 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
789 LLVM_DUMP_METHOD void GVNPass::dump(DenseMap<uint32_t, Value *> &d) const {
790 errs() << "{\n";
791 for (auto &I : d) {
792 errs() << I.first << "\n";
793 I.second->dump();
794 }
795 errs() << "}\n";
796 }
797 #endif
798
799 enum class AvailabilityState : char {
800 /// We know the block *is not* fully available. This is a fixpoint.
801 Unavailable = 0,
802 /// We know the block *is* fully available. This is a fixpoint.
803 Available = 1,
804 /// We do not know whether the block is fully available or not,
805 /// but we are currently speculating that it will be.
806 /// If it would have turned out that the block was, in fact, not fully
807 /// available, this would have been cleaned up into an Unavailable.
808 SpeculativelyAvailable = 2,
809 };
810
811 /// Return true if we can prove that the value
812 /// we're analyzing is fully available in the specified block. As we go, keep
813 /// track of which blocks we know are fully alive in FullyAvailableBlocks. This
814 /// map is actually a tri-state map with the following values:
815 /// 0) we know the block *is not* fully available.
816 /// 1) we know the block *is* fully available.
817 /// 2) we do not know whether the block is fully available or not, but we are
818 /// currently speculating that it will be.
819 static bool IsValueFullyAvailableInBlock(
820 BasicBlock *BB,
821 DenseMap<BasicBlock *, AvailabilityState> &FullyAvailableBlocks) {
822 SmallVector<BasicBlock *, 32> Worklist;
823 std::optional<BasicBlock *> UnavailableBB;
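  // Set when the worklist walk reaches a block where the value is not
  // available; used below to downgrade speculatively-available blocks.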
824
825 // The number of times we didn't find an entry for a block in a map and
826 // optimistically inserted an entry marking block as speculatively available.
827 unsigned NumNewNewSpeculativelyAvailableBBs = 0;
828
829 #ifndef NDEBUG
830 SmallSet<BasicBlock *, 32> NewSpeculativelyAvailableBBs;
831 SmallVector<BasicBlock *, 32> AvailableBBs;
832 #endif
833
834 Worklist.emplace_back(BB);
835 while (!Worklist.empty()) {
836     BasicBlock *CurrBB = Worklist.pop_back_val(); // LIFO - depth-first!
837 // Optimistically assume that the block is Speculatively Available and check
838 // to see if we already know about this block in one lookup.
839 std::pair<DenseMap<BasicBlock *, AvailabilityState>::iterator, bool> IV =
840 FullyAvailableBlocks.try_emplace(
841 CurrBB, AvailabilityState::SpeculativelyAvailable);
842 AvailabilityState &State = IV.first->second;
843
844 // Did the entry already exist for this block?
845 if (!IV.second) {
846 if (State == AvailabilityState::Unavailable) {
847 UnavailableBB = CurrBB;
848 break; // Backpropagate unavailability info.
849 }
850
851 #ifndef NDEBUG
852 AvailableBBs.emplace_back(CurrBB);
853 #endif
854 continue; // Don't recurse further, but continue processing worklist.
855 }
856
857 // No entry found for block.
858 ++NumNewNewSpeculativelyAvailableBBs;
859 bool OutOfBudget = NumNewNewSpeculativelyAvailableBBs > MaxBBSpeculations;
860
861 // If we have exhausted our budget, mark this block as unavailable.
862 // Also, if this block has no predecessors, the value isn't live-in here.
863 if (OutOfBudget || pred_empty(CurrBB)) {
864 MaxBBSpeculationCutoffReachedTimes += (int)OutOfBudget;
865 State = AvailabilityState::Unavailable;
866 UnavailableBB = CurrBB;
867 break; // Backpropagate unavailability info.
868 }
869
870 // Tentatively consider this block as speculatively available.
871 #ifndef NDEBUG
872 NewSpeculativelyAvailableBBs.insert(CurrBB);
873 #endif
874 // And further recurse into block's predecessors, in depth-first order!
875 Worklist.append(pred_begin(CurrBB), pred_end(CurrBB));
876 }
877
878 #if LLVM_ENABLE_STATS
879 IsValueFullyAvailableInBlockNumSpeculationsMax.updateMax(
880 NumNewNewSpeculativelyAvailableBBs);
881 #endif
882
883   // Helper: if a block isn't yet marked as a fixpoint (the Unavailable and
884   // Available states are fixpoints), mark it and enqueue its successors.
885 auto MarkAsFixpointAndEnqueueSuccessors =
886 [&](BasicBlock *BB, AvailabilityState FixpointState) {
887 auto It = FullyAvailableBlocks.find(BB);
888 if (It == FullyAvailableBlocks.end())
889 return; // Never queried this block, leave as-is.
890 switch (AvailabilityState &State = It->second) {
891 case AvailabilityState::Unavailable:
892 case AvailabilityState::Available:
893 return; // Don't backpropagate further, continue processing worklist.
894 case AvailabilityState::SpeculativelyAvailable: // Fix it!
895 State = FixpointState;
896 #ifndef NDEBUG
897 assert(NewSpeculativelyAvailableBBs.erase(BB) &&
898 "Found a speculatively available successor leftover?");
899 #endif
900 // Queue successors for further processing.
901 Worklist.append(succ_begin(BB), succ_end(BB));
902 return;
903 }
904 };
905
906 if (UnavailableBB) {
907 // Okay, we have encountered an unavailable block.
908 // Mark speculatively available blocks reachable from UnavailableBB as
909 // unavailable as well. Paths are terminated when they reach blocks not in
910 // FullyAvailableBlocks or they are not marked as speculatively available.
911 Worklist.clear();
912 Worklist.append(succ_begin(*UnavailableBB), succ_end(*UnavailableBB));
913 while (!Worklist.empty())
914 MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
915 AvailabilityState::Unavailable);
916 }
917
918 #ifndef NDEBUG
919 Worklist.clear();
920 for (BasicBlock *AvailableBB : AvailableBBs)
921 Worklist.append(succ_begin(AvailableBB), succ_end(AvailableBB));
922 while (!Worklist.empty())
923 MarkAsFixpointAndEnqueueSuccessors(Worklist.pop_back_val(),
924 AvailabilityState::Available);
925
926 assert(NewSpeculativelyAvailableBBs.empty() &&
927 "Must have fixed all the new speculatively available blocks.");
928 #endif
929
930 return !UnavailableBB;
931 }
932
933 /// Given a set of loads specified by ValuesPerBlock,
934 /// construct SSA form, allowing us to eliminate Load. This returns the value
935 /// that should be used at Load's definition site.
936 static Value *
937 ConstructSSAForLoadSet(LoadInst *Load,
938 SmallVectorImpl<AvailableValueInBlock> &ValuesPerBlock,
939 GVNPass &gvn) {
940 // Check for the fully redundant, dominating load case. In this case, we can
941 // just use the dominating value directly.
942 if (ValuesPerBlock.size() == 1 &&
943 gvn.getDominatorTree().properlyDominates(ValuesPerBlock[0].BB,
944 Load->getParent())) {
945 assert(!ValuesPerBlock[0].AV.isUndefValue() &&
946 "Dead BB dominate this block");
947 return ValuesPerBlock[0].MaterializeAdjustedValue(Load, gvn);
948 }
949
950 // Otherwise, we have to construct SSA form.
951 SmallVector<PHINode*, 8> NewPHIs;
952 SSAUpdater SSAUpdate(&NewPHIs);
953 SSAUpdate.Initialize(Load->getType(), Load->getName());
954
955 for (const AvailableValueInBlock &AV : ValuesPerBlock) {
956 BasicBlock *BB = AV.BB;
957
958 if (AV.AV.isUndefValue())
959 continue;
960
961 if (SSAUpdate.HasValueForBlock(BB))
962 continue;
963
964 // If the value is the load that we will be eliminating, and the block it's
965 // available in is the block that the load is in, then don't add it as
966 // SSAUpdater will resolve the value to the relevant phi which may let it
967 // avoid phi construction entirely if there's actually only one value.
968 if (BB == Load->getParent() &&
969 ((AV.AV.isSimpleValue() && AV.AV.getSimpleValue() == Load) ||
970 (AV.AV.isCoercedLoadValue() && AV.AV.getCoercedLoadValue() == Load)))
971 continue;
972
973 SSAUpdate.AddAvailableValue(BB, AV.MaterializeAdjustedValue(Load, gvn));
974 }
975
976 // Perform PHI construction.
977 return SSAUpdate.GetValueInMiddleOfBlock(Load->getParent());
978 }
979
980 Value *AvailableValue::MaterializeAdjustedValue(LoadInst *Load,
981 Instruction *InsertPt,
982 GVNPass &gvn) const {
983 Value *Res;
984 Type *LoadTy = Load->getType();
985 const DataLayout &DL = Load->getModule()->getDataLayout();
986 if (isSimpleValue()) {
987 Res = getSimpleValue();
988 if (Res->getType() != LoadTy) {
989 Res = getStoreValueForLoad(Res, Offset, LoadTy, InsertPt, DL);
990
991 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL VAL:\nOffset: " << Offset
992 << " " << *getSimpleValue() << '\n'
993 << *Res << '\n'
994 << "\n\n\n");
995 }
996 } else if (isCoercedLoadValue()) {
997 LoadInst *CoercedLoad = getCoercedLoadValue();
998 if (CoercedLoad->getType() == LoadTy && Offset == 0) {
999 Res = CoercedLoad;
1000 } else {
1001 Res = getLoadValueForLoad(CoercedLoad, Offset, LoadTy, InsertPt, DL);
1002 // We would like to use gvn.markInstructionForDeletion here, but we can't
1003 // because the load is already memoized into the leader map table that GVN
1004 // tracks. It is potentially possible to remove the load from the table,
1005       // but then all of the operations based on it would need to be
1006 // rehashed. Just leave the dead load around.
1007 gvn.getMemDep().removeInstruction(CoercedLoad);
1008 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL LOAD:\nOffset: " << Offset
1009 << " " << *getCoercedLoadValue() << '\n'
1010 << *Res << '\n'
1011 << "\n\n\n");
1012 }
1013 } else if (isMemIntrinValue()) {
1014 Res = getMemInstValueForLoad(getMemIntrinValue(), Offset, LoadTy,
1015 InsertPt, DL);
1016 LLVM_DEBUG(dbgs() << "GVN COERCED NONLOCAL MEM INTRIN:\nOffset: " << Offset
1017 << " " << *getMemIntrinValue() << '\n'
1018 << *Res << '\n'
1019 << "\n\n\n");
1020 } else if (isSelectValue()) {
1021 // Introduce a new value select for a load from an eligible pointer select.
1022 SelectInst *Sel = getSelectValue();
1023 assert(V1 && V2 && "both value operands of the select must be present");
1024 Res = SelectInst::Create(Sel->getCondition(), V1, V2, "", Sel);
1025 } else {
1026 llvm_unreachable("Should not materialize value from dead block");
1027 }
1028 assert(Res && "failed to materialize?");
1029 return Res;
1030 }
1031
1032 static bool isLifetimeStart(const Instruction *Inst) {
1033 if (const IntrinsicInst* II = dyn_cast<IntrinsicInst>(Inst))
1034 return II->getIntrinsicID() == Intrinsic::lifetime_start;
1035 return false;
1036 }
1037
1038 /// Assuming To can be reached from both From and Between, does Between lie on
1039 /// every path from From to To?
1040 static bool liesBetween(const Instruction *From, Instruction *Between,
1041 const Instruction *To, DominatorTree *DT) {
1042 if (From->getParent() == Between->getParent())
1043 return DT->dominates(From, Between);
1044 SmallSet<BasicBlock *, 1> Exclusion;
1045 Exclusion.insert(Between->getParent());
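  // If To cannot be reached from From once Between's block is excluded, every
  // path from From to To must pass through Between.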
1046 return !isPotentiallyReachable(From, To, &Exclusion, DT);
1047 }
1048
1049 /// Try to locate the three instructions involved in a missed
1050 /// load-elimination case that is due to an intervening store.
1051 static void reportMayClobberedLoad(LoadInst *Load, MemDepResult DepInfo,
1052 DominatorTree *DT,
1053 OptimizationRemarkEmitter *ORE) {
1054 using namespace ore;
1055
1056 Instruction *OtherAccess = nullptr;
1057
1058 OptimizationRemarkMissed R(DEBUG_TYPE, "LoadClobbered", Load);
1059 R << "load of type " << NV("Type", Load->getType()) << " not eliminated"
1060 << setExtraArgs();
1061
1062 for (auto *U : Load->getPointerOperand()->users()) {
1063 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1064 auto *I = cast<Instruction>(U);
1065 if (I->getFunction() == Load->getFunction() && DT->dominates(I, Load)) {
1066 // Use the most immediately dominating value
1067 if (OtherAccess) {
1068 if (DT->dominates(OtherAccess, I))
1069 OtherAccess = I;
1070 else
1071 assert(U == OtherAccess || DT->dominates(I, OtherAccess));
1072 } else
1073 OtherAccess = I;
1074 }
1075 }
1076 }
1077
1078 if (!OtherAccess) {
1079     // There is no dominating use. Check if we can find the closest non-dominating
1080 // use that lies between any other potentially available use and Load.
1081 for (auto *U : Load->getPointerOperand()->users()) {
1082 if (U != Load && (isa<LoadInst>(U) || isa<StoreInst>(U))) {
1083 auto *I = cast<Instruction>(U);
1084 if (I->getFunction() == Load->getFunction() &&
1085 isPotentiallyReachable(I, Load, nullptr, DT)) {
1086 if (OtherAccess) {
1087 if (liesBetween(OtherAccess, I, Load, DT)) {
1088 OtherAccess = I;
1089 } else if (!liesBetween(I, OtherAccess, Load, DT)) {
1090 // These uses are both partially available at Load were it not for
1091 // the clobber, but neither lies strictly after the other.
1092 OtherAccess = nullptr;
1093 break;
1094 } // else: keep current OtherAccess since it lies between U and Load
1095 } else {
1096 OtherAccess = I;
1097 }
1098 }
1099 }
1100 }
1101 }
1102
1103 if (OtherAccess)
1104 R << " in favor of " << NV("OtherAccess", OtherAccess);
1105
1106 R << " because it is clobbered by " << NV("ClobberedBy", DepInfo.getInst());
1107
1108 ORE->emit(R);
1109 }
1110
1111 // Find non-clobbered value for Loc memory location in extended basic block
1112 // (chain of basic blocks with single predecessors) starting From instruction.
1113 static Value *findDominatingValue(const MemoryLocation &Loc, Type *LoadTy,
1114 Instruction *From, AAResults *AA) {
1115 uint32_t NumVisitedInsts = 0;
1116 BasicBlock *FromBB = From->getParent();
1117 BatchAAResults BatchAA(*AA);
1118 for (BasicBlock *BB = FromBB; BB; BB = BB->getSinglePredecessor())
1119 for (auto I = BB == FromBB ? From->getReverseIterator() : BB->rbegin(),
1120 E = BB->rend();
1121 I != E; ++I) {
1122 // Stop the search if limit is reached.
1123 if (++NumVisitedInsts > MaxNumVisitedInsts)
1124 return nullptr;
1125 Instruction *Inst = &*I;
1126 if (isModSet(BatchAA.getModRefInfo(Inst, Loc)))
1127 return nullptr;
1128 if (auto *LI = dyn_cast<LoadInst>(Inst))
1129 if (LI->getPointerOperand() == Loc.Ptr && LI->getType() == LoadTy)
1130 return LI;
1131 }
1132 return nullptr;
1133 }
1134
1135 std::optional<AvailableValue>
1136 GVNPass::AnalyzeLoadAvailability(LoadInst *Load, MemDepResult DepInfo,
1137 Value *Address) {
1138 assert(Load->isUnordered() && "rules below are incorrect for ordered access");
1139 assert(DepInfo.isLocal() && "expected a local dependence");
1140
1141 Instruction *DepInst = DepInfo.getInst();
1142
1143 const DataLayout &DL = Load->getModule()->getDataLayout();
1144 if (DepInfo.isClobber()) {
1145 // If the dependence is to a store that writes to a superset of the bits
1146 // read by the load, we can extract the bits we need for the load from the
1147 // stored value.
1148 if (StoreInst *DepSI = dyn_cast<StoreInst>(DepInst)) {
1149 // Can't forward from non-atomic to atomic without violating memory model.
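      // (The comparison below rejects exactly the case of an atomic load fed
      // by a non-atomic store.)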
1150 if (Address && Load->isAtomic() <= DepSI->isAtomic()) {
1151 int Offset =
1152 analyzeLoadFromClobberingStore(Load->getType(), Address, DepSI, DL);
1153 if (Offset != -1)
1154 return AvailableValue::get(DepSI->getValueOperand(), Offset);
1155 }
1156 }
1157
1158 // Check to see if we have something like this:
1159 // load i32* P
1160 // load i8* (P+1)
1161 // if we have this, replace the later with an extraction from the former.
1162 if (LoadInst *DepLoad = dyn_cast<LoadInst>(DepInst)) {
1163 // If this is a clobber and L is the first instruction in its block, then
1164 // we have the first instruction in the entry block.
1165 // Can't forward from non-atomic to atomic without violating memory model.
1166 if (DepLoad != Load && Address &&
1167 Load->isAtomic() <= DepLoad->isAtomic()) {
1168 Type *LoadType = Load->getType();
1169 int Offset = -1;
1170
1171 // If MD reported clobber, check it was nested.
1172 if (DepInfo.isClobber() &&
1173 canCoerceMustAliasedValueToLoad(DepLoad, LoadType, DL)) {
1174 const auto ClobberOff = MD->getClobberOffset(DepLoad);
1175         // GVN cannot handle a negative offset.
1176 Offset = (ClobberOff == std::nullopt || *ClobberOff < 0)
1177 ? -1
1178 : *ClobberOff;
1179 }
1180 if (Offset == -1)
1181 Offset =
1182 analyzeLoadFromClobberingLoad(LoadType, Address, DepLoad, DL);
1183 if (Offset != -1)
1184 return AvailableValue::getLoad(DepLoad, Offset);
1185 }
1186 }
1187
1188 // If the clobbering value is a memset/memcpy/memmove, see if we can
1189 // forward a value on from it.
1190 if (MemIntrinsic *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1191 if (Address && !Load->isAtomic()) {
1192 int Offset = analyzeLoadFromClobberingMemInst(Load->getType(), Address,
1193 DepMI, DL);
1194 if (Offset != -1)
1195 return AvailableValue::getMI(DepMI, Offset);
1196 }
1197 }
1198
1199 // Nothing known about this clobber, have to be conservative
1200 LLVM_DEBUG(
1201 // fast print dep, using operator<< on instruction is too slow.
1202 dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1203 dbgs() << " is clobbered by " << *DepInst << '\n';);
1204 if (ORE->allowExtraAnalysis(DEBUG_TYPE))
1205 reportMayClobberedLoad(Load, DepInfo, DT, ORE);
1206
1207 return std::nullopt;
1208 }
1209 assert(DepInfo.isDef() && "follows from above");
1210
1211 // Loading the alloca -> undef.
1212 // Loading immediately after lifetime begin -> undef.
1213 if (isa<AllocaInst>(DepInst) || isLifetimeStart(DepInst))
1214 return AvailableValue::get(UndefValue::get(Load->getType()));
1215
1216 if (Constant *InitVal =
1217 getInitialValueOfAllocation(DepInst, TLI, Load->getType()))
1218 return AvailableValue::get(InitVal);
1219
1220 if (StoreInst *S = dyn_cast<StoreInst>(DepInst)) {
1221 // Reject loads and stores that are to the same address but are of
1222     // different types if we have to. If the stored value is convertible to
1223 // the loaded value, we can reuse it.
1224 if (!canCoerceMustAliasedValueToLoad(S->getValueOperand(), Load->getType(),
1225 DL))
1226 return std::nullopt;
1227
1228 // Can't forward from non-atomic to atomic without violating memory model.
1229 if (S->isAtomic() < Load->isAtomic())
1230 return std::nullopt;
1231
1232 return AvailableValue::get(S->getValueOperand());
1233 }
1234
1235 if (LoadInst *LD = dyn_cast<LoadInst>(DepInst)) {
1236 // If the types mismatch and we can't handle it, reject reuse of the load.
1237     // If the stored value is at least as large as the loaded value, we can reuse
1238 // it.
1239 if (!canCoerceMustAliasedValueToLoad(LD, Load->getType(), DL))
1240 return std::nullopt;
1241
1242 // Can't forward from non-atomic to atomic without violating memory model.
1243 if (LD->isAtomic() < Load->isAtomic())
1244 return std::nullopt;
1245
1246 return AvailableValue::getLoad(LD);
1247 }
1248
1249   // Check if a load whose address depends on a select can be converted to a
1250   // select between load values. There must be no instructions between the found
1251 // loads and DepInst that may clobber the loads.
1252 if (auto *Sel = dyn_cast<SelectInst>(DepInst)) {
1253 assert(Sel->getType() == Load->getPointerOperandType());
1254 auto Loc = MemoryLocation::get(Load);
1255 Value *V1 =
1256 findDominatingValue(Loc.getWithNewPtr(Sel->getTrueValue()),
1257 Load->getType(), DepInst, getAliasAnalysis());
1258 if (!V1)
1259 return std::nullopt;
1260 Value *V2 =
1261 findDominatingValue(Loc.getWithNewPtr(Sel->getFalseValue()),
1262 Load->getType(), DepInst, getAliasAnalysis());
1263 if (!V2)
1264 return std::nullopt;
1265 return AvailableValue::getSelect(Sel, V1, V2);
1266 }
1267
1268 // Unknown def - must be conservative
1269 LLVM_DEBUG(
1270 // fast print dep, using operator<< on instruction is too slow.
1271 dbgs() << "GVN: load "; Load->printAsOperand(dbgs());
1272 dbgs() << " has unknown def " << *DepInst << '\n';);
1273 return std::nullopt;
1274 }
1275
1276 void GVNPass::AnalyzeLoadAvailability(LoadInst *Load, LoadDepVect &Deps,
1277 AvailValInBlkVect &ValuesPerBlock,
1278 UnavailBlkVect &UnavailableBlocks) {
1279 // Filter out useless results (non-locals, etc). Keep track of the blocks
1280 // where we have a value available in repl, also keep track of whether we see
1281 // dependencies that produce an unknown value for the load (such as a call
1282 // that could potentially clobber the load).
1283 for (const auto &Dep : Deps) {
1284 BasicBlock *DepBB = Dep.getBB();
1285 MemDepResult DepInfo = Dep.getResult();
1286
1287 if (DeadBlocks.count(DepBB)) {
1288       // A dead dependent mem-op is treated as a load evaluating the same value
1289 // as the load in question.
1290 ValuesPerBlock.push_back(AvailableValueInBlock::getUndef(DepBB));
1291 continue;
1292 }
1293
1294 if (!DepInfo.isLocal()) {
1295 UnavailableBlocks.push_back(DepBB);
1296 continue;
1297 }
1298
1299 // The address being loaded in this non-local block may not be the same as
1300 // the pointer operand of the load if PHI translation occurs. Make sure
1301 // to consider the right address.
1302 if (auto AV = AnalyzeLoadAvailability(Load, DepInfo, Dep.getAddress())) {
1303       // Subtlety: because we know this was a non-local dependency, we know
1304       // it's safe to materialize anywhere between the instruction within
1305       // DepInfo and the end of its block.
1306 ValuesPerBlock.push_back(
1307 AvailableValueInBlock::get(DepBB, std::move(*AV)));
1308 } else {
1309 UnavailableBlocks.push_back(DepBB);
1310 }
1311 }
1312
1313 assert(Deps.size() == ValuesPerBlock.size() + UnavailableBlocks.size() &&
1314 "post condition violation");
1315 }
1316
1317 void GVNPass::eliminatePartiallyRedundantLoad(
1318 LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1319 MapVector<BasicBlock *, Value *> &AvailableLoads) {
1320 for (const auto &AvailableLoad : AvailableLoads) {
1321 BasicBlock *UnavailableBlock = AvailableLoad.first;
1322 Value *LoadPtr = AvailableLoad.second;
1323
1324 auto *NewLoad =
1325 new LoadInst(Load->getType(), LoadPtr, Load->getName() + ".pre",
1326 Load->isVolatile(), Load->getAlign(), Load->getOrdering(),
1327 Load->getSyncScopeID(), UnavailableBlock->getTerminator());
1328 NewLoad->setDebugLoc(Load->getDebugLoc());
1329 if (MSSAU) {
1330 auto *MSSA = MSSAU->getMemorySSA();
1331 // Get the defining access of the original load or use the load if it is a
1332 // MemoryDef (e.g. because it is volatile). The inserted loads are
1333 // guaranteed to load from the same definition.
1334 auto *LoadAcc = MSSA->getMemoryAccess(Load);
1335 auto *DefiningAcc =
1336 isa<MemoryDef>(LoadAcc) ? LoadAcc : LoadAcc->getDefiningAccess();
1337 auto *NewAccess = MSSAU->createMemoryAccessInBB(
1338 NewLoad, DefiningAcc, NewLoad->getParent(),
1339 MemorySSA::BeforeTerminator);
1340 if (auto *NewDef = dyn_cast<MemoryDef>(NewAccess))
1341 MSSAU->insertDef(NewDef, /*RenameUses=*/true);
1342 else
1343 MSSAU->insertUse(cast<MemoryUse>(NewAccess), /*RenameUses=*/true);
1344 }
1345
1346 // Transfer the old load's AA tags to the new load.
1347 AAMDNodes Tags = Load->getAAMetadata();
1348 if (Tags)
1349 NewLoad->setAAMetadata(Tags);
1350
1351 if (auto *MD = Load->getMetadata(LLVMContext::MD_invariant_load))
1352 NewLoad->setMetadata(LLVMContext::MD_invariant_load, MD);
1353 if (auto *InvGroupMD = Load->getMetadata(LLVMContext::MD_invariant_group))
1354 NewLoad->setMetadata(LLVMContext::MD_invariant_group, InvGroupMD);
1355 if (auto *RangeMD = Load->getMetadata(LLVMContext::MD_range))
1356 NewLoad->setMetadata(LLVMContext::MD_range, RangeMD);
1357 if (auto *AccessMD = Load->getMetadata(LLVMContext::MD_access_group))
1358 if (LI &&
1359 LI->getLoopFor(Load->getParent()) == LI->getLoopFor(UnavailableBlock))
1360 NewLoad->setMetadata(LLVMContext::MD_access_group, AccessMD);
1361
1362 // We do not propagate the old load's debug location, because the new
1363 // load now lives in a different BB, and we want to avoid a jumpy line
1364 // table.
1365 // FIXME: How do we retain source locations without causing poor debugging
1366 // behavior?
1367
1368 // Add the newly created load.
1369 ValuesPerBlock.push_back(
1370 AvailableValueInBlock::get(UnavailableBlock, NewLoad));
1371 MD->invalidateCachedPointerInfo(LoadPtr);
1372 LLVM_DEBUG(dbgs() << "GVN INSERTED " << *NewLoad << '\n');
1373 }
1374
1375 // Perform PHI construction.
1376 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1377 Load->replaceAllUsesWith(V);
1378 if (isa<PHINode>(V))
1379 V->takeName(Load);
1380 if (Instruction *I = dyn_cast<Instruction>(V))
1381 I->setDebugLoc(Load->getDebugLoc());
1382 if (V->getType()->isPtrOrPtrVectorTy())
1383 MD->invalidateCachedPointerInfo(V);
1384 markInstructionForDeletion(Load);
1385 ORE->emit([&]() {
1386 return OptimizationRemark(DEBUG_TYPE, "LoadPRE", Load)
1387 << "load eliminated by PRE";
1388 });
1389 }
1390
1391 bool GVNPass::PerformLoadPRE(LoadInst *Load, AvailValInBlkVect &ValuesPerBlock,
1392 UnavailBlkVect &UnavailableBlocks) {
1393 // Okay, we have *some* definitions of the value. This means that the value
1394   // is available in some of our (transitive) predecessors. Let's think about
1395 // doing PRE of this load. This will involve inserting a new load into the
1396 // predecessor when it's not available. We could do this in general, but
1397 // prefer to not increase code size. As such, we only do this when we know
1398 // that we only have to insert *one* load (which means we're basically moving
1399 // the load, not inserting a new one).
1400
1401 SmallPtrSet<BasicBlock *, 4> Blockers(UnavailableBlocks.begin(),
1402 UnavailableBlocks.end());
1403
1404 // Let's find the first basic block with more than one predecessor. Walk
1405 // backwards through predecessors if needed.
1406 BasicBlock *LoadBB = Load->getParent();
1407 BasicBlock *TmpBB = LoadBB;
1408
1409   // Check that there are no implicit control flow instructions above our load in
1410 // its block. If there is an instruction that doesn't always pass the
1411 // execution to the following instruction, then moving through it may become
1412 // invalid. For example:
1413 //
1414 // int arr[LEN];
1415 // int index = ???;
1416 // ...
1417 // guard(0 <= index && index < LEN);
1418 // use(arr[index]);
1419 //
1420 // It is illegal to move the array access to any point above the guard,
1421 // because if the index is out of bounds we should deoptimize rather than
1422 // access the array.
1423 // Check that there is no guard in this block above our instruction.
1424 bool MustEnsureSafetyOfSpeculativeExecution =
1425 ICF->isDominatedByICFIFromSameBlock(Load);
1426
1427 while (TmpBB->getSinglePredecessor()) {
1428 TmpBB = TmpBB->getSinglePredecessor();
1429 if (TmpBB == LoadBB) // Infinite (unreachable) loop.
1430 return false;
1431 if (Blockers.count(TmpBB))
1432 return false;
1433
1434 // If any of these blocks has more than one successor (i.e. if the edge we
1435 // just traversed was critical), then there are other paths through this
1436 // block along which the load may not be anticipated. Hoisting the load
1437 // above this block would be adding the load to execution paths along
1438 // which it was not previously executed.
1439 if (TmpBB->getTerminator()->getNumSuccessors() != 1)
1440 return false;
1441
1442 // Check that there is no implicit control flow in a block above.
1443 MustEnsureSafetyOfSpeculativeExecution =
1444 MustEnsureSafetyOfSpeculativeExecution || ICF->hasICF(TmpBB);
1445 }
1446
1447 assert(TmpBB);
1448 LoadBB = TmpBB;
1449
1450 // Check to see how many predecessors have the loaded value fully
1451 // available.
1452 MapVector<BasicBlock *, Value *> PredLoads;
1453 DenseMap<BasicBlock *, AvailabilityState> FullyAvailableBlocks;
1454 for (const AvailableValueInBlock &AV : ValuesPerBlock)
1455 FullyAvailableBlocks[AV.BB] = AvailabilityState::Available;
1456 for (BasicBlock *UnavailableBB : UnavailableBlocks)
1457 FullyAvailableBlocks[UnavailableBB] = AvailabilityState::Unavailable;
1458
1459 SmallVector<BasicBlock *, 4> CriticalEdgePred;
1460 for (BasicBlock *Pred : predecessors(LoadBB)) {
1461 // If any predecessor block is an EH pad that does not allow non-PHI
1462 // instructions before the terminator, we can't PRE the load.
1463 if (Pred->getTerminator()->isEHPad()) {
1464 LLVM_DEBUG(
1465 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD PREDECESSOR '"
1466 << Pred->getName() << "': " << *Load << '\n');
1467 return false;
1468 }
1469
1470 if (IsValueFullyAvailableInBlock(Pred, FullyAvailableBlocks)) {
1471 continue;
1472 }
1473
1474 if (Pred->getTerminator()->getNumSuccessors() != 1) {
1475 if (isa<IndirectBrInst>(Pred->getTerminator())) {
1476 LLVM_DEBUG(
1477 dbgs() << "COULD NOT PRE LOAD BECAUSE OF INDBR CRITICAL EDGE '"
1478 << Pred->getName() << "': " << *Load << '\n');
1479 return false;
1480 }
1481
1482 if (LoadBB->isEHPad()) {
1483 LLVM_DEBUG(
1484 dbgs() << "COULD NOT PRE LOAD BECAUSE OF AN EH PAD CRITICAL EDGE '"
1485 << Pred->getName() << "': " << *Load << '\n');
1486 return false;
1487 }
1488
1489 // Do not split backedge as it will break the canonical loop form.
1490 if (!isLoadPRESplitBackedgeEnabled())
1491 if (DT->dominates(LoadBB, Pred)) {
1492 LLVM_DEBUG(
1493 dbgs()
1494 << "COULD NOT PRE LOAD BECAUSE OF A BACKEDGE CRITICAL EDGE '"
1495 << Pred->getName() << "': " << *Load << '\n');
1496 return false;
1497 }
1498
1499 CriticalEdgePred.push_back(Pred);
1500 } else {
1501 // Only add the predecessors that will not be split for now.
1502 PredLoads[Pred] = nullptr;
1503 }
1504 }
1505
1506 // Decide whether PRE is profitable for this load.
1507 unsigned NumUnavailablePreds = PredLoads.size() + CriticalEdgePred.size();
1508 assert(NumUnavailablePreds != 0 &&
1509 "Fully available value should already be eliminated!");
1510
1511 // If this load is unavailable in multiple predecessors, reject it.
1512 // FIXME: If we could restructure the CFG, we could make a common pred with
1513 // all the preds that don't have an available Load and insert a new load into
1514 // that one block.
1515 if (NumUnavailablePreds != 1)
1516 return false;
1517
1518 // Now we know where we will insert the load. We must ensure that it is safe
1519 // to speculatively execute the load at that point.
1520 if (MustEnsureSafetyOfSpeculativeExecution) {
1521 if (CriticalEdgePred.size())
1522 if (!isSafeToSpeculativelyExecute(Load, LoadBB->getFirstNonPHI(), AC, DT))
1523 return false;
1524 for (auto &PL : PredLoads)
1525 if (!isSafeToSpeculativelyExecute(Load, PL.first->getTerminator(), AC,
1526 DT))
1527 return false;
1528 }
1529
1530 // Split critical edges, and update the unavailable predecessors accordingly.
1531 for (BasicBlock *OrigPred : CriticalEdgePred) {
1532 BasicBlock *NewPred = splitCriticalEdges(OrigPred, LoadBB);
1533 assert(!PredLoads.count(OrigPred) && "Split edges shouldn't be in map!");
1534 PredLoads[NewPred] = nullptr;
1535 LLVM_DEBUG(dbgs() << "Split critical edge " << OrigPred->getName() << "->"
1536 << LoadBB->getName() << '\n');
1537 }
1538
1539 // Check if the load can safely be moved to all the unavailable predecessors.
1540 bool CanDoPRE = true;
1541 const DataLayout &DL = Load->getModule()->getDataLayout();
1542 SmallVector<Instruction*, 8> NewInsts;
1543 for (auto &PredLoad : PredLoads) {
1544 BasicBlock *UnavailablePred = PredLoad.first;
1545
1546 // Do PHI translation to get its value in the predecessor if necessary. The
1547 // returned pointer (if non-null) is guaranteed to dominate UnavailablePred.
1548 // We do the translation for each edge we skipped by going from Load's block
1549 // to LoadBB, otherwise we might miss pieces needing translation.
1550
1551 // If all preds have a single successor, then we know it is safe to insert
1552 // the load in the pred, so we can insert code to materialize the pointer if
1553 // it is not available.
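// For illustration (hypothetical IR): if the pointer in LoadBB is
//   %p = phi ptr [ %a, %pred1 ], [ %b, %pred2 ]
// then phi translation of %p into %pred1 yields %a, and any address
// computation built on top of %p (e.g. a gep) is re-materialized on %a in the
// predecessor, with the newly created instructions recorded in NewInsts.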
1554 Value *LoadPtr = Load->getPointerOperand();
1555 BasicBlock *Cur = Load->getParent();
1556 while (Cur != LoadBB) {
1557 PHITransAddr Address(LoadPtr, DL, AC);
1558 LoadPtr = Address.PHITranslateWithInsertion(
1559 Cur, Cur->getSinglePredecessor(), *DT, NewInsts);
1560 if (!LoadPtr) {
1561 CanDoPRE = false;
1562 break;
1563 }
1564 Cur = Cur->getSinglePredecessor();
1565 }
1566
1567 if (LoadPtr) {
1568 PHITransAddr Address(LoadPtr, DL, AC);
1569 LoadPtr = Address.PHITranslateWithInsertion(LoadBB, UnavailablePred, *DT,
1570 NewInsts);
1571 }
1572 // If we couldn't find or insert a computation of this phi translated value,
1573 // we fail PRE.
1574 if (!LoadPtr) {
1575 LLVM_DEBUG(dbgs() << "COULDN'T INSERT PHI TRANSLATED VALUE OF: "
1576 << *Load->getPointerOperand() << "\n");
1577 CanDoPRE = false;
1578 break;
1579 }
1580
1581 PredLoad.second = LoadPtr;
1582 }
1583
1584 if (!CanDoPRE) {
1585 while (!NewInsts.empty()) {
1586 // Erase instructions generated by the failed PHI translation before
1587 // trying to number them. PHI translation might insert instructions
1588 // in basic blocks other than the current one, and we delete them
1589 // directly, as markInstructionForDeletion only allows removing from the
1590 // current basic block.
1591 NewInsts.pop_back_val()->eraseFromParent();
1592 }
1593 // HINT: Don't revert the edge-splitting, as the following transformation may
1594 // also need to split these critical edges.
1595 return !CriticalEdgePred.empty();
1596 }
1597
1598 // Okay, we can eliminate this load by inserting a reload in the predecessor
1599 // and using PHI construction to get the value in the other predecessors.
1600 // Do it.
1601 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOAD: " << *Load << '\n');
1602 LLVM_DEBUG(if (!NewInsts.empty()) dbgs() << "INSERTED " << NewInsts.size()
1603 << " INSTS: " << *NewInsts.back()
1604 << '\n');
1605
1606 // Assign value numbers to the new instructions.
1607 for (Instruction *I : NewInsts) {
1608 // Instructions that have been inserted in predecessor(s) to materialize
1609 // the load address do not retain their original debug locations. Doing
1610 // so could lead to confusing (but correct) source attributions.
1611 I->updateLocationAfterHoist();
1612
1613 // FIXME: We really _ought_ to insert these value numbers into their
1614 // parent's availability map. However, in doing so, we risk getting into
1615 // ordering issues. If a block hasn't been processed yet, we would be
1616 // marking a value as AVAIL-IN, which isn't what we intend.
1617 VN.lookupOrAdd(I);
1618 }
1619
1620 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, PredLoads);
1621 ++NumPRELoad;
1622 return true;
1623 }
1624
1625 bool GVNPass::performLoopLoadPRE(LoadInst *Load,
1626 AvailValInBlkVect &ValuesPerBlock,
1627 UnavailBlkVect &UnavailableBlocks) {
1628 if (!LI)
1629 return false;
1630
1631 const Loop *L = LI->getLoopFor(Load->getParent());
1632 // TODO: Generalize to other loop blocks that dominate the latch.
1633 if (!L || L->getHeader() != Load->getParent())
1634 return false;
1635
1636 BasicBlock *Preheader = L->getLoopPreheader();
1637 BasicBlock *Latch = L->getLoopLatch();
1638 if (!Preheader || !Latch)
1639 return false;
1640
1641 Value *LoadPtr = Load->getPointerOperand();
1642 // Must be available in preheader.
1643 if (!L->isLoopInvariant(LoadPtr))
1644 return false;
1645
1646 // We plan to hoist the load to the preheader without introducing a new fault.
1647 // In order to do that, we need to prove that we cannot side-exit the loop
1648 // between first entering the loop header and executing the load.
1649 if (ICF->isDominatedByICFIFromSameBlock(Load))
1650 return false;
1651
1652 BasicBlock *LoopBlock = nullptr;
1653 for (auto *Blocker : UnavailableBlocks) {
1654 // Blockers from outside the loop are handled in the preheader.
1655 if (!L->contains(Blocker))
1656 continue;
1657
1658 // Only allow one loop block. The loop header is executed at least as often
1659 // as any loop block, and likely much more often. But in the case of
1660 // multiple loop blocks, we need extra information (such as block frequency
1661 // info) to understand whether it is profitable to PRE into multiple loop
1662 // blocks.
1663 if (LoopBlock)
1664 return false;
1665
1666 // Do not sink into inner loops. This may be non-profitable.
1667 if (L != LI->getLoopFor(Blocker))
1668 return false;
1669
1670 // Blocks that dominate the latch execute on every single iteration, maybe
1671 // except the last one. So PREing into these blocks doesn't make much sense
1672 // in most cases. But the blocks that do not necessarily execute on each
1673 // iteration are sometimes much colder than the header, and this is when
1674 // PRE is potentially profitable.
1675 if (DT->dominates(Blocker, Latch))
1676 return false;
1677
1678 // Make sure that the terminator itself doesn't clobber.
1679 if (Blocker->getTerminator()->mayWriteToMemory())
1680 return false;
1681
1682 LoopBlock = Blocker;
1683 }
1684
1685 if (!LoopBlock)
1686 return false;
1687
1688 // Make sure the memory at this pointer cannot be freed, so that we can
1689 // safely reload from it after a clobber.
1690 if (LoadPtr->canBeFreed())
1691 return false;
1692
1693 // TODO: Support critical edge splitting if blocker has more than 1 successor.
1694 MapVector<BasicBlock *, Value *> AvailableLoads;
1695 AvailableLoads[LoopBlock] = LoadPtr;
1696 AvailableLoads[Preheader] = LoadPtr;
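// Sketch of the intended result (names are hypothetical): for a loop-invariant
// pointer %p loaded in the header and clobbered only in a cold, non-latch
// block %cold, this inserts one load of %p in the preheader and one reload in
// %cold, and SSA construction replaces the header load with a phi of those
// values. Iterations that bypass %cold execute no load inside the loop.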
1697
1698 LLVM_DEBUG(dbgs() << "GVN REMOVING PRE LOOP LOAD: " << *Load << '\n');
1699 eliminatePartiallyRedundantLoad(Load, ValuesPerBlock, AvailableLoads);
1700 ++NumPRELoopLoad;
1701 return true;
1702 }
1703
1704 static void reportLoadElim(LoadInst *Load, Value *AvailableValue,
1705 OptimizationRemarkEmitter *ORE) {
1706 using namespace ore;
1707
1708 ORE->emit([&]() {
1709 return OptimizationRemark(DEBUG_TYPE, "LoadElim", Load)
1710 << "load of type " << NV("Type", Load->getType()) << " eliminated"
1711 << setExtraArgs() << " in favor of "
1712 << NV("InfavorOfValue", AvailableValue);
1713 });
1714 }
1715
1716 /// Attempt to eliminate a load whose dependencies are
1717 /// non-local by performing PHI construction.
1718 bool GVNPass::processNonLocalLoad(LoadInst *Load) {
1719 // Non-local speculations are not allowed under ASan or HWASan.
1720 if (Load->getParent()->getParent()->hasFnAttribute(
1721 Attribute::SanitizeAddress) ||
1722 Load->getParent()->getParent()->hasFnAttribute(
1723 Attribute::SanitizeHWAddress))
1724 return false;
1725
1726 // Step 1: Find the non-local dependencies of the load.
1727 LoadDepVect Deps;
1728 MD->getNonLocalPointerDependency(Load, Deps);
1729
1730 // If we had to process more than one hundred blocks to find the
1731 // dependencies, this load isn't worth worrying about. Optimizing
1732 // it will be too expensive.
1733 unsigned NumDeps = Deps.size();
1734 if (NumDeps > MaxNumDeps)
1735 return false;
1736
1737 // If we had a phi translation failure, we'll have a single entry which is a
1738 // clobber in the current block. Reject this early.
1739 if (NumDeps == 1 &&
1740 !Deps[0].getResult().isDef() && !Deps[0].getResult().isClobber()) {
1741 LLVM_DEBUG(dbgs() << "GVN: non-local load "; Load->printAsOperand(dbgs());
1742 dbgs() << " has unknown dependencies\n";);
1743 return false;
1744 }
1745
1746 bool Changed = false;
1747 // If this load follows a GEP, see if we can PRE the indices before analyzing.
1748 if (GetElementPtrInst *GEP =
1749 dyn_cast<GetElementPtrInst>(Load->getOperand(0))) {
1750 for (Use &U : GEP->indices())
1751 if (Instruction *I = dyn_cast<Instruction>(U.get()))
1752 Changed |= performScalarPRE(I);
1753 }
1754
1755 // Step 2: Analyze the availability of the load
1756 AvailValInBlkVect ValuesPerBlock;
1757 UnavailBlkVect UnavailableBlocks;
1758 AnalyzeLoadAvailability(Load, Deps, ValuesPerBlock, UnavailableBlocks);
1759
1760 // If we have no predecessors that produce a known value for this load, exit
1761 // early.
1762 if (ValuesPerBlock.empty())
1763 return Changed;
1764
1765 // Step 3: Eliminate full redundancy.
1766 //
1767 // If all of the instructions we depend on produce a known value for this
1768 // load, then it is fully redundant and we can use PHI insertion to compute
1769 // its value. Insert PHIs and remove the fully redundant value now.
1770 if (UnavailableBlocks.empty()) {
1771 LLVM_DEBUG(dbgs() << "GVN REMOVING NONLOCAL LOAD: " << *Load << '\n');
1772
1773 // Perform PHI construction.
1774 Value *V = ConstructSSAForLoadSet(Load, ValuesPerBlock, *this);
1775 Load->replaceAllUsesWith(V);
1776
1777 if (isa<PHINode>(V))
1778 V->takeName(Load);
1779 if (Instruction *I = dyn_cast<Instruction>(V))
1780 // If instruction I has debug info, then we should not update it.
1781 // Also, if I has a null DebugLoc, then it is still potentially incorrect
1782 // to propagate Load's DebugLoc because Load may not post-dominate I.
1783 if (Load->getDebugLoc() && Load->getParent() == I->getParent())
1784 I->setDebugLoc(Load->getDebugLoc());
1785 if (V->getType()->isPtrOrPtrVectorTy())
1786 MD->invalidateCachedPointerInfo(V);
1787 markInstructionForDeletion(Load);
1788 ++NumGVNLoad;
1789 reportLoadElim(Load, V, ORE);
1790 return true;
1791 }
1792
1793 // Step 4: Eliminate partial redundancy.
1794 if (!isPREEnabled() || !isLoadPREEnabled())
1795 return Changed;
1796 if (!isLoadInLoopPREEnabled() && LI && LI->getLoopFor(Load->getParent()))
1797 return Changed;
1798
1799 if (performLoopLoadPRE(Load, ValuesPerBlock, UnavailableBlocks) ||
1800 PerformLoadPRE(Load, ValuesPerBlock, UnavailableBlocks))
1801 return true;
1802
1803 return Changed;
1804 }
1805
1806 static bool impliesEquivalanceIfTrue(CmpInst* Cmp) {
1807 if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_EQ)
1808 return true;
1809
1810 // Floating point comparisons can be equal, but not equivalent. Cases:
1811 // NaNs for unordered operators
1812 // +0.0 vs -0.0 for all operators
1813 if (Cmp->getPredicate() == CmpInst::Predicate::FCMP_OEQ ||
1814 (Cmp->getPredicate() == CmpInst::Predicate::FCMP_UEQ &&
1815 Cmp->getFastMathFlags().noNaNs())) {
1816 Value *LHS = Cmp->getOperand(0);
1817 Value *RHS = Cmp->getOperand(1);
1818 // If we can prove either side non-zero, then equality must imply
1819 // equivalence.
1820 // FIXME: We should do this optimization if 'no signed zeros' is
1821 // applicable via an instruction-level fast-math-flag or some other
1822 // indicator that relaxed FP semantics are being used.
1823 if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1824 return true;
1825 if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1826 return true;
1827 // TODO: Handle vector floating point constants
1828 }
1829 return false;
1830 }
1831
1832 static bool impliesEquivalanceIfFalse(CmpInst* Cmp) {
1833 if (Cmp->getPredicate() == CmpInst::Predicate::ICMP_NE)
1834 return true;
1835
1836 // Floating point comparisons can be equal, but not equivalent. Cases:
1837 // NaNs for unordered operators
1838 // +0.0 vs -0.0 for all operators
1839 if ((Cmp->getPredicate() == CmpInst::Predicate::FCMP_ONE &&
1840 Cmp->getFastMathFlags().noNaNs()) ||
1841 Cmp->getPredicate() == CmpInst::Predicate::FCMP_UNE) {
1842 Value *LHS = Cmp->getOperand(0);
1843 Value *RHS = Cmp->getOperand(1);
1844 // If we can prove either side non-zero, then equality must imply
1845 // equivalence.
1846 // FIXME: We should do this optimization if 'no signed zeros' is
1847 // applicable via an instruction-level fast-math-flag or some other
1848 // indicator that relaxed FP semantics are being used.
1849 if (isa<ConstantFP>(LHS) && !cast<ConstantFP>(LHS)->isZero())
1850 return true;
1851 if (isa<ConstantFP>(RHS) && !cast<ConstantFP>(RHS)->isZero())
1852 return true;
1853 // TODO: Handle vector floating point constants
1854 }
1855 return false;
1856 }
1857
1858
1859 static bool hasUsersIn(Value *V, BasicBlock *BB) {
1860 return llvm::any_of(V->users(), [BB](User *U) {
1861 auto *I = dyn_cast<Instruction>(U);
1862 return I && I->getParent() == BB;
1863 });
1864 }
1865
1866 bool GVNPass::processAssumeIntrinsic(AssumeInst *IntrinsicI) {
1867 Value *V = IntrinsicI->getArgOperand(0);
1868
1869 if (ConstantInt *Cond = dyn_cast<ConstantInt>(V)) {
1870 if (Cond->isZero()) {
1871 Type *Int8Ty = Type::getInt8Ty(V->getContext());
1872 // Insert a new store-to-null instruction before the assume to indicate that
1873 // this code is not reachable. FIXME: We could insert an unreachable
1874 // instruction directly because we can modify the CFG.
1875 auto *NewS = new StoreInst(PoisonValue::get(Int8Ty),
1876 Constant::getNullValue(Int8Ty->getPointerTo()),
1877 IntrinsicI);
1878 if (MSSAU) {
1879 const MemoryUseOrDef *FirstNonDom = nullptr;
1880 const auto *AL =
1881 MSSAU->getMemorySSA()->getBlockAccesses(IntrinsicI->getParent());
1882
1883 // If there are accesses in the current basic block, find the first one
1884 // that does not come before NewS. The new memory access is inserted
1885 // after the found access or before the terminator if no such access is
1886 // found.
1887 if (AL) {
1888 for (const auto &Acc : *AL) {
1889 if (auto *Current = dyn_cast<MemoryUseOrDef>(&Acc))
1890 if (!Current->getMemoryInst()->comesBefore(NewS)) {
1891 FirstNonDom = Current;
1892 break;
1893 }
1894 }
1895 }
1896
1897 // This added store is to null, so it will never be executed and we can
1898 // just use the LiveOnEntry def as the defining access.
1899 auto *NewDef =
1900 FirstNonDom ? MSSAU->createMemoryAccessBefore(
1901 NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1902 const_cast<MemoryUseOrDef *>(FirstNonDom))
1903 : MSSAU->createMemoryAccessInBB(
1904 NewS, MSSAU->getMemorySSA()->getLiveOnEntryDef(),
1905 NewS->getParent(), MemorySSA::BeforeTerminator);
1906
1907 MSSAU->insertDef(cast<MemoryDef>(NewDef), /*RenameUses=*/false);
1908 }
1909 }
1910 if (isAssumeWithEmptyBundle(*IntrinsicI))
1911 markInstructionForDeletion(IntrinsicI);
1912 return false;
1913 } else if (isa<Constant>(V)) {
1914 // If it's a constant and not false, it must evaluate to true. This means our
1915 // assume is assume(true), and is thus pointless, and we don't want to do
1916 // anything more here.
1917 return false;
1918 }
1919
1920 Constant *True = ConstantInt::getTrue(V->getContext());
1921 bool Changed = false;
1922
1923 for (BasicBlock *Successor : successors(IntrinsicI->getParent())) {
1924 BasicBlockEdge Edge(IntrinsicI->getParent(), Successor);
1925
1926 // This property is only true in dominated successors; propagateEquality
1927 // will check dominance for us.
1928 Changed |= propagateEquality(V, True, Edge, false);
1929 }
1930
1931 // We can replace assume value with true, which covers cases like this:
1932 // call void @llvm.assume(i1 %cmp)
1933 // br i1 %cmp, label %bb1, label %bb2 ; will change %cmp to true
1934 ReplaceOperandsWithMap[V] = True;
1935
1936 // Similarly, after assume(!NotV) we know that NotV == false.
1937 Value *NotV;
1938 if (match(V, m_Not(m_Value(NotV))))
1939 ReplaceOperandsWithMap[NotV] = ConstantInt::getFalse(V->getContext());
1940
1941 // If we find an equality fact, canonicalize all dominated uses in this block
1942 // to one of the two values. We heuristically choose the "oldest" of the
1943 // two where age is determined by value number. (Note that propagateEquality
1944 // above handles the cross block case.)
1945 //
1946 // Key cases to cover are:
1947 // 1)
1948 // %cmp = fcmp oeq float 3.000000e+00, %0 ; const on lhs could happen
1949 // call void @llvm.assume(i1 %cmp)
1950 // ret float %0 ; will change it to ret float 3.000000e+00
1951 // 2)
1952 // %load = load float, float* %addr
1953 // %cmp = fcmp oeq float %load, %0
1954 // call void @llvm.assume(i1 %cmp)
1955 // ret float %load ; will change it to ret float %0
1956 if (auto *CmpI = dyn_cast<CmpInst>(V)) {
1957 if (impliesEquivalanceIfTrue(CmpI)) {
1958 Value *CmpLHS = CmpI->getOperand(0);
1959 Value *CmpRHS = CmpI->getOperand(1);
1960 // Heuristically pick the better replacement -- the choice of heuristic
1961 // isn't terribly important here, but the fact we canonicalize on some
1962 // replacement is for exposing other simplifications.
1963 // TODO: pull this out as a helper function and reuse w/existing
1964 // (slightly different) logic.
1965 if (isa<Constant>(CmpLHS) && !isa<Constant>(CmpRHS))
1966 std::swap(CmpLHS, CmpRHS);
1967 if (!isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))
1968 std::swap(CmpLHS, CmpRHS);
1969 if ((isa<Argument>(CmpLHS) && isa<Argument>(CmpRHS)) ||
1970 (isa<Instruction>(CmpLHS) && isa<Instruction>(CmpRHS))) {
1971 // Move the 'oldest' value to the right-hand side, using the value
1972 // number as a proxy for age.
1973 uint32_t LVN = VN.lookupOrAdd(CmpLHS);
1974 uint32_t RVN = VN.lookupOrAdd(CmpRHS);
1975 if (LVN < RVN)
1976 std::swap(CmpLHS, CmpRHS);
1977 }
1978
1979 // Handle the degenerate case where we either haven't pruned a dead path or
1980 // removed a trivial assume yet.
1981 if (isa<Constant>(CmpLHS) && isa<Constant>(CmpRHS))
1982 return Changed;
1983
1984 LLVM_DEBUG(dbgs() << "Replacing dominated uses of "
1985 << *CmpLHS << " with "
1986 << *CmpRHS << " in block "
1987 << IntrinsicI->getParent()->getName() << "\n");
1988
1989
1990 // Setup the replacement map - this handles uses within the same block
1991 if (hasUsersIn(CmpLHS, IntrinsicI->getParent()))
1992 ReplaceOperandsWithMap[CmpLHS] = CmpRHS;
1993
1994 // NOTE: The non-block local cases are handled by the call to
1995 // propagateEquality above; this block is just about handling the block
1996 // local cases. TODO: There's a bunch of logic in propagateEquality which
1997 // isn't duplicated for the block local case, can we share it somehow?
1998 }
1999 }
2000 return Changed;
2001 }
2002
2003 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
2004 patchReplacementInstruction(I, Repl);
2005 I->replaceAllUsesWith(Repl);
2006 }
2007
2008 /// Attempt to eliminate a load, first by eliminating it
2009 /// locally, and then attempting non-local elimination if that fails.
2010 bool GVNPass::processLoad(LoadInst *L) {
2011 if (!MD)
2012 return false;
2013
2014 // This code hasn't been audited for ordered or volatile memory access
2015 if (!L->isUnordered())
2016 return false;
2017
2018 if (L->use_empty()) {
2019 markInstructionForDeletion(L);
2020 return true;
2021 }
2022
2023 // ... to a pointer that has been loaded from before...
2024 MemDepResult Dep = MD->getDependency(L);
2025
2026 // If it is defined in another block, try harder.
2027 if (Dep.isNonLocal())
2028 return processNonLocalLoad(L);
2029
2030 // Only handle the local case below
2031 if (!Dep.isLocal()) {
2032 // This might be a NonFuncLocal or an Unknown
2033 LLVM_DEBUG(
2034 // fast print dep, using operator<< on instruction is too slow.
2035 dbgs() << "GVN: load "; L->printAsOperand(dbgs());
2036 dbgs() << " has unknown dependence\n";);
2037 return false;
2038 }
2039
2040 auto AV = AnalyzeLoadAvailability(L, Dep, L->getPointerOperand());
2041 if (!AV)
2042 return false;
2043
2044 Value *AvailableValue = AV->MaterializeAdjustedValue(L, L, *this);
2045
2046 // Replace the load!
2047 patchAndReplaceAllUsesWith(L, AvailableValue);
2048 markInstructionForDeletion(L);
2049 if (MSSAU)
2050 MSSAU->removeMemoryAccess(L);
2051 ++NumGVNLoad;
2052 reportLoadElim(L, AvailableValue, ORE);
2053 // Tell MDA to reexamine the reused pointer since we might have more
2054 // information after forwarding it.
2055 if (MD && AvailableValue->getType()->isPtrOrPtrVectorTy())
2056 MD->invalidateCachedPointerInfo(AvailableValue);
2057 return true;
2058 }
2059
2060 /// Return a pair whose first field is the value number of \p Exp and whose
2061 /// second field indicates whether that value number was newly created.
2062 std::pair<uint32_t, bool>
2063 GVNPass::ValueTable::assignExpNewValueNum(Expression &Exp) {
2064 uint32_t &e = expressionNumbering[Exp];
2065 bool CreateNewValNum = !e;
2066 if (CreateNewValNum) {
2067 Expressions.push_back(Exp);
2068 if (ExprIdx.size() < nextValueNumber + 1)
2069 ExprIdx.resize(nextValueNumber * 2);
2070 e = nextValueNumber;
2071 ExprIdx[nextValueNumber++] = nextExprNumber++;
2072 }
2073 return {e, CreateNewValNum};
2074 }
2075
2076 /// Return whether all the values related with the same \p num are
2077 /// defined in \p BB.
2078 bool GVNPass::ValueTable::areAllValsInBB(uint32_t Num, const BasicBlock *BB,
2079 GVNPass &Gvn) {
2080 LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
2081 while (Vals && Vals->BB == BB)
2082 Vals = Vals->Next;
2083 return !Vals;
2084 }
2085
2086 /// Wrap phiTranslateImpl to provide caching functionality.
2087 uint32_t GVNPass::ValueTable::phiTranslate(const BasicBlock *Pred,
2088 const BasicBlock *PhiBlock,
2089 uint32_t Num, GVNPass &Gvn) {
2090 auto FindRes = PhiTranslateTable.find({Num, Pred});
2091 if (FindRes != PhiTranslateTable.end())
2092 return FindRes->second;
2093 uint32_t NewNum = phiTranslateImpl(Pred, PhiBlock, Num, Gvn);
2094 PhiTranslateTable.insert({{Num, Pred}, NewNum});
2095 return NewNum;
2096 }
2097
2098 // Return true if the value number \p Num and NewNum have equal value.
2099 // Return false if the result is unknown.
2100 bool GVNPass::ValueTable::areCallValsEqual(uint32_t Num, uint32_t NewNum,
2101 const BasicBlock *Pred,
2102 const BasicBlock *PhiBlock,
2103 GVNPass &Gvn) {
2104 CallInst *Call = nullptr;
2105 LeaderTableEntry *Vals = &Gvn.LeaderTable[Num];
2106 while (Vals) {
2107 Call = dyn_cast<CallInst>(Vals->Val);
2108 if (Call && Call->getParent() == PhiBlock)
2109 break;
2110 Vals = Vals->Next;
2111 }
2112
2113 if (AA->doesNotAccessMemory(Call))
2114 return true;
2115
2116 if (!MD || !AA->onlyReadsMemory(Call))
2117 return false;
2118
2119 MemDepResult local_dep = MD->getDependency(Call);
2120 if (!local_dep.isNonLocal())
2121 return false;
2122
2123 const MemoryDependenceResults::NonLocalDepInfo &deps =
2124 MD->getNonLocalCallDependency(Call);
2125
2126 // Check to see if the Call has no function local clobber.
2127 for (const NonLocalDepEntry &D : deps) {
2128 if (D.getResult().isNonFuncLocal())
2129 return true;
2130 }
2131 return false;
2132 }
2133
2134 /// Translate value number \p Num using phis, so that it has the values of
2135 /// the phis in BB.
2136 uint32_t GVNPass::ValueTable::phiTranslateImpl(const BasicBlock *Pred,
2137 const BasicBlock *PhiBlock,
2138 uint32_t Num, GVNPass &Gvn) {
2139 if (PHINode *PN = NumberingPhi[Num]) {
2140 for (unsigned i = 0; i != PN->getNumIncomingValues(); ++i) {
2141 if (PN->getParent() == PhiBlock && PN->getIncomingBlock(i) == Pred)
2142 if (uint32_t TransVal = lookup(PN->getIncomingValue(i), false))
2143 return TransVal;
2144 }
2145 return Num;
2146 }
2147
2148 // If any value related with Num is defined in a BB other than PhiBlock, it
2149 // cannot depend on a phi in PhiBlock without going through a backedge. We can
2150 // do an early exit in that case to save compile time.
2151 if (!areAllValsInBB(Num, PhiBlock, Gvn))
2152 return Num;
2153
2154 if (Num >= ExprIdx.size() || ExprIdx[Num] == 0)
2155 return Num;
2156 Expression Exp = Expressions[ExprIdx[Num]];
2157
2158 for (unsigned i = 0; i < Exp.varargs.size(); i++) {
2159 // For InsertValue and ExtractValue, some varargs are index numbers
2160 // instead of value numbers. Those index numbers should not be
2161 // translated.
2162 if ((i > 1 && Exp.opcode == Instruction::InsertValue) ||
2163 (i > 0 && Exp.opcode == Instruction::ExtractValue) ||
2164 (i > 1 && Exp.opcode == Instruction::ShuffleVector))
2165 continue;
2166 Exp.varargs[i] = phiTranslate(Pred, PhiBlock, Exp.varargs[i], Gvn);
2167 }
2168
2169 if (Exp.commutative) {
2170 assert(Exp.varargs.size() >= 2 && "Unsupported commutative instruction!");
2171 if (Exp.varargs[0] > Exp.varargs[1]) {
2172 std::swap(Exp.varargs[0], Exp.varargs[1]);
2173 uint32_t Opcode = Exp.opcode >> 8;
2174 if (Opcode == Instruction::ICmp || Opcode == Instruction::FCmp)
2175 Exp.opcode = (Opcode << 8) |
2176 CmpInst::getSwappedPredicate(
2177 static_cast<CmpInst::Predicate>(Exp.opcode & 255));
2178 }
2179 }
2180
2181 if (uint32_t NewNum = expressionNumbering[Exp]) {
2182 if (Exp.opcode == Instruction::Call && NewNum != Num)
2183 return areCallValsEqual(Num, NewNum, Pred, PhiBlock, Gvn) ? NewNum : Num;
2184 return NewNum;
2185 }
2186 return Num;
2187 }
2188
2189 /// Erase stale entry from phiTranslate cache so phiTranslate can be computed
2190 /// again.
2191 void GVNPass::ValueTable::eraseTranslateCacheEntry(
2192 uint32_t Num, const BasicBlock &CurrBlock) {
2193 for (const BasicBlock *Pred : predecessors(&CurrBlock))
2194 PhiTranslateTable.erase({Num, Pred});
2195 }
2196
2197 // In order to find a leader for a given value number at a
2198 // specific basic block, we first obtain the list of all Values for that number,
2199 // and then scan the list to find one whose block dominates the block in
2200 // question. This is fast because dominator tree queries consist of only
2201 // a few comparisons of DFS numbers.
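// For example (hypothetical): if value number N has leader entries in %bb1 and
// %bb3, and only %bb1 dominates the query block BB, the %bb1 entry is returned.
// Constants are preferred and returned as soon as one is found.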
2202 Value *GVNPass::findLeader(const BasicBlock *BB, uint32_t num) {
2203 LeaderTableEntry Vals = LeaderTable[num];
2204 if (!Vals.Val) return nullptr;
2205
2206 Value *Val = nullptr;
2207 if (DT->dominates(Vals.BB, BB)) {
2208 Val = Vals.Val;
2209 if (isa<Constant>(Val)) return Val;
2210 }
2211
2212 LeaderTableEntry* Next = Vals.Next;
2213 while (Next) {
2214 if (DT->dominates(Next->BB, BB)) {
2215 if (isa<Constant>(Next->Val)) return Next->Val;
2216 if (!Val) Val = Next->Val;
2217 }
2218
2219 Next = Next->Next;
2220 }
2221
2222 return Val;
2223 }
2224
2225 /// There is an edge from 'Src' to 'Dst'. Return
2226 /// true if every path from the entry block to 'Dst' passes via this edge. In
2227 /// particular 'Dst' must not be reachable via another edge from 'Src'.
2228 static bool isOnlyReachableViaThisEdge(const BasicBlockEdge &E,
2229 DominatorTree *DT) {
2230 // While in theory it is interesting to consider the case in which Dst has
2231 // more than one predecessor, because Dst might be part of a loop which is
2232 // only reachable from Src, in practice it is pointless since at the time
2233 // GVN runs all such loops have preheaders, which means that Dst will have
2234 // been changed to have only one predecessor, namely Src.
2235 const BasicBlock *Pred = E.getEnd()->getSinglePredecessor();
2236 assert((!Pred || Pred == E.getStart()) &&
2237 "No edge between these basic blocks!");
2238 return Pred != nullptr;
2239 }
2240
2241 void GVNPass::assignBlockRPONumber(Function &F) {
2242 BlockRPONumber.clear();
2243 uint32_t NextBlockNumber = 1;
2244 ReversePostOrderTraversal<Function *> RPOT(&F);
2245 for (BasicBlock *BB : RPOT)
2246 BlockRPONumber[BB] = NextBlockNumber++;
2247 InvalidBlockRPONumbers = false;
2248 }
2249
2250 bool GVNPass::replaceOperandsForInBlockEquality(Instruction *Instr) const {
2251 bool Changed = false;
2252 for (unsigned OpNum = 0; OpNum < Instr->getNumOperands(); ++OpNum) {
2253 Value *Operand = Instr->getOperand(OpNum);
2254 auto it = ReplaceOperandsWithMap.find(Operand);
2255 if (it != ReplaceOperandsWithMap.end()) {
2256 LLVM_DEBUG(dbgs() << "GVN replacing: " << *Operand << " with "
2257 << *it->second << " in instruction " << *Instr << '\n');
2258 Instr->setOperand(OpNum, it->second);
2259 Changed = true;
2260 }
2261 }
2262 return Changed;
2263 }
2264
2265 /// The given values are known to be equal in every block
2266 /// dominated by 'Root'. Exploit this, for example by replacing 'LHS' with
2267 /// 'RHS' everywhere in the scope. Returns whether a change was made.
2268 /// If DominatesByEdge is false, then it means that we will propagate the RHS
2269 /// value starting from the end of Root.Start.
2270 bool GVNPass::propagateEquality(Value *LHS, Value *RHS,
2271 const BasicBlockEdge &Root,
2272 bool DominatesByEdge) {
2273 SmallVector<std::pair<Value*, Value*>, 4> Worklist;
2274 Worklist.push_back(std::make_pair(LHS, RHS));
2275 bool Changed = false;
2276 // For speed, compute a conservative fast approximation to
2277 // DT->dominates(Root, Root.getEnd());
2278 const bool RootDominatesEnd = isOnlyReachableViaThisEdge(Root, DT);
2279
2280 while (!Worklist.empty()) {
2281 std::pair<Value*, Value*> Item = Worklist.pop_back_val();
2282 LHS = Item.first; RHS = Item.second;
2283
2284 if (LHS == RHS)
2285 continue;
2286 assert(LHS->getType() == RHS->getType() && "Equality but unequal types!");
2287
2288 // Don't try to propagate equalities between constants.
2289 if (isa<Constant>(LHS) && isa<Constant>(RHS))
2290 continue;
2291
2292 // Prefer a constant on the right-hand side, or an Argument if no constants.
2293 if (isa<Constant>(LHS) || (isa<Argument>(LHS) && !isa<Constant>(RHS)))
2294 std::swap(LHS, RHS);
2295 assert((isa<Argument>(LHS) || isa<Instruction>(LHS)) && "Unexpected value!");
2296
2297 // If there is no obvious reason to prefer the left-hand side over the
2298 // right-hand side, ensure the longest lived term is on the right-hand side,
2299 // so the shortest lived term will be replaced by the longest lived.
2300 // This tends to expose more simplifications.
2301 uint32_t LVN = VN.lookupOrAdd(LHS);
2302 if ((isa<Argument>(LHS) && isa<Argument>(RHS)) ||
2303 (isa<Instruction>(LHS) && isa<Instruction>(RHS))) {
2304 // Move the 'oldest' value to the right-hand side, using the value number
2305 // as a proxy for age.
2306 uint32_t RVN = VN.lookupOrAdd(RHS);
2307 if (LVN < RVN) {
2308 std::swap(LHS, RHS);
2309 LVN = RVN;
2310 }
2311 }
2312
2313 // If value numbering later sees that an instruction in the scope is equal
2314 // to 'LHS' then ensure it will be turned into 'RHS'. In order to preserve
2315 // the invariant that instructions only occur in the leader table for their
2316 // own value number (this is used by removeFromLeaderTable), do not do this
2317 // if RHS is an instruction (if an instruction in the scope is morphed into
2318 // LHS then it will be turned into RHS by the next GVN iteration anyway, so
2319 // using the leader table is about compiling faster, not optimizing better).
2320 // The leader table only tracks basic blocks, not edges. Only add to it if we
2321 // have the simple case where the edge dominates the end.
2322 if (RootDominatesEnd && !isa<Instruction>(RHS))
2323 addToLeaderTable(LVN, RHS, Root.getEnd());
2324
2325 // Replace all occurrences of 'LHS' with 'RHS' everywhere in the scope. As
2326 // LHS always has at least one use that is not dominated by Root, this will
2327 // never do anything if LHS has only one use.
2328 if (!LHS->hasOneUse()) {
2329 unsigned NumReplacements =
2330 DominatesByEdge
2331 ? replaceDominatedUsesWith(LHS, RHS, *DT, Root)
2332 : replaceDominatedUsesWith(LHS, RHS, *DT, Root.getStart());
2333
2334 Changed |= NumReplacements > 0;
2335 NumGVNEqProp += NumReplacements;
2336 // Cached information for anything that uses LHS will be invalid.
2337 if (MD)
2338 MD->invalidateCachedPointerInfo(LHS);
2339 }
2340
2341 // Now try to deduce additional equalities from this one. For example, if
2342 // the known equality was "(A != B)" == "false" then it follows that A and B
2343 // are equal in the scope. Only boolean equalities with an explicit true or
2344 // false RHS are currently supported.
2345 if (!RHS->getType()->isIntegerTy(1))
2346 // Not a boolean equality - bail out.
2347 continue;
2348 ConstantInt *CI = dyn_cast<ConstantInt>(RHS);
2349 if (!CI)
2350 // RHS neither 'true' nor 'false' - bail out.
2351 continue;
2352 // Whether RHS equals 'true'. Otherwise it equals 'false'.
2353 bool isKnownTrue = CI->isMinusOne();
2354 bool isKnownFalse = !isKnownTrue;
2355
2356 // If "A && B" is known true then both A and B are known true. If "A || B"
2357 // is known false then both A and B are known false.
2358 Value *A, *B;
2359 if ((isKnownTrue && match(LHS, m_LogicalAnd(m_Value(A), m_Value(B)))) ||
2360 (isKnownFalse && match(LHS, m_LogicalOr(m_Value(A), m_Value(B))))) {
2361 Worklist.push_back(std::make_pair(A, RHS));
2362 Worklist.push_back(std::make_pair(B, RHS));
2363 continue;
2364 }
2365
2366 // If we are propagating an equality like "(A == B)" == "true" then also
2367 // propagate the equality A == B. When propagating a comparison such as
2368 // "(A >= B)" == "true", replace all instances of "A < B" with "false".
2369 if (CmpInst *Cmp = dyn_cast<CmpInst>(LHS)) {
2370 Value *Op0 = Cmp->getOperand(0), *Op1 = Cmp->getOperand(1);
2371
2372 // If "A == B" is known true, or "A != B" is known false, then replace
2373 // A with B everywhere in the scope. For floating point operations, we
2374 // have to be careful since equality does not always imply equivalence.
2375 if ((isKnownTrue && impliesEquivalanceIfTrue(Cmp)) ||
2376 (isKnownFalse && impliesEquivalanceIfFalse(Cmp)))
2377 Worklist.push_back(std::make_pair(Op0, Op1));
2378
2379 // If "A >= B" is known true, replace "A < B" with false everywhere.
2380 CmpInst::Predicate NotPred = Cmp->getInversePredicate();
2381 Constant *NotVal = ConstantInt::get(Cmp->getType(), isKnownFalse);
2382 // Since we don't have the instruction "A < B" immediately to hand, work
2383 // out the value number that it would have and use that to find an
2384 // appropriate instruction (if any).
2385 uint32_t NextNum = VN.getNextUnusedValueNumber();
2386 uint32_t Num = VN.lookupOrAddCmp(Cmp->getOpcode(), NotPred, Op0, Op1);
2387 // If the number we were assigned was brand new then there is no point in
2388 // looking for an instruction realizing it: there cannot be one!
2389 if (Num < NextNum) {
2390 Value *NotCmp = findLeader(Root.getEnd(), Num);
2391 if (NotCmp && isa<Instruction>(NotCmp)) {
2392 unsigned NumReplacements =
2393 DominatesByEdge
2394 ? replaceDominatedUsesWith(NotCmp, NotVal, *DT, Root)
2395 : replaceDominatedUsesWith(NotCmp, NotVal, *DT,
2396 Root.getStart());
2397 Changed |= NumReplacements > 0;
2398 NumGVNEqProp += NumReplacements;
2399 // Cached information for anything that uses NotCmp will be invalid.
2400 if (MD)
2401 MD->invalidateCachedPointerInfo(NotCmp);
2402 }
2403 }
2404 // Ensure that any instruction in scope that gets the "A < B" value number
2405 // is replaced with false.
2406 // The leader table only tracks basic blocks, not edges. Only add to it if we
2407 // have the simple case where the edge dominates the end.
2408 if (RootDominatesEnd)
2409 addToLeaderTable(Num, NotVal, Root.getEnd());
2410
2411 continue;
2412 }
2413 }
2414
2415 return Changed;
2416 }
2417
2418 /// When calculating availability, handle an instruction
2419 /// by inserting it into the appropriate sets
2420 bool GVNPass::processInstruction(Instruction *I) {
2421 // Ignore dbg info intrinsics.
2422 if (isa<DbgInfoIntrinsic>(I))
2423 return false;
2424
2425 // If the instruction can be easily simplified then do so now in preference
2426 // to value numbering it. Value numbering often exposes redundancies, for
2427 // example if it determines that %y is equal to %x then the instruction
2428 // "%z = and i32 %x, %y" becomes "%z = and i32 %x, %x" which we now simplify.
2429 const DataLayout &DL = I->getModule()->getDataLayout();
2430 if (Value *V = simplifyInstruction(I, {DL, TLI, DT, AC})) {
2431 bool Changed = false;
2432 if (!I->use_empty()) {
2433 // Simplification can cause a special instruction to become not special.
2434 // For example, devirtualization to a willreturn function.
2435 ICF->removeUsersOf(I);
2436 I->replaceAllUsesWith(V);
2437 Changed = true;
2438 }
2439 if (isInstructionTriviallyDead(I, TLI)) {
2440 markInstructionForDeletion(I);
2441 Changed = true;
2442 }
2443 if (Changed) {
2444 if (MD && V->getType()->isPtrOrPtrVectorTy())
2445 MD->invalidateCachedPointerInfo(V);
2446 ++NumGVNSimpl;
2447 return true;
2448 }
2449 }
2450
2451 if (auto *Assume = dyn_cast<AssumeInst>(I))
2452 return processAssumeIntrinsic(Assume);
2453
2454 if (LoadInst *Load = dyn_cast<LoadInst>(I)) {
2455 if (processLoad(Load))
2456 return true;
2457
2458 unsigned Num = VN.lookupOrAdd(Load);
2459 addToLeaderTable(Num, Load, Load->getParent());
2460 return false;
2461 }
2462
2463 // For conditional branches, we can perform simple conditional propagation on
2464 // the condition value itself.
2465 if (BranchInst *BI = dyn_cast<BranchInst>(I)) {
2466 if (!BI->isConditional())
2467 return false;
2468
2469 if (isa<Constant>(BI->getCondition()))
2470 return processFoldableCondBr(BI);
2471
2472 Value *BranchCond = BI->getCondition();
2473 BasicBlock *TrueSucc = BI->getSuccessor(0);
2474 BasicBlock *FalseSucc = BI->getSuccessor(1);
2475 // Avoid multiple edges early.
2476 if (TrueSucc == FalseSucc)
2477 return false;
2478
2479 BasicBlock *Parent = BI->getParent();
2480 bool Changed = false;
2481
2482 Value *TrueVal = ConstantInt::getTrue(TrueSucc->getContext());
2483 BasicBlockEdge TrueE(Parent, TrueSucc);
2484 Changed |= propagateEquality(BranchCond, TrueVal, TrueE, true);
2485
2486 Value *FalseVal = ConstantInt::getFalse(FalseSucc->getContext());
2487 BasicBlockEdge FalseE(Parent, FalseSucc);
2488 Changed |= propagateEquality(BranchCond, FalseVal, FalseE, true);
2489
2490 return Changed;
2491 }
2492
2493 // For switches, propagate the case values into the case destinations.
2494 if (SwitchInst *SI = dyn_cast<SwitchInst>(I)) {
2495 Value *SwitchCond = SI->getCondition();
2496 BasicBlock *Parent = SI->getParent();
2497 bool Changed = false;
2498
2499 // Remember how many outgoing edges there are to every successor.
2500 SmallDenseMap<BasicBlock *, unsigned, 16> SwitchEdges;
2501 for (unsigned i = 0, n = SI->getNumSuccessors(); i != n; ++i)
2502 ++SwitchEdges[SI->getSuccessor(i)];
2503
2504 for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
2505 i != e; ++i) {
2506 BasicBlock *Dst = i->getCaseSuccessor();
2507 // If there is only a single edge, propagate the case value into it.
2508 if (SwitchEdges.lookup(Dst) == 1) {
2509 BasicBlockEdge E(Parent, Dst);
2510 Changed |= propagateEquality(SwitchCond, i->getCaseValue(), E, true);
2511 }
2512 }
2513 return Changed;
2514 }
2515
2516 // Instructions with void type don't return a value, so there's
2517 // no point in trying to find redundancies in them.
2518 if (I->getType()->isVoidTy())
2519 return false;
2520
2521 uint32_t NextNum = VN.getNextUnusedValueNumber();
2522 unsigned Num = VN.lookupOrAdd(I);
2523
2524 // Allocations are always uniquely numbered, so we can save time and memory
2525 // by fast failing them.
2526 if (isa<AllocaInst>(I) || I->isTerminator() || isa<PHINode>(I)) {
2527 addToLeaderTable(Num, I, I->getParent());
2528 return false;
2529 }
2530
2531 // If the number we were assigned was a brand new VN, then we don't
2532 // need to do a lookup to see if the number already exists
2533 // somewhere in the domtree: it can't!
2534 if (Num >= NextNum) {
2535 addToLeaderTable(Num, I, I->getParent());
2536 return false;
2537 }
2538
2539 // Perform fast-path value-number based elimination of values inherited from
2540 // dominators.
2541 Value *Repl = findLeader(I->getParent(), Num);
2542 if (!Repl) {
2543 // Failure, just remember this instance for future use.
2544 addToLeaderTable(Num, I, I->getParent());
2545 return false;
2546 } else if (Repl == I) {
2547 // If I was the result of a shortcut PRE, it might already be in the table
2548 // and the best replacement for itself. Nothing to do.
2549 return false;
2550 }
2551
2552 // Remove it!
2553 patchAndReplaceAllUsesWith(I, Repl);
2554 if (MD && Repl->getType()->isPtrOrPtrVectorTy())
2555 MD->invalidateCachedPointerInfo(Repl);
2556 markInstructionForDeletion(I);
2557 return true;
2558 }
2559
2560 /// runOnFunction - This is the main transformation entry point for a function.
2561 bool GVNPass::runImpl(Function &F, AssumptionCache &RunAC, DominatorTree &RunDT,
2562 const TargetLibraryInfo &RunTLI, AAResults &RunAA,
2563 MemoryDependenceResults *RunMD, LoopInfo *LI,
2564 OptimizationRemarkEmitter *RunORE, MemorySSA *MSSA) {
2565 AC = &RunAC;
2566 DT = &RunDT;
2567 VN.setDomTree(DT);
2568 TLI = &RunTLI;
2569 VN.setAliasAnalysis(&RunAA);
2570 MD = RunMD;
2571 ImplicitControlFlowTracking ImplicitCFT;
2572 ICF = &ImplicitCFT;
2573 this->LI = LI;
2574 VN.setMemDep(MD);
2575 ORE = RunORE;
2576 InvalidBlockRPONumbers = true;
2577 MemorySSAUpdater Updater(MSSA);
2578 MSSAU = MSSA ? &Updater : nullptr;
2579
2580 bool Changed = false;
2581 bool ShouldContinue = true;
2582
2583 DomTreeUpdater DTU(DT, DomTreeUpdater::UpdateStrategy::Eager);
2584 // Merge unconditional branches, allowing PRE to catch more
2585 // optimization opportunities.
2586 for (BasicBlock &BB : llvm::make_early_inc_range(F)) {
2587 bool removedBlock = MergeBlockIntoPredecessor(&BB, &DTU, LI, MSSAU, MD);
2588 if (removedBlock)
2589 ++NumGVNBlocks;
2590
2591 Changed |= removedBlock;
2592 }
2593
2594 unsigned Iteration = 0;
2595 while (ShouldContinue) {
2596 LLVM_DEBUG(dbgs() << "GVN iteration: " << Iteration << "\n");
2597 (void) Iteration;
2598 ShouldContinue = iterateOnFunction(F);
2599 Changed |= ShouldContinue;
2600 ++Iteration;
2601 }
2602
2603 if (isPREEnabled()) {
2604 // Fabricate val-num for dead-code in order to suppress assertion in
2605 // performPRE().
2606 assignValNumForDeadCode();
2607 bool PREChanged = true;
2608 while (PREChanged) {
2609 PREChanged = performPRE(F);
2610 Changed |= PREChanged;
2611 }
2612 }
2613
2614 // FIXME: Should perform GVN again after PRE does something. PRE can move
2615 // computations into blocks where they become fully redundant. Note that
2616 // we can't do this until PRE's critical edge splitting updates memdep.
2617 // Actually, when this happens, we should just fully integrate PRE into GVN.
2618
2619 cleanupGlobalSets();
2620 // Do not cleanup DeadBlocks in cleanupGlobalSets() as it's called for each
2621 // iteration.
2622 DeadBlocks.clear();
2623
2624 if (MSSA && VerifyMemorySSA)
2625 MSSA->verifyMemorySSA();
2626
2627 return Changed;
2628 }
2629
2630 bool GVNPass::processBlock(BasicBlock *BB) {
2631 // FIXME: Kill off InstrsToErase by doing erasing eagerly in a helper function
2632 // (and incrementing BI before processing an instruction).
2633 assert(InstrsToErase.empty() &&
2634 "We expect InstrsToErase to be empty across iterations");
2635 if (DeadBlocks.count(BB))
2636 return false;
2637
2638 // Clear the map before every BB because it can only be used for a single BB.
2639 ReplaceOperandsWithMap.clear();
2640 bool ChangedFunction = false;
2641
2642 // Since we may not have visited the input blocks of the phis, we can't
2643 // use our normal hash approach for phis. Instead, simply look for
2644 // obvious duplicates. The first pass of GVN will tend to create
2645 // identical phis, and the second or later passes can eliminate them.
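// For example (hypothetical), two phis such as
//   %p1 = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
//   %p2 = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
// are obvious duplicates, and uses of one can be rewritten to use the other.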
2646 ChangedFunction |= EliminateDuplicatePHINodes(BB);
2647
2648 for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
2649 BI != BE;) {
2650 if (!ReplaceOperandsWithMap.empty())
2651 ChangedFunction |= replaceOperandsForInBlockEquality(&*BI);
2652 ChangedFunction |= processInstruction(&*BI);
2653
2654 if (InstrsToErase.empty()) {
2655 ++BI;
2656 continue;
2657 }
2658
2659 // If we need some instructions deleted, do it now.
2660 NumGVNInstr += InstrsToErase.size();
2661
2662 // Avoid iterator invalidation.
2663 bool AtStart = BI == BB->begin();
2664 if (!AtStart)
2665 --BI;
2666
2667 for (auto *I : InstrsToErase) {
2668 assert(I->getParent() == BB && "Removing instruction from wrong block?");
2669 LLVM_DEBUG(dbgs() << "GVN removed: " << *I << '\n');
2670 salvageKnowledge(I, AC);
2671 salvageDebugInfo(*I);
2672 if (MD) MD->removeInstruction(I);
2673 if (MSSAU)
2674 MSSAU->removeMemoryAccess(I);
2675 LLVM_DEBUG(verifyRemoved(I));
2676 ICF->removeInstruction(I);
2677 I->eraseFromParent();
2678 }
2679 InstrsToErase.clear();
2680
2681 if (AtStart)
2682 BI = BB->begin();
2683 else
2684 ++BI;
2685 }
2686
2687 return ChangedFunction;
2688 }
2689
2690 // Instantiate an expression in a predecessor that lacked it.
2691 bool GVNPass::performScalarPREInsertion(Instruction *Instr, BasicBlock *Pred,
2692 BasicBlock *Curr, unsigned int ValNo) {
2693 // Because we are going top-down through the block, all value numbers
2694 // will be available in the predecessor by the time we need them. Any
2695 // that weren't originally present will have been instantiated earlier
2696 // in this loop.
2697 bool success = true;
2698 for (unsigned i = 0, e = Instr->getNumOperands(); i != e; ++i) {
2699 Value *Op = Instr->getOperand(i);
2700 if (isa<Argument>(Op) || isa<Constant>(Op) || isa<GlobalValue>(Op))
2701 continue;
2702 // This could be a newly inserted instruction, in which case, we won't
2703 // find a value number, and should give up before we hurt ourselves.
2704 // FIXME: Rewrite the infrastructure to make it easier to value number
2705 // and process newly inserted instructions.
2706 if (!VN.exists(Op)) {
2707 success = false;
2708 break;
2709 }
2710 uint32_t TValNo =
2711 VN.phiTranslate(Pred, Curr, VN.lookup(Op), *this);
2712 if (Value *V = findLeader(Pred, TValNo)) {
2713 Instr->setOperand(i, V);
2714 } else {
2715 success = false;
2716 break;
2717 }
2718 }
2719
2720 // Fail out if we encounter an operand that is not available in
2721 // the PRE predecessor. This is typically because of loads which
2722 // are not value numbered precisely.
2723 if (!success)
2724 return false;
2725
2726 Instr->insertBefore(Pred->getTerminator());
2727 Instr->setName(Instr->getName() + ".pre");
2728 Instr->setDebugLoc(Instr->getDebugLoc());
2729
2730 ICF->insertInstructionTo(Instr, Pred);
2731
2732 unsigned Num = VN.lookupOrAdd(Instr);
2733 VN.add(Instr, Num);
2734
2735 // Update the availability map to include the new instruction.
2736 addToLeaderTable(Num, Instr, Pred);
2737 return true;
2738 }
2739
2740 bool GVNPass::performScalarPRE(Instruction *CurInst) {
2741 if (isa<AllocaInst>(CurInst) || CurInst->isTerminator() ||
2742 isa<PHINode>(CurInst) || CurInst->getType()->isVoidTy() ||
2743 CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
2744 isa<DbgInfoIntrinsic>(CurInst))
2745 return false;
2746
2747 // Don't do PRE on compares. The PHI would prevent CodeGenPrepare from
2748 // sinking the compare again, and it would force the code generator to
2749 // move the i1 from processor flags or predicate registers into a general
2750 // purpose register.
2751 if (isa<CmpInst>(CurInst))
2752 return false;
2753
2754 // Don't do PRE on GEPs. The inserted PHI would prevent CodeGenPrepare from
2755 // sinking the addressing mode computation back to its uses. Extending the
2756 // GEP's live range increases the register pressure, and therefore it can
2757 // introduce unnecessary spills.
2758 //
2759 // This doesn't prevent Load PRE. PHI translation will make the GEP available
2760 // to the load by moving it to the predecessor block if necessary.
2761 if (isa<GetElementPtrInst>(CurInst))
2762 return false;
2763
2764 if (auto *CallB = dyn_cast<CallBase>(CurInst)) {
2765 // We don't currently value number ANY inline asm calls.
2766 if (CallB->isInlineAsm())
2767 return false;
2768 // Don't do PRE on convergent calls.
2769 if (CallB->isConvergent())
2770 return false;
2771 }
2772
2773 uint32_t ValNo = VN.lookup(CurInst);
2774
2775 // Look for the predecessors for PRE opportunities. We're
2776 // only trying to solve the basic diamond case, where
2777 // a value is computed in the successor and one predecessor,
2778 // but not the other. We also explicitly disallow cases
2779 // where the successor is its own predecessor, because they're
2780 // more complicated to get right.
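// Illustrative diamond (hypothetical IR): the value is computed in %then but
// not in %else, and recomputed at the join:
//
//   then:   %t = add i32 %x, %y        else:   ...
//   join:   %t2 = add i32 %x, %y
//
// PRE inserts "%t.pre = add i32 %x, %y" at the end of %else and replaces %t2
// with "phi i32 [ %t, %then ], [ %t.pre, %else ]".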
2781 unsigned NumWith = 0;
2782 unsigned NumWithout = 0;
2783 BasicBlock *PREPred = nullptr;
2784 BasicBlock *CurrentBlock = CurInst->getParent();
2785
2786 // Update the RPO numbers for this function.
2787 if (InvalidBlockRPONumbers)
2788 assignBlockRPONumber(*CurrentBlock->getParent());
2789
2790 SmallVector<std::pair<Value *, BasicBlock *>, 8> predMap;
2791 for (BasicBlock *P : predecessors(CurrentBlock)) {
2792 // We're not interested in PRE when a predecessor block is not reachable
2793 // from the function's entry block.
2794 if (!DT->isReachableFromEntry(P)) {
2795 NumWithout = 2;
2796 break;
2797 }
2798 // It is not safe to do PRE when P->CurrentBlock is a loop backedge.
2799 assert(BlockRPONumber.count(P) && BlockRPONumber.count(CurrentBlock) &&
2800 "Invalid BlockRPONumber map.");
2801 if (BlockRPONumber[P] >= BlockRPONumber[CurrentBlock]) {
2802 NumWithout = 2;
2803 break;
2804 }
2805
2806 uint32_t TValNo = VN.phiTranslate(P, CurrentBlock, ValNo, *this);
2807 Value *predV = findLeader(P, TValNo);
2808 if (!predV) {
2809 predMap.push_back(std::make_pair(static_cast<Value *>(nullptr), P));
2810 PREPred = P;
2811 ++NumWithout;
2812 } else if (predV == CurInst) {
2813 /* CurInst dominates this predecessor. */
2814 NumWithout = 2;
2815 break;
2816 } else {
2817 predMap.push_back(std::make_pair(predV, P));
2818 ++NumWith;
2819 }
2820 }
2821
2822 // Don't do PRE when it might increase code size, i.e. when
2823 // we would need to insert instructions in more than one pred.
2824 if (NumWithout > 1 || NumWith == 0)
2825 return false;
2826
2827 // We may have a case where all predecessors have the instruction,
2828 // and we just need to insert a phi node. Otherwise, perform
2829 // insertion.
2830 Instruction *PREInstr = nullptr;
2831
2832 if (NumWithout != 0) {
2833 if (!isSafeToSpeculativelyExecute(CurInst)) {
2834 // It is only valid to insert a new instruction if the current instruction
2835 // is always executed. An instruction with implicit control flow could
2836 // prevent us from doing it. If we cannot speculate the execution, then
2837 // PRE should be prohibited.
2838 if (ICF->isDominatedByICFIFromSameBlock(CurInst))
2839 return false;
2840 }
2841
2842 // Don't do PRE across indirect branch.
2843 if (isa<IndirectBrInst>(PREPred->getTerminator()))
2844 return false;
2845
2846 // We can't do PRE safely on a critical edge, so instead we schedule
2847 // the edge to be split and perform the PRE the next time we iterate
2848 // on the function.
2849 unsigned SuccNum = GetSuccessorNumber(PREPred, CurrentBlock);
2850 if (isCriticalEdge(PREPred->getTerminator(), SuccNum)) {
2851 toSplit.push_back(std::make_pair(PREPred->getTerminator(), SuccNum));
2852 return false;
2853 }
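    // (Illustrative, with hypothetical block names: if %a branches to both %b
    // and %join, and %join is also reached from %b, then the edge %a -> %join
    // is critical. A clone placed at the end of %a would also execute on the
    // path through %b, so the edge has to be split to give the insertion its
    // own block.)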
2854     // We need to insert somewhere, so let's give it a shot.
2855 PREInstr = CurInst->clone();
2856 if (!performScalarPREInsertion(PREInstr, PREPred, CurrentBlock, ValNo)) {
2857 // If we failed insertion, make sure we remove the instruction.
2858 LLVM_DEBUG(verifyRemoved(PREInstr));
2859 PREInstr->deleteValue();
2860 return false;
2861 }
2862 }
2863
2864 // Either we should have filled in the PRE instruction, or we should
2865 // not have needed insertions.
2866 assert(PREInstr != nullptr || NumWithout == 0);
2867
2868 ++NumGVNPRE;
2869
2870 // Create a PHI to make the value available in this block.
2871 PHINode *Phi =
2872 PHINode::Create(CurInst->getType(), predMap.size(),
2873 CurInst->getName() + ".pre-phi", &CurrentBlock->front());
2874 for (unsigned i = 0, e = predMap.size(); i != e; ++i) {
2875 if (Value *V = predMap[i].first) {
2876 // If we use an existing value in this phi, we have to patch the original
2877 // value because the phi will be used to replace a later value.
2878 patchReplacementInstruction(CurInst, V);
2879 Phi->addIncoming(V, predMap[i].second);
2880 } else
2881 Phi->addIncoming(PREInstr, PREPred);
2882 }
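  // With the hypothetical names from the sketch above, the phi built here
  // would look roughly like:
  //   %v2.pre-phi = phi i32 [ %v1, %then ], [ <clone of %v2>, %else ]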
2883
2884 VN.add(Phi, ValNo);
2885   // After creating a new PHI for ValNo, the phi translate result for ValNo
2886   // will change, so erase the related stale entries in the phi translate cache.
2887 VN.eraseTranslateCacheEntry(ValNo, *CurrentBlock);
2888 addToLeaderTable(ValNo, Phi, CurrentBlock);
2889 Phi->setDebugLoc(CurInst->getDebugLoc());
2890 CurInst->replaceAllUsesWith(Phi);
2891 if (MD && Phi->getType()->isPtrOrPtrVectorTy())
2892 MD->invalidateCachedPointerInfo(Phi);
2893 VN.erase(CurInst);
2894 removeFromLeaderTable(ValNo, CurInst, CurrentBlock);
2895
2896 LLVM_DEBUG(dbgs() << "GVN PRE removed: " << *CurInst << '\n');
2897 if (MD)
2898 MD->removeInstruction(CurInst);
2899 if (MSSAU)
2900 MSSAU->removeMemoryAccess(CurInst);
2901 LLVM_DEBUG(verifyRemoved(CurInst));
2902 // FIXME: Intended to be markInstructionForDeletion(CurInst), but it causes
2903 // some assertion failures.
2904 ICF->removeInstruction(CurInst);
2905 CurInst->eraseFromParent();
2906 ++NumGVNInstr;
2907
2908 return true;
2909 }
2910
2911 /// Perform a purely local form of PRE that looks for diamond
2912 /// control flow patterns and attempts to perform simple PRE at the join point.
2913 bool GVNPass::performPRE(Function &F) {
2914 bool Changed = false;
2915 for (BasicBlock *CurrentBlock : depth_first(&F.getEntryBlock())) {
2916 // Nothing to PRE in the entry block.
2917 if (CurrentBlock == &F.getEntryBlock())
2918 continue;
2919
2920 // Don't perform PRE on an EH pad.
2921 if (CurrentBlock->isEHPad())
2922 continue;
2923
2924 for (BasicBlock::iterator BI = CurrentBlock->begin(),
2925 BE = CurrentBlock->end();
2926 BI != BE;) {
2927 Instruction *CurInst = &*BI++;
2928 Changed |= performScalarPRE(CurInst);
2929 }
2930 }
2931
2932 if (splitCriticalEdges())
2933 Changed = true;
2934
2935 return Changed;
2936 }
2937
2938 /// Split the critical edge connecting the two given blocks, and return
2939 /// the new block inserted on that edge.
2940 BasicBlock *GVNPass::splitCriticalEdges(BasicBlock *Pred, BasicBlock *Succ) {
2941   // GVN does not require loop-simplify; do not try to preserve it if that is
2942   // not possible.
2943 BasicBlock *BB = SplitCriticalEdge(
2944 Pred, Succ,
2945 CriticalEdgeSplittingOptions(DT, LI, MSSAU).unsetPreserveLoopSimplify());
2946 if (BB) {
2947 if (MD)
2948 MD->invalidateCachedPredecessors();
2949 InvalidBlockRPONumbers = true;
2950 }
2951 return BB;
2952 }
2953
2954 /// Split critical edges found during the previous
2955 /// iteration that may enable further optimization.
2956 bool GVNPass::splitCriticalEdges() {
2957 if (toSplit.empty())
2958 return false;
2959
2960 bool Changed = false;
2961 do {
2962 std::pair<Instruction *, unsigned> Edge = toSplit.pop_back_val();
2963 Changed |= SplitCriticalEdge(Edge.first, Edge.second,
2964 CriticalEdgeSplittingOptions(DT, LI, MSSAU)) !=
2965 nullptr;
2966 } while (!toSplit.empty());
2967 if (Changed) {
2968 if (MD)
2969 MD->invalidateCachedPredecessors();
2970 InvalidBlockRPONumbers = true;
2971 }
2972 return Changed;
2973 }
2974
2975 /// Executes one iteration of GVN
2976 bool GVNPass::iterateOnFunction(Function &F) {
2977 cleanupGlobalSets();
2978
2979   // Top-down walk of the function's blocks in reverse post-order.
2980 bool Changed = false;
2981 // Needed for value numbering with phi construction to work.
2982 // RPOT walks the graph in its constructor and will not be invalidated during
2983 // processBlock.
2984 ReversePostOrderTraversal<Function *> RPOT(&F);
2985
2986 for (BasicBlock *BB : RPOT)
2987 Changed |= processBlock(BB);
2988
2989 return Changed;
2990 }
2991
2992 void GVNPass::cleanupGlobalSets() {
2993 VN.clear();
2994 LeaderTable.clear();
2995 BlockRPONumber.clear();
2996 TableAllocator.Reset();
2997 ICF->clear();
2998 InvalidBlockRPONumbers = true;
2999 }
3000
3001 /// Verify that the specified instruction does not occur in our
3002 /// internal data structures.
3003 void GVNPass::verifyRemoved(const Instruction *Inst) const {
3004 VN.verifyRemoved(Inst);
3005
3006 // Walk through the value number scope to make sure the instruction isn't
3007 // ferreted away in it.
3008 for (const auto &I : LeaderTable) {
3009 const LeaderTableEntry *Node = &I.second;
3010 assert(Node->Val != Inst && "Inst still in value numbering scope!");
3011
3012 while (Node->Next) {
3013 Node = Node->Next;
3014 assert(Node->Val != Inst && "Inst still in value numbering scope!");
3015 }
3016 }
3017 }
3018
3019 /// BB is declared dead, which implies that other blocks become dead as well.
3020 /// This function adds all such blocks to "DeadBlocks". For the dead blocks'
3021 /// live successors, update their phi nodes by replacing the operands
3022 /// corresponding to dead blocks with a poison value.
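///
/// Illustrative sketch (block names are hypothetical):
///
///   BB -> C -> D        ; C and D are reachable only through BB, so they are
///                       ; added to DeadBlocks together with BB.
///   D -> E, Live -> E   ; E keeps a live predecessor, so it stays live; only
///                       ; E's phi operands incoming from D are rewritten to
///                       ; poison.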
3023 void GVNPass::addDeadBlock(BasicBlock *BB) {
3024 SmallVector<BasicBlock *, 4> NewDead;
3025 SmallSetVector<BasicBlock *, 4> DF;
3026
3027 NewDead.push_back(BB);
3028 while (!NewDead.empty()) {
3029 BasicBlock *D = NewDead.pop_back_val();
3030 if (DeadBlocks.count(D))
3031 continue;
3032
3033 // All blocks dominated by D are dead.
3034 SmallVector<BasicBlock *, 8> Dom;
3035 DT->getDescendants(D, Dom);
3036 DeadBlocks.insert(Dom.begin(), Dom.end());
3037
3038 // Figure out the dominance-frontier(D).
3039 for (BasicBlock *B : Dom) {
3040 for (BasicBlock *S : successors(B)) {
3041 if (DeadBlocks.count(S))
3042 continue;
3043
3044 bool AllPredDead = true;
3045 for (BasicBlock *P : predecessors(S))
3046 if (!DeadBlocks.count(P)) {
3047 AllPredDead = false;
3048 break;
3049 }
3050
3051 if (!AllPredDead) {
3052 // S could be proved dead later on. That is why we don't update phi
3053 // operands at this moment.
3054 DF.insert(S);
3055 } else {
3056           // Although S is not dominated by D, it is dead by now. This can take
3057           // place if S already had a dead predecessor before D was declared
3058           // dead.
3059 NewDead.push_back(S);
3060 }
3061 }
3062 }
3063 }
3064
3065   // For the dead blocks' live successors, update their phi nodes by replacing
3066   // the operands corresponding to dead blocks with a poison value.
3067 for (BasicBlock *B : DF) {
3068 if (DeadBlocks.count(B))
3069 continue;
3070
3071 // First, split the critical edges. This might also create additional blocks
3072 // to preserve LoopSimplify form and adjust edges accordingly.
3073 SmallVector<BasicBlock *, 4> Preds(predecessors(B));
3074 for (BasicBlock *P : Preds) {
3075 if (!DeadBlocks.count(P))
3076 continue;
3077
3078 if (llvm::is_contained(successors(P), B) &&
3079 isCriticalEdge(P->getTerminator(), B)) {
3080 if (BasicBlock *S = splitCriticalEdges(P, B))
3081 DeadBlocks.insert(P = S);
3082 }
3083 }
3084
3085 // Now poison the incoming values from the dead predecessors.
3086 for (BasicBlock *P : predecessors(B)) {
3087 if (!DeadBlocks.count(P))
3088 continue;
3089 for (PHINode &Phi : B->phis()) {
3090 Phi.setIncomingValueForBlock(P, PoisonValue::get(Phi.getType()));
3091 if (MD)
3092 MD->invalidateCachedPointerInfo(&Phi);
3093 }
3094 }
3095 }
3096 }
3097
3098 // If the given branch is recognized as a foldable branch (i.e. a conditional
3099 // branch with a constant condition), perform the following analyses and
3100 // transformations:
3101 // 1) If the dead outgoing edge is a critical edge, split it. Let
3102 //    R be the target of the dead outgoing edge.
3103 // 2) Identify the set of dead blocks implied by the branch's dead outgoing
3104 //    edge. The result of this step will be {X | X is dominated by R}.
3105 // 3) Identify those blocks which have at least one dead predecessor. The
3106 //    result of this step will be dominance-frontier(R).
3107 // 4) Update the PHIs in DF(R) by replacing the operands corresponding to
3108 //    dead blocks with a poison value, in the hope that these PHIs will be
3109 //    optimized away.
3110 //
3111 // Return true iff *NEW* dead code is found.
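//
// An illustrative example (names are hypothetical): once GVN has proven the
// condition constant, a branch such as
//   br i1 true, label %taken, label %nottaken
// makes the edge to %nottaken dead; %nottaken plays the role of R above, and
// every block reachable only through it becomes dead as well.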
3111 bool GVNPass::processFoldableCondBr(BranchInst *BI) {
3112 if (!BI || BI->isUnconditional())
3113 return false;
3114
3115 // If a branch has two identical successors, we cannot declare either dead.
3116 if (BI->getSuccessor(0) == BI->getSuccessor(1))
3117 return false;
3118
3119 ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition());
3120 if (!Cond)
3121 return false;
3122
3123 BasicBlock *DeadRoot =
3124 Cond->getZExtValue() ? BI->getSuccessor(1) : BI->getSuccessor(0);
3125 if (DeadBlocks.count(DeadRoot))
3126 return false;
3127
3128 if (!DeadRoot->getSinglePredecessor())
3129 DeadRoot = splitCriticalEdges(BI->getParent(), DeadRoot);
3130
3131 addDeadBlock(DeadRoot);
3132 return true;
3133 }
3134
3135 // performPRE() will trigger an assert if it comes across an instruction
3136 // without an associated value number. As a function normally has far more live
3137 // instructions than dead ones, it makes more sense to simply "fabricate" a
3138 // value number for the dead code than to check whether each instruction is dead.
3139 void GVNPass::assignValNumForDeadCode() {
3140 for (BasicBlock *BB : DeadBlocks) {
3141 for (Instruction &Inst : *BB) {
3142 unsigned ValNum = VN.lookupOrAdd(&Inst);
3143 addToLeaderTable(ValNum, &Inst, BB);
3144 }
3145 }
3146 }
3147
3148 class llvm::gvn::GVNLegacyPass : public FunctionPass {
3149 public:
3150 static char ID; // Pass identification, replacement for typeid
3151
3152   explicit GVNLegacyPass(bool NoMemDepAnalysis = !GVNEnableMemDep)
3153 : FunctionPass(ID), Impl(GVNOptions().setMemDep(!NoMemDepAnalysis)) {
3154 initializeGVNLegacyPassPass(*PassRegistry::getPassRegistry());
3155 }
3156
3157   bool runOnFunction(Function &F) override {
3158 if (skipFunction(F))
3159 return false;
3160
3161 auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
3162
3163 auto *MSSAWP = getAnalysisIfAvailable<MemorySSAWrapperPass>();
3164 return Impl.runImpl(
3165 F, getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
3166 getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
3167 getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
3168 getAnalysis<AAResultsWrapperPass>().getAAResults(),
3169 Impl.isMemDepEnabled()
3170 ? &getAnalysis<MemoryDependenceWrapperPass>().getMemDep()
3171 : nullptr,
3172 LIWP ? &LIWP->getLoopInfo() : nullptr,
3173 &getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE(),
3174 MSSAWP ? &MSSAWP->getMSSA() : nullptr);
3175 }
3176
3177   void getAnalysisUsage(AnalysisUsage &AU) const override {
3178 AU.addRequired<AssumptionCacheTracker>();
3179 AU.addRequired<DominatorTreeWrapperPass>();
3180 AU.addRequired<TargetLibraryInfoWrapperPass>();
3181 AU.addRequired<LoopInfoWrapperPass>();
3182 if (Impl.isMemDepEnabled())
3183 AU.addRequired<MemoryDependenceWrapperPass>();
3184 AU.addRequired<AAResultsWrapperPass>();
3185 AU.addPreserved<DominatorTreeWrapperPass>();
3186 AU.addPreserved<GlobalsAAWrapperPass>();
3187 AU.addPreserved<TargetLibraryInfoWrapperPass>();
3188 AU.addPreserved<LoopInfoWrapperPass>();
3189 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
3190 AU.addPreserved<MemorySSAWrapperPass>();
3191 }
3192
3193 private:
3194 GVNPass Impl;
3195 };
3196
3197 char GVNLegacyPass::ID = 0;
3198
3199 INITIALIZE_PASS_BEGIN(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3200 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
3201 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
3202 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3203 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3204 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
3205 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
3206 INITIALIZE_PASS_DEPENDENCY(OptimizationRemarkEmitterWrapperPass)
3207 INITIALIZE_PASS_END(GVNLegacyPass, "gvn", "Global Value Numbering", false, false)
3208
3209 // The public interface to this file...
3210 FunctionPass *llvm::createGVNPass(bool NoMemDepAnalysis) {
3211 return new GVNLegacyPass(NoMemDepAnalysis);
3212 }
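
// A minimal usage sketch (illustrative only; variable names are hypothetical).
// With the legacy pass manager, the pass is created through the factory above:
//
//   legacy::PassManager PM;
//   PM.add(createGVNPass());
//   PM.run(M);
//
// With the new pass manager, GVNPass is added directly:
//
//   FunctionPassManager FPM;
//   FPM.addPass(GVNPass());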
3213