//===- NewGVN.cpp - Global Value Numbering Pass ---------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements LLVM's new Global Value Numbering pass.
/// GVN partitions values computed by a function into congruence classes.
/// Values ending up in the same congruence class are guaranteed to be the same
/// for every execution of the program. In that respect, congruency is a
/// compile-time approximation of equivalence of values at runtime.
/// The algorithm implemented here uses a sparse formulation and it's based
/// on the ideas described in the paper:
/// "A Sparse Algorithm for Predicated Global Value Numbering" by
/// Karthik Gargi.
///
/// A brief overview of the algorithm: The algorithm is essentially the same as
/// the standard RPO value numbering algorithm (a good reference is the paper
/// "SCC based value numbering" by L. Taylor Simpson) with one major difference:
/// The RPO algorithm proceeds, on every iteration, to process every reachable
/// block and every instruction in that block. This is because the standard RPO
/// algorithm does not track what things have the same value number, it only
/// tracks what the value number of a given operation is (the mapping is
/// operation -> value number). Thus, when a value number of an operation
/// changes, it must reprocess everything to ensure all uses of a value number
/// get updated properly. In contrast, the sparse algorithm we use *also*
/// tracks what operations have a given value number (IE it also tracks the
/// reverse mapping from value number -> operations with that value number), so
/// that it only needs to reprocess the instructions that are affected when
/// something's value number changes. The vast majority of complexity and code
/// in this file is devoted to tracking what value numbers could change for what
/// instructions when various things happen. The rest of the algorithm is
/// devoted to performing symbolic evaluation, forward propagation, and
/// simplification of operations based on the value numbers deduced so far.
///
/// In order to make the GVN mostly-complete, we use a technique derived from
/// "Detection of Redundant Expressions: A Complete and Polynomial-time
/// Algorithm in SSA" by R.R. Pai. The source of incompleteness in most SSA
/// based GVN algorithms is related to their inability to detect equivalence
/// between phi of ops (IE phi(a+b, c+d)) and op of phis (phi(a,c) + phi(b, d)).
/// We resolve this issue by generating the equivalent "phi of ops" form for
/// each op of phis we see, in a way that only takes polynomial time to resolve
/// (see the illustrative example at the end of this overview).
///
/// We also do not perform elimination by using any published algorithm. All
/// published algorithms are O(Instructions). Instead, we use a technique that
/// is O(number of operations with the same value number), enabling us to skip
/// trying to eliminate things that have unique value numbers.
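///
/// For illustration only (hypothetical IR, not taken from the paper or from a
/// test case), the "op of phis":
///
///   merge:
///     %p = phi i32 [ %a, %bb1 ], [ %c, %bb2 ]
///     %q = phi i32 [ %b, %bb1 ], [ %d, %bb2 ]
///     %r = add i32 %p, %q
///
/// has an equivalent "phi of ops" form (sketched here; the pass only builds
/// the phi when the translated operations already have leaders in the
/// predecessors, it does not insert new arithmetic):
///
///   bb1:
///     %ab = add i32 %a, %b
///     br label %merge
///   bb2:
///     %cd = add i32 %c, %d
///     br label %merge
///   merge:
///     %r.phi = phi i32 [ %ab, %bb1 ], [ %cd, %bb2 ]
///
/// so %r can be value numbered together with %r.phi, and therefore with any
/// pre-existing %a+%b / %c+%d computations in the predecessors.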
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/NewGVN.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SparseBitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFGPrinter.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/ArrayRecycler.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar/GVNExpression.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/PredicateInfo.h"
#include "llvm/Transforms/Utils/VNCoercion.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::GVNExpression;
using namespace llvm::VNCoercion;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "newgvn"

STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
STATISTIC(NumGVNBlocksDeleted, "Number of blocks deleted");
STATISTIC(NumGVNOpsSimplified, "Number of Expressions simplified");
STATISTIC(NumGVNPhisAllSame, "Number of PHIs whose arguments are all the same");
STATISTIC(NumGVNMaxIterations,
          "Maximum Number of iterations it took to converge GVN");
STATISTIC(NumGVNLeaderChanges, "Number of leader changes");
STATISTIC(NumGVNSortedLeaderChanges, "Number of sorted leader changes");
STATISTIC(NumGVNAvoidedSortedLeaderChanges,
          "Number of avoided sorted leader changes");
STATISTIC(NumGVNDeadStores, "Number of redundant/dead stores eliminated");
STATISTIC(NumGVNPHIOfOpsCreated, "Number of PHI of ops created");
STATISTIC(NumGVNPHIOfOpsEliminations,
          "Number of things eliminated using PHI of ops");
DEBUG_COUNTER(VNCounter, "newgvn-vn",
              "Controls which instructions are value numbered");
DEBUG_COUNTER(PHIOfOpsCounter, "newgvn-phi",
              "Controls which instructions we create phi of ops for");
// Currently store defining access refinement is too slow due to basicaa being
// egregiously slow. This flag lets us keep it working while we work on this
// issue.
static cl::opt<bool> EnableStoreRefinement("enable-store-refinement",
                                           cl::init(false), cl::Hidden);

/// Currently, the generation of "phi of ops" can result in correctness issues.
static cl::opt<bool> EnablePhiOfOps("enable-phi-of-ops", cl::init(true),
                                    cl::Hidden);

//===----------------------------------------------------------------------===//
//                                GVN Pass
//===----------------------------------------------------------------------===//

// Anchor methods.
namespace llvm {
namespace GVNExpression {

Expression::~Expression() = default;
BasicExpression::~BasicExpression() = default;
CallExpression::~CallExpression() = default;
LoadExpression::~LoadExpression() = default;
StoreExpression::~StoreExpression() = default;
AggregateValueExpression::~AggregateValueExpression() = default;
PHIExpression::~PHIExpression() = default;

} // end namespace GVNExpression
} // end namespace llvm

namespace {

// Tarjan's SCC finding algorithm with Nuutila's improvements.
// SCCIterator is actually fairly complex for the simple thing we want.
// It also wants to hand us SCC's that are unrelated to the phi node we ask
// about, and have us process them there or risk redoing work.
// Graph traits over a filter iterator also doesn't work that well here.
// This SCC finder is specialized to walk use-def chains, and only follows
// instructions, not generic values (arguments, etc).
struct TarjanSCC {
  TarjanSCC() : Components(1) {}

  void Start(const Instruction *Start) {
    if (Root.lookup(Start) == 0)
      FindSCC(Start);
  }

  const SmallPtrSetImpl<const Value *> &getComponentFor(const Value *V) const {
    unsigned ComponentID = ValueToComponent.lookup(V);

    assert(ComponentID > 0 &&
           "Asking for a component for a value we never processed");
    return Components[ComponentID];
  }

private:
  void FindSCC(const Instruction *I) {
    Root[I] = ++DFSNum;
    // Store the DFS Number we had before it possibly gets incremented.
    unsigned int OurDFS = DFSNum;
    for (const auto &Op : I->operands()) {
      if (auto *InstOp = dyn_cast<Instruction>(Op)) {
        if (Root.lookup(Op) == 0)
          FindSCC(InstOp);
        if (!InComponent.count(Op))
          Root[I] = std::min(Root.lookup(I), Root.lookup(Op));
      }
    }
    // See if we really were the root of a component, by seeing if we still
    // have our DFSNumber. If we do, we are the root of the component, and we
    // have completed a component. If we do not, we are not the root of a
    // component, and belong on the component stack.
    if (Root.lookup(I) == OurDFS) {
      unsigned ComponentID = Components.size();
      Components.resize(Components.size() + 1);
      auto &Component = Components.back();
      Component.insert(I);
      LLVM_DEBUG(dbgs() << "Component root is " << *I << "\n");
      InComponent.insert(I);
      ValueToComponent[I] = ComponentID;
      // Pop a component off the stack and label it.
      while (!Stack.empty() && Root.lookup(Stack.back()) >= OurDFS) {
        auto *Member = Stack.back();
        LLVM_DEBUG(dbgs() << "Component member is " << *Member << "\n");
        Component.insert(Member);
        InComponent.insert(Member);
        ValueToComponent[Member] = ComponentID;
        Stack.pop_back();
      }
    } else {
      // Part of a component, push to stack
      Stack.push_back(I);
    }
  }

  unsigned int DFSNum = 1;
  SmallPtrSet<const Value *, 8> InComponent;
  DenseMap<const Value *, unsigned int> Root;
  SmallVector<const Value *, 8> Stack;

  // Store the components as vector of ptr sets, because we need the topo order
  // of SCC's, but not individual member order
  SmallVector<SmallPtrSet<const Value *, 8>, 8> Components;

  DenseMap<const Value *, unsigned> ValueToComponent;
};

// Congruence classes represent the set of expressions/instructions
// that are all the same *during some scope in the function*.
// That is, because of the way we perform equality propagation, and
// because of memory value numbering, it is not correct to assume
// you can willy-nilly replace any member with any other at any
// point in the function.
//
// For any Value in the Member set, it is valid to replace any dominated member
// with that Value.
//
// Every congruence class has a leader, and the leader is used to symbolize
// instructions in a canonical way (IE every operand of an instruction that is
// a member of the same congruence class will always be replaced with leader
// during symbolization). To simplify symbolization, we keep the leader as a
// constant if the class can be proved to be a constant value. Otherwise, the
// leader is the member of the value set with the smallest DFS number. Each
// congruence class also has a defining expression, though the expression may
// be null. If it exists, it can be used for forward propagation and
// reassociation of values.

// For memory, we also track a representative MemoryAccess, and a set of memory
// members for MemoryPhis (which have no real instructions). Note that for
// memory, it seems tempting to try to split the memory members into a
// MemoryCongruenceClass or something. Unfortunately, this does not work
// easily. The value numbering of a given memory expression depends on the
// leader of the memory congruence class, and the leader of the memory
// congruence class depends on the value numbering of a given memory
// expression. This leads to wasted propagation, and in some cases, missed
// optimization. For example: If we had value numbered two stores together
// before, but now do not, we move them to a new value congruence class. This
// in turn will move at least one of the memorydefs to a new memory congruence
// class. Which in turn, affects the value numbering of the stores we just
// value numbered (because the memory congruence class is part of the value
// number). So while theoretically possible to split them up, it turns out to
// be *incredibly* complicated to get it to work right, because of the
// interdependency. While structurally slightly messier, it is algorithmically
// much simpler and faster to do what we do here, and track them both at once
// in the same class.
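//
// Purely for illustration (hypothetical IR, with MemorySSA annotations shown
// roughly as the printer would emit them): given
//
//   ; 1 = MemoryDef(liveOnEntry)
//   store i32 %v, ptr %p
//   ; 2 = MemoryDef(1)
//   store i32 %v, ptr %p
//
// when the two stores are congruent, MemoryDefs 1 and 2 share a memory leader
// in the same class as the stores. If a later iteration separates the stores
// into different value classes, at least one MemoryDef must move to a fresh
// memory leader as well, which in turn re-touches the stores' value numbers.
// Keeping value members and memory members in one CongruenceClass lets both
// updates happen together.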
// Note: The default iterators for this class iterate over values
class CongruenceClass {
public:
  using MemberType = Value;
  using MemberSet = SmallPtrSet<MemberType *, 4>;
  using MemoryMemberType = MemoryPhi;
  using MemoryMemberSet = SmallPtrSet<const MemoryMemberType *, 2>;

  explicit CongruenceClass(unsigned ID) : ID(ID) {}
  CongruenceClass(unsigned ID, std::pair<Value *, unsigned int> Leader,
                  const Expression *E)
      : ID(ID), RepLeader(Leader), DefiningExpr(E) {}

  unsigned getID() const { return ID; }

  // True if this class has no members left. This is mainly used for assertion
  // purposes, and for skipping empty classes.
  bool isDead() const {
    // If it's both dead from a value perspective, and dead from a memory
    // perspective, it's really dead.
    return empty() && memory_empty();
  }

  // Leader functions
  Value *getLeader() const { return RepLeader.first; }
  void setLeader(std::pair<Value *, unsigned int> Leader) {
    RepLeader = Leader;
  }
  const std::pair<Value *, unsigned int> &getNextLeader() const {
    return NextLeader;
  }
  void resetNextLeader() { NextLeader = {nullptr, ~0}; }
  bool addPossibleLeader(std::pair<Value *, unsigned int> LeaderPair) {
    if (LeaderPair.second < RepLeader.second) {
      NextLeader = RepLeader;
      RepLeader = LeaderPair;
      return true;
    } else if (LeaderPair.second < NextLeader.second) {
      NextLeader = LeaderPair;
    }
    return false;
  }

  Value *getStoredValue() const { return RepStoredValue; }
  void setStoredValue(Value *Leader) { RepStoredValue = Leader; }
  const MemoryAccess *getMemoryLeader() const { return RepMemoryAccess; }
  void setMemoryLeader(const MemoryAccess *Leader) { RepMemoryAccess = Leader; }

  // Forward propagation info
  const Expression *getDefiningExpr() const { return DefiningExpr; }

  // Value member set
  bool empty() const { return Members.empty(); }
  unsigned size() const { return Members.size(); }
  MemberSet::const_iterator begin() const { return Members.begin(); }
  MemberSet::const_iterator end() const { return Members.end(); }
  void insert(MemberType *M) { Members.insert(M); }
  void erase(MemberType *M) { Members.erase(M); }
  void swap(MemberSet &Other) { Members.swap(Other); }

  // Memory member set
  bool memory_empty() const { return MemoryMembers.empty(); }
  unsigned memory_size() const { return MemoryMembers.size(); }
  MemoryMemberSet::const_iterator memory_begin() const {
    return MemoryMembers.begin();
  }
  MemoryMemberSet::const_iterator memory_end() const {
    return MemoryMembers.end();
  }
  iterator_range<MemoryMemberSet::const_iterator> memory() const {
    return make_range(memory_begin(), memory_end());
  }

  void memory_insert(const MemoryMemberType *M) { MemoryMembers.insert(M); }
  void memory_erase(const MemoryMemberType *M) { MemoryMembers.erase(M); }

  // Store count
  unsigned getStoreCount() const { return StoreCount; }
  void incStoreCount() { ++StoreCount; }
  void decStoreCount() {
    assert(StoreCount != 0 && "Store count went negative");
    --StoreCount;
  }

  // True if this class has no memory members.
  bool definesNoMemory() const { return StoreCount == 0 && memory_empty(); }

  // Return true if two congruence classes are equivalent to each other. This
  // means that every field but the ID number and the dead field are
  // equivalent.
  bool isEquivalentTo(const CongruenceClass *Other) const {
    if (!Other)
      return false;
    if (this == Other)
      return true;

    if (std::tie(StoreCount, RepLeader, RepStoredValue, RepMemoryAccess) !=
        std::tie(Other->StoreCount, Other->RepLeader, Other->RepStoredValue,
                 Other->RepMemoryAccess))
      return false;
    if (DefiningExpr != Other->DefiningExpr)
      if (!DefiningExpr || !Other->DefiningExpr ||
          *DefiningExpr != *Other->DefiningExpr)
        return false;

    if (Members.size() != Other->Members.size())
      return false;

    return llvm::set_is_subset(Members, Other->Members);
  }

private:
  unsigned ID;

  // Representative leader and its corresponding RPO number.
  // The leader must have the lowest RPO number.
  std::pair<Value *, unsigned int> RepLeader = {nullptr, ~0U};

  // The most dominating leader after our current leader (given by the RPO
  // number), because the member set is not sorted and is expensive to keep
  // sorted all the time.
  std::pair<Value *, unsigned int> NextLeader = {nullptr, ~0U};

  // If this is represented by a store, the value of the store.
  Value *RepStoredValue = nullptr;

  // If this class contains MemoryDefs or MemoryPhis, this is the leading
  // memory access.
  const MemoryAccess *RepMemoryAccess = nullptr;

  // Defining Expression.
  const Expression *DefiningExpr = nullptr;

  // Actual members of this class.
  MemberSet Members;

  // This is the set of MemoryPhis that exist in the class. MemoryDefs and
  // MemoryUses have real instructions representing them, so we only need to
  // track MemoryPhis here.
  MemoryMemberSet MemoryMembers;

  // Number of stores in this congruence class.
  // This is used so we can detect store equivalence changes properly.
  int StoreCount = 0;
};

} // end anonymous namespace

namespace llvm {

struct ExactEqualsExpression {
  const Expression &E;

  explicit ExactEqualsExpression(const Expression &E) : E(E) {}

  hash_code getComputedHash() const { return E.getComputedHash(); }

  bool operator==(const Expression &Other) const {
    return E.exactlyEquals(Other);
  }
};

template <> struct DenseMapInfo<const Expression *> {
  static const Expression *getEmptyKey() {
    auto Val = static_cast<uintptr_t>(-1);
    Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
    return reinterpret_cast<const Expression *>(Val);
  }

  static const Expression *getTombstoneKey() {
    auto Val = static_cast<uintptr_t>(~1U);
    Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
    return reinterpret_cast<const Expression *>(Val);
  }

  static unsigned getHashValue(const Expression *E) {
    return E->getComputedHash();
  }

  static unsigned getHashValue(const ExactEqualsExpression &E) {
    return E.getComputedHash();
  }

  static bool isEqual(const ExactEqualsExpression &LHS, const Expression *RHS) {
    if (RHS == getTombstoneKey() || RHS == getEmptyKey())
      return false;
    return LHS == *RHS;
  }

  static bool isEqual(const Expression *LHS, const Expression *RHS) {
    if (LHS == RHS)
      return true;
    if (LHS == getTombstoneKey() || RHS == getTombstoneKey() ||
        LHS == getEmptyKey() || RHS == getEmptyKey())
      return false;
    // Compare hashes before equality.
    // This is *not* what the hashtable does, since it is computing it modulo
    // the number of buckets, whereas we are using the full hash keyspace.
    // Since the hashes are precomputed, this check is *much* faster than
    // equality.
    if (LHS->getComputedHash() != RHS->getComputedHash())
      return false;
    return *LHS == *RHS;
  }
};

} // end namespace llvm

namespace {

class NewGVN {
  Function &F;
  DominatorTree *DT = nullptr;
  const TargetLibraryInfo *TLI = nullptr;
  AliasAnalysis *AA = nullptr;
  MemorySSA *MSSA = nullptr;
  MemorySSAWalker *MSSAWalker = nullptr;
  AssumptionCache *AC = nullptr;
  const DataLayout &DL;
  std::unique_ptr<PredicateInfo> PredInfo;

  // These are the only two things the create* functions should have
  // side-effects on due to allocating memory.
  mutable BumpPtrAllocator ExpressionAllocator;
  mutable ArrayRecycler<Value *> ArgRecycler;
  mutable TarjanSCC SCCFinder;
  const SimplifyQuery SQ;

  // Number of function arguments, used by ranking
  unsigned int NumFuncArgs = 0;

  // RPOOrdering of basic blocks
  DenseMap<const DomTreeNode *, unsigned> RPOOrdering;

  // Congruence class info.

  // This class is called INITIAL in the paper. It is the class everything
  // starts out in, and represents any value. Being an optimistic analysis,
  // anything in the TOP class has the value TOP, which is indeterminate and
  // equivalent to everything.
  CongruenceClass *TOPClass = nullptr;
  std::vector<CongruenceClass *> CongruenceClasses;
  unsigned NextCongruenceNum = 0;

  // Value Mappings.
  DenseMap<Value *, CongruenceClass *> ValueToClass;
  DenseMap<Value *, const Expression *> ValueToExpression;

  // Value PHI handling, used to make equivalence between phi(op, op) and
  // op(phi, phi).
  // These mappings just store various data that would normally be part of the
  // IR.
  SmallPtrSet<const Instruction *, 8> PHINodeUses;

  // The cached results, in general, are only valid for the specific block
  // where they were computed. The unsigned part of the key is a unique block
  // identifier.
  DenseMap<std::pair<const Value *, unsigned>, bool> OpSafeForPHIOfOps;
  unsigned CacheIdx;

  // Map a temporary instruction we created to a parent block.
  DenseMap<const Value *, BasicBlock *> TempToBlock;

  // Map between the already in-program instructions and the temporary phis we
  // created that they are known equivalent to.
  DenseMap<const Value *, PHINode *> RealToTemp;

  // In order to know when we should re-process instructions that have
  // phi-of-ops, we track the set of expressions that they needed as
  // leaders. When we discover new leaders for those expressions, we process
  // the associated phi-of-op instructions again in case they have changed.
  // The other way they may change is if they had leaders, and those leaders
  // disappear. However, at the point they have leaders, there are uses of the
  // relevant operands in the created phi node, and so they will get
  // reprocessed through the normal user marking we perform.
  mutable DenseMap<const Value *, SmallPtrSet<Value *, 2>> AdditionalUsers;
  DenseMap<const Expression *, SmallPtrSet<Instruction *, 2>>
      ExpressionToPhiOfOps;

  // Map from temporary operation to MemoryAccess.
  DenseMap<const Instruction *, MemoryUseOrDef *> TempToMemory;

  // Set of all temporary instructions we created.
  // Note: This will include instructions that were just created during value
  // numbering. The way to test if something is using them is to check
  // RealToTemp.
  DenseSet<Instruction *> AllTempInstructions;

  // This is the set of instructions to revisit on a reachability change. At
  // the end of the main iteration loop it will contain at least all the phi of
  // ops instructions that will be changed to phis, as well as regular phis.
  // During the iteration loop, it may contain other things, such as phi of ops
  // instructions that used edge reachability to reach a result, and so need to
  // be revisited when the edge changes, independent of whether the phi they
  // depended on changes.
  DenseMap<BasicBlock *, SparseBitVector<>> RevisitOnReachabilityChange;

  // Mapping from predicate info we used to the instructions we used it with.
  // In order to correctly ensure propagation, we must keep track of what
  // comparisons we used, so that when the values of the comparisons change, we
  // propagate the information to the places we used the comparison.
  mutable DenseMap<const Value *, SmallPtrSet<Instruction *, 2>>
      PredicateToUsers;

  // The same reasoning as PredicateToUsers. When we skip MemoryAccesses for
  // stores, we no longer can rely solely on the def-use chains of MemorySSA.
  mutable DenseMap<const MemoryAccess *, SmallPtrSet<MemoryAccess *, 2>>
      MemoryToUsers;

  // A table storing which memorydefs/phis represent a memory state provably
  // equivalent to another memory state.
  // We could use the congruence class machinery, but the MemoryAccess's are
  // abstract memory states, so they can only ever be equivalent to each other,
  // and not to constants, etc.
  DenseMap<const MemoryAccess *, CongruenceClass *> MemoryAccessToClass;

  // We could, if we wanted, build MemoryPhiExpressions and
  // MemoryVariableExpressions, etc, and value number them the same way we
  // value number phi expressions. For the moment, this seems like overkill.
  // They can only exist in one of three states: they can be TOP (equal to
  // everything), Equivalent to something else, or Unique. Because we do not
  // create expressions for them, we need to simulate leader change not just
  // when they change class, but when they change state. Note: We can do the
  // same thing for phis, and avoid having phi expressions if we wanted. We
  // should eventually unify in one direction or the other, so this is a little
  // bit of an experiment in which one turns out easier to maintain.
  enum MemoryPhiState { MPS_Invalid, MPS_TOP, MPS_Equivalent, MPS_Unique };
  DenseMap<const MemoryPhi *, MemoryPhiState> MemoryPhiState;

  enum InstCycleState { ICS_Unknown, ICS_CycleFree, ICS_Cycle };
  mutable DenseMap<const Instruction *, InstCycleState> InstCycleState;

  // Expression to class mapping.
  using ExpressionClassMap = DenseMap<const Expression *, CongruenceClass *>;
  ExpressionClassMap ExpressionToClass;

  // We have a single expression that represents currently-dead expressions.
  // For dead expressions we can prove will stay dead, we mark them with
  // DFS number zero. However, it's possible in the case of phi nodes
  // for us to assume/prove all arguments are dead during fixpointing.
  // We use DeadExpression for that case.
  DeadExpression *SingletonDeadExpression = nullptr;

  // Which values have changed as a result of leader changes.
  SmallPtrSet<Value *, 8> LeaderChanges;

  // Reachability info.
  using BlockEdge = BasicBlockEdge;
  DenseSet<BlockEdge> ReachableEdges;
  SmallPtrSet<const BasicBlock *, 8> ReachableBlocks;

  // This is a bitvector because, on larger functions, we may have
  // thousands of touched instructions at once (entire blocks,
  // instructions with hundreds of uses, etc). Even with optimization
  // for when we mark whole blocks as touched, when this was a
  // SmallPtrSet or DenseSet, for some functions, we spent >20% of all
  // the time in GVN just managing this list. The bitvector, on the
  // other hand, efficiently supports test/set/clear of both
  // individual bits and ranges, as well as "find next element". This
  // enables us to use it as a worklist with essentially 0 cost.
  BitVector TouchedInstructions;

  DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange;
  mutable DenseMap<const IntrinsicInst *, const Value *> IntrinsicInstPred;

#ifndef NDEBUG
  // Debugging for how many times each block and instruction got processed.
  DenseMap<const Value *, unsigned> ProcessedCount;
#endif

  // DFS info.
  // This contains a mapping from Instructions to DFS numbers.
  // The numbering starts at 1. An instruction with DFS number zero
  // means that the instruction is dead.
  DenseMap<const Value *, unsigned> InstrDFS;

  // This contains the mapping from DFS numbers to instructions.
  SmallVector<Value *, 32> DFSToInstr;

  // Deletion info.
  SmallPtrSet<Instruction *, 8> InstructionsToErase;

public:
  NewGVN(Function &F, DominatorTree *DT, AssumptionCache *AC,
         TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA,
         const DataLayout &DL)
      : F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), AC(AC), DL(DL),
        PredInfo(std::make_unique<PredicateInfo>(F, *DT, *AC)),
        SQ(DL, TLI, DT, AC, /*CtxI=*/nullptr, /*UseInstrInfo=*/false,
           /*CanUseUndef=*/false) {}

  bool runGVN();

private:
  /// Helper struct that returns an Expression with an optional extra
  /// dependency.
  struct ExprResult {
    const Expression *Expr;
    Value *ExtraDep;
    const PredicateBase *PredDep;

    ExprResult(const Expression *Expr, Value *ExtraDep = nullptr,
               const PredicateBase *PredDep = nullptr)
        : Expr(Expr), ExtraDep(ExtraDep), PredDep(PredDep) {}
    ExprResult(const ExprResult &) = delete;
    ExprResult(ExprResult &&Other)
        : Expr(Other.Expr), ExtraDep(Other.ExtraDep), PredDep(Other.PredDep) {
      Other.Expr = nullptr;
      Other.ExtraDep = nullptr;
      Other.PredDep = nullptr;
    }
    ExprResult &operator=(const ExprResult &Other) = delete;
    ExprResult &operator=(ExprResult &&Other) = delete;

    ~ExprResult() { assert(!ExtraDep && "unhandled ExtraDep"); }

    operator bool() const { return Expr; }

    static ExprResult none() { return {nullptr, nullptr, nullptr}; }
    static ExprResult some(const Expression *Expr, Value *ExtraDep = nullptr) {
      return {Expr, ExtraDep, nullptr};
    }
    static ExprResult some(const Expression *Expr,
                           const PredicateBase *PredDep) {
      return {Expr, nullptr, PredDep};
    }
    static ExprResult some(const Expression *Expr, Value *ExtraDep,
                           const PredicateBase *PredDep) {
      return {Expr, ExtraDep, PredDep};
    }
  };

  // Expression handling.
  ExprResult createExpression(Instruction *) const;
  const Expression *createBinaryExpression(unsigned, Type *, Value *, Value *,
                                           Instruction *) const;

  // Our canonical form for phi arguments is a pair of incoming value, incoming
  // basic block.
  using ValPair = std::pair<Value *, BasicBlock *>;

  PHIExpression *createPHIExpression(ArrayRef<ValPair>, const Instruction *,
                                     BasicBlock *, bool &HasBackEdge,
                                     bool &OriginalOpsConstant) const;
  const DeadExpression *createDeadExpression() const;
  const VariableExpression *createVariableExpression(Value *) const;
  const ConstantExpression *createConstantExpression(Constant *) const;
  const Expression *createVariableOrConstant(Value *V) const;
  const UnknownExpression *createUnknownExpression(Instruction *) const;
  const StoreExpression *createStoreExpression(StoreInst *,
                                               const MemoryAccess *) const;
  LoadExpression *createLoadExpression(Type *, Value *, LoadInst *,
                                       const MemoryAccess *) const;
  const CallExpression *createCallExpression(CallInst *,
                                             const MemoryAccess *) const;
  const AggregateValueExpression *
  createAggregateValueExpression(Instruction *) const;
  bool setBasicExpressionInfo(Instruction *, BasicExpression *) const;

  // Congruence class handling.
  CongruenceClass *createCongruenceClass(Value *Leader, const Expression *E) {
    // Set RPO to 0 for values that are always available (constants and
    // function args). These should always be made leader.
    unsigned LeaderDFS = 0;

    // If Leader is not specified, either we have a memory class or the leader
    // will be set later. Otherwise, if Leader is an Instruction, set LeaderDFS
    // to its RPO number.
    if (!Leader)
      LeaderDFS = ~0;
    else if (auto *I = dyn_cast<Instruction>(Leader))
      LeaderDFS = InstrToDFSNum(I);
    auto *result =
        new CongruenceClass(NextCongruenceNum++, {Leader, LeaderDFS}, E);
    CongruenceClasses.emplace_back(result);
    return result;
  }

  CongruenceClass *createMemoryClass(MemoryAccess *MA) {
    auto *CC = createCongruenceClass(nullptr, nullptr);
    CC->setMemoryLeader(MA);
    return CC;
  }

  CongruenceClass *ensureLeaderOfMemoryClass(MemoryAccess *MA) {
    auto *CC = getMemoryClass(MA);
    if (CC->getMemoryLeader() != MA)
      CC = createMemoryClass(MA);
    return CC;
  }

  CongruenceClass *createSingletonCongruenceClass(Value *Member) {
    CongruenceClass *CClass = createCongruenceClass(Member, nullptr);
    CClass->insert(Member);
    ValueToClass[Member] = CClass;
    return CClass;
  }

  void initializeCongruenceClasses(Function &F);
  const Expression *makePossiblePHIOfOps(Instruction *,
                                         SmallPtrSetImpl<Value *> &);
  Value *findLeaderForInst(Instruction *ValueOp,
                           SmallPtrSetImpl<Value *> &Visited,
                           MemoryAccess *MemAccess, Instruction *OrigInst,
                           BasicBlock *PredBB);
  bool OpIsSafeForPHIOfOps(Value *Op, const BasicBlock *PHIBlock,
                           SmallPtrSetImpl<const Value *> &);
  void addPhiOfOps(PHINode *Op, BasicBlock *BB, Instruction *ExistingValue);
  void removePhiOfOps(Instruction *I, PHINode *PHITemp);

  // Value number an Instruction or MemoryPhi.
  void valueNumberMemoryPhi(MemoryPhi *);
  void valueNumberInstruction(Instruction *);

  // Symbolic evaluation.
  ExprResult checkExprResults(Expression *, Instruction *, Value *) const;
  ExprResult performSymbolicEvaluation(Instruction *,
                                       SmallPtrSetImpl<Value *> &) const;
  const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *,
                                                Instruction *,
                                                MemoryAccess *) const;
  const Expression *performSymbolicLoadEvaluation(Instruction *) const;
  const Expression *performSymbolicStoreEvaluation(Instruction *) const;
  ExprResult performSymbolicCallEvaluation(Instruction *) const;
  void sortPHIOps(MutableArrayRef<ValPair> Ops) const;
  const Expression *performSymbolicPHIEvaluation(ArrayRef<ValPair>,
                                                 Instruction *I,
                                                 BasicBlock *PHIBlock) const;
  const Expression *performSymbolicAggrValueEvaluation(Instruction *) const;
  ExprResult performSymbolicCmpEvaluation(Instruction *) const;
  ExprResult performSymbolicPredicateInfoEvaluation(IntrinsicInst *) const;

  // Congruence finding.
  bool someEquivalentDominates(const Instruction *, const Instruction *) const;
  Value *lookupOperandLeader(Value *) const;
  CongruenceClass *getClassForExpression(const Expression *E) const;
  void performCongruenceFinding(Instruction *, const Expression *);
  void moveValueToNewCongruenceClass(Instruction *, const Expression *,
                                     CongruenceClass *, CongruenceClass *);
  void moveMemoryToNewCongruenceClass(Instruction *, MemoryAccess *,
                                      CongruenceClass *, CongruenceClass *);
  Value *getNextValueLeader(CongruenceClass *) const;
  const MemoryAccess *getNextMemoryLeader(CongruenceClass *) const;
  bool setMemoryClass(const MemoryAccess *From, CongruenceClass *To);
  CongruenceClass *getMemoryClass(const MemoryAccess *MA) const;
  const MemoryAccess *lookupMemoryLeader(const MemoryAccess *) const;
  bool isMemoryAccessTOP(const MemoryAccess *) const;

  // Ranking
  unsigned int getRank(const Value *) const;
  bool shouldSwapOperands(const Value *, const Value *) const;
  bool shouldSwapOperandsForIntrinsic(const Value *, const Value *,
                                      const IntrinsicInst *I) const;

  // Reachability handling.
  void updateReachableEdge(BasicBlock *, BasicBlock *);
  void processOutgoingEdges(Instruction *, BasicBlock *);
  Value *findConditionEquivalence(Value *) const;

  // Elimination.
  struct ValueDFS;
  void convertClassToDFSOrdered(const CongruenceClass &,
                                SmallVectorImpl<ValueDFS> &,
                                DenseMap<const Value *, unsigned int> &,
                                SmallPtrSetImpl<Instruction *> &) const;
  void convertClassToLoadsAndStores(const CongruenceClass &,
                                    SmallVectorImpl<ValueDFS> &) const;

  bool eliminateInstructions(Function &);
  void replaceInstruction(Instruction *, Value *);
  void markInstructionForDeletion(Instruction *);
  void deleteInstructionsInBlock(BasicBlock *);
  Value *findPHIOfOpsLeader(const Expression *, const Instruction *,
                            const BasicBlock *) const;

  // Various instruction touch utilities
  template <typename Map, typename KeyType>
  void touchAndErase(Map &, const KeyType &);
  void markUsersTouched(Value *);
  void markMemoryUsersTouched(const MemoryAccess *);
  void markMemoryDefTouched(const MemoryAccess *);
  void markPredicateUsersTouched(Instruction *);
  void markValueLeaderChangeTouched(CongruenceClass *CC);
  void markMemoryLeaderChangeTouched(CongruenceClass *CC);
  void markPhiOfOpsChanged(const Expression *E);
  void addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const;
  void addAdditionalUsers(Value *To, Value *User) const;
  void addAdditionalUsers(ExprResult &Res, Instruction *User) const;

  // Main loop of value numbering
  void iterateTouchedInstructions();

  // Utilities.
  void cleanupTables();
  std::pair<unsigned, unsigned> assignDFSNumbers(BasicBlock *, unsigned);
  void updateProcessedCount(const Value *V);
  void verifyMemoryCongruency() const;
  void verifyIterationSettled(Function &F);
  void verifyStoreExpressions() const;
  bool singleReachablePHIPath(SmallPtrSet<const MemoryAccess *, 8> &,
                              const MemoryAccess *, const MemoryAccess *) const;
  BasicBlock *getBlockForValue(Value *V) const;
  void deleteExpression(const Expression *E) const;
  MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *) const;
  template <class T, class Range> T *getMinDFSOfRange(const Range &) const;

  unsigned InstrToDFSNum(const Value *V) const {
    assert(isa<Instruction>(V) &&
           "This should not be used for MemoryAccesses");
    return InstrDFS.lookup(V);
  }

  unsigned InstrToDFSNum(const MemoryAccess *MA) const {
    return MemoryToDFSNum(MA);
  }

  Value *InstrFromDFSNum(unsigned DFSNum) { return DFSToInstr[DFSNum]; }

  // Given a MemoryAccess, return the relevant instruction DFS number. Note:
  // This deliberately takes a value so it can be used with Use's, which will
  // auto-convert to Value's but not to MemoryAccess's.
  unsigned MemoryToDFSNum(const Value *MA) const {
    assert(isa<MemoryAccess>(MA) &&
           "This should not be used with instructions");
    return isa<MemoryUseOrDef>(MA)
               ? InstrToDFSNum(cast<MemoryUseOrDef>(MA)->getMemoryInst())
               : InstrDFS.lookup(MA);
  }

  bool isCycleFree(const Instruction *) const;
  bool isBackedge(BasicBlock *From, BasicBlock *To) const;

  // Debug counter info. When verifying, we have to reset the value numbering
  // debug counter to the same state it started in to get the same results.
  DebugCounter::CounterState StartingVNCounter;
};

} // end anonymous namespace

template <typename T>
static bool equalsLoadStoreHelper(const T &LHS, const Expression &RHS) {
  if (!isa<LoadExpression>(RHS) && !isa<StoreExpression>(RHS))
    return false;
  return LHS.MemoryExpression::equals(RHS);
}

bool LoadExpression::equals(const Expression &Other) const {
  return equalsLoadStoreHelper(*this, Other);
}

bool StoreExpression::equals(const Expression &Other) const {
  if (!equalsLoadStoreHelper(*this, Other))
    return false;
  // Make sure that store vs store includes the value operand.
  if (const auto *S = dyn_cast<StoreExpression>(&Other))
    if (getStoredValue() != S->getStoredValue())
      return false;
  return true;
}

// Determine if the edge From->To is a backedge
bool NewGVN::isBackedge(BasicBlock *From, BasicBlock *To) const {
  return From == To ||
         RPOOrdering.lookup(DT->getNode(From)) >=
             RPOOrdering.lookup(DT->getNode(To));
}

#ifndef NDEBUG
static std::string getBlockName(const BasicBlock *B) {
  return DOTGraphTraits<DOTFuncInfo *>::getSimpleNodeLabel(B, nullptr);
}
#endif

// Get a MemoryAccess for an instruction, fake or real.
MemoryUseOrDef *NewGVN::getMemoryAccess(const Instruction *I) const {
  auto *Result = MSSA->getMemoryAccess(I);
  return Result ? Result : TempToMemory.lookup(I);
}

// Get a MemoryPhi for a basic block. These are all real.
MemoryPhi *NewGVN::getMemoryAccess(const BasicBlock *BB) const {
  return MSSA->getMemoryAccess(BB);
}

// Get the basic block from an instruction/memory value.
BasicBlock *NewGVN::getBlockForValue(Value *V) const {
  if (auto *I = dyn_cast<Instruction>(V)) {
    auto *Parent = I->getParent();
    if (Parent)
      return Parent;
    Parent = TempToBlock.lookup(V);
    assert(Parent && "Every fake instruction should have a block");
    return Parent;
  }

  auto *MP = dyn_cast<MemoryPhi>(V);
  assert(MP && "Should have been an instruction or a MemoryPhi");
  return MP->getBlock();
}

// Delete a definitely dead expression, so it can be reused by the expression
// allocator. Some of these are not in creation functions, so we have to accept
// const versions.
void NewGVN::deleteExpression(const Expression *E) const {
  assert(isa<BasicExpression>(E));
  auto *BE = cast<BasicExpression>(E);
  const_cast<BasicExpression *>(BE)->deallocateOperands(ArgRecycler);
  ExpressionAllocator.Deallocate(E);
}

// If V is a predicateinfo copy, get the thing it is a copy of.
static Value *getCopyOf(const Value *V) {
  if (auto *II = dyn_cast<IntrinsicInst>(V))
    if (II->getIntrinsicID() == Intrinsic::ssa_copy)
      return II->getOperand(0);
  return nullptr;
}

// Return true if V is really PN, even accounting for predicateinfo copies.
static bool isCopyOfPHI(const Value *V, const PHINode *PN) {
  return V == PN || getCopyOf(V) == PN;
}

static bool isCopyOfAPHI(const Value *V) {
  auto *CO = getCopyOf(V);
  return CO && isa<PHINode>(CO);
}

// Sort PHI Operands into a canonical order. What we use here is an RPO
// order. The BlockInstRange numbers are generated in an RPO walk of the basic
// blocks.
void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
  llvm::sort(Ops, [&](const ValPair &P1, const ValPair &P2) {
    return BlockInstRange.lookup(P1.second).first <
           BlockInstRange.lookup(P2.second).first;
  });
}

// Return true if V is a value that will always be available (IE can
// be placed anywhere) in the function. We don't do globals here
// because they are often worse to put in place.
static bool alwaysAvailable(Value *V) {
  return isa<Constant>(V) || isa<Argument>(V);
}

// Create a PHIExpression from an array of {incoming value, incoming block}
// pairs. I is the original instruction we are creating a PHIExpression for
// (but may not be a phi node). We require, as an invariant, that all the
// PHIOperands in the same block are sorted the same way. sortPHIOps will sort
// them into a canonical order.
PHIExpression *NewGVN::createPHIExpression(ArrayRef<ValPair> PHIOperands,
                                           const Instruction *I,
                                           BasicBlock *PHIBlock,
                                           bool &HasBackedge,
                                           bool &OriginalOpsConstant) const {
  unsigned NumOps = PHIOperands.size();
  auto *E = new (ExpressionAllocator) PHIExpression(NumOps, PHIBlock);

  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  E->setType(PHIOperands.begin()->first->getType());
  E->setOpcode(Instruction::PHI);

  // Filter out unreachable phi operands.
  auto Filtered = make_filter_range(PHIOperands, [&](const ValPair &P) {
    auto *BB = P.second;
    if (auto *PHIOp = dyn_cast<PHINode>(I))
      if (isCopyOfPHI(P.first, PHIOp))
        return false;
    if (!ReachableEdges.count({BB, PHIBlock}))
      return false;
    // Things in TOPClass are equivalent to everything.
    if (ValueToClass.lookup(P.first) == TOPClass)
      return false;
    OriginalOpsConstant = OriginalOpsConstant && isa<Constant>(P.first);
    HasBackedge = HasBackedge || isBackedge(BB, PHIBlock);
    return lookupOperandLeader(P.first) != I;
  });
  std::transform(Filtered.begin(), Filtered.end(), op_inserter(E),
                 [&](const ValPair &P) -> Value * {
                   return lookupOperandLeader(P.first);
                 });
  return E;
}

// Set basic expression info (Arguments, type, opcode) for Expression
// E from Instruction I in block B.
bool NewGVN::setBasicExpressionInfo(Instruction *I, BasicExpression *E) const {
  bool AllConstant = true;
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
    E->setType(GEP->getSourceElementType());
  else
    E->setType(I->getType());
  E->setOpcode(I->getOpcode());
  E->allocateOperands(ArgRecycler, ExpressionAllocator);

  // Transform the operand array into an operand leader array, and keep track
  // of whether all members are constant.
  std::transform(I->op_begin(), I->op_end(), op_inserter(E), [&](Value *O) {
    auto Operand = lookupOperandLeader(O);
    AllConstant = AllConstant && isa<Constant>(Operand);
    return Operand;
  });

  return AllConstant;
}

const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T,
                                                 Value *Arg1, Value *Arg2,
                                                 Instruction *I) const {
  auto *E = new (ExpressionAllocator) BasicExpression(2);
  // TODO: we need to remove context instruction after Value Tracking
  // can run without context instruction
  const SimplifyQuery Q = SQ.getWithInstruction(I);

  E->setType(T);
  E->setOpcode(Opcode);
  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  if (Instruction::isCommutative(Opcode)) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    if (shouldSwapOperands(Arg1, Arg2))
      std::swap(Arg1, Arg2);
  }
  E->op_push_back(lookupOperandLeader(Arg1));
  E->op_push_back(lookupOperandLeader(Arg2));

  Value *V = simplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), Q);
  if (auto Simplified = checkExprResults(E, I, V)) {
    addAdditionalUsers(Simplified, I);
    return Simplified.Expr;
  }
  return E;
}

// Take a Value returned by simplification of Expression E/Instruction
// I, and see if it resulted in a simpler expression. If so, return
// that expression.
NewGVN::ExprResult NewGVN::checkExprResults(Expression *E, Instruction *I,
                                            Value *V) const {
  if (!V)
    return ExprResult::none();

  if (auto *C = dyn_cast<Constant>(V)) {
    if (I)
      LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
                        << " constant " << *C << "\n");
    NumGVNOpsSimplified++;
    assert(isa<BasicExpression>(E) &&
           "We should always have had a basic expression here");
    deleteExpression(E);
    return ExprResult::some(createConstantExpression(C));
  } else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
    if (I)
      LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
                        << " variable " << *V << "\n");
    deleteExpression(E);
    return ExprResult::some(createVariableExpression(V));
  }

  CongruenceClass *CC = ValueToClass.lookup(V);
  if (CC) {
    if (CC->getLeader() && CC->getLeader() != I) {
      return ExprResult::some(createVariableOrConstant(CC->getLeader()), V);
    }
    if (CC->getDefiningExpr()) {
      if (I)
        LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
                          << " expression " << *CC->getDefiningExpr() << "\n");
      NumGVNOpsSimplified++;
      deleteExpression(E);
      return ExprResult::some(CC->getDefiningExpr(), V);
    }
  }

  return ExprResult::none();
}

// Create a value expression from the instruction I, replacing operands with
// their leaders.
NewGVN::ExprResult NewGVN::createExpression(Instruction *I) const {
  auto *E = new (ExpressionAllocator) BasicExpression(I->getNumOperands());
  // TODO: we need to remove context instruction after Value Tracking
  // can run without context instruction
  const SimplifyQuery Q = SQ.getWithInstruction(I);

  bool AllConstant = setBasicExpressionInfo(I, E);

  if (I->isCommutative()) {
    // Ensure that commutative instructions that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers. Since all commutative instructions have two operands it is more
    // efficient to sort by hand rather than using, say, std::sort.
    assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
    if (shouldSwapOperands(E->getOperand(0), E->getOperand(1)))
      E->swapOperands(0, 1);
  }
  // Perform simplification.
  if (auto *CI = dyn_cast<CmpInst>(I)) {
    // Sort the operand value numbers so x<y and y>x get the same value
    // number.
    CmpInst::Predicate Predicate = CI->getPredicate();
    if (shouldSwapOperands(E->getOperand(0), E->getOperand(1))) {
      E->swapOperands(0, 1);
      Predicate = CmpInst::getSwappedPredicate(Predicate);
    }
    E->setOpcode((CI->getOpcode() << 8) | Predicate);
    // TODO: 25% of our time is spent in simplifyCmpInst with pointer operands
    assert(I->getOperand(0)->getType() == I->getOperand(1)->getType() &&
           "Wrong types on cmp instruction");
    assert((E->getOperand(0)->getType() == I->getOperand(0)->getType() &&
            E->getOperand(1)->getType() == I->getOperand(1)->getType()));
    Value *V =
        simplifyCmpInst(Predicate, E->getOperand(0), E->getOperand(1), Q);
    if (auto Simplified = checkExprResults(E, I, V))
      return Simplified;
  } else if (isa<SelectInst>(I)) {
    if (isa<Constant>(E->getOperand(0)) ||
        E->getOperand(1) == E->getOperand(2)) {
      assert(E->getOperand(1)->getType() == I->getOperand(1)->getType() &&
             E->getOperand(2)->getType() == I->getOperand(2)->getType());
      Value *V = simplifySelectInst(E->getOperand(0), E->getOperand(1),
                                    E->getOperand(2), Q);
      if (auto Simplified = checkExprResults(E, I, V))
        return Simplified;
    }
  } else if (I->isBinaryOp()) {
    Value *V =
        simplifyBinOp(E->getOpcode(), E->getOperand(0), E->getOperand(1), Q);
    if (auto Simplified = checkExprResults(E, I, V))
      return Simplified;
  } else if (auto *CI = dyn_cast<CastInst>(I)) {
    Value *V =
        simplifyCastInst(CI->getOpcode(), E->getOperand(0), CI->getType(), Q);
    if (auto Simplified = checkExprResults(E, I, V))
      return Simplified;
  } else if (auto *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    Value *V = simplifyGEPInst(GEPI->getSourceElementType(), *E->op_begin(),
                               ArrayRef(std::next(E->op_begin()), E->op_end()),
                               GEPI->getNoWrapFlags(), Q);
    if (auto Simplified = checkExprResults(E, I, V))
      return Simplified;
  } else if (AllConstant) {
    // We don't bother trying to simplify unless all of the operands
    // were constant.
    // TODO: There are a lot of Simplify*'s we could call here, if we
    // wanted to. The original motivating case for this code was a
    // zext i1 false to i8, which we don't have an interface to
    // simplify (IE there is no SimplifyZExt).

    SmallVector<Constant *, 8> C;
    for (Value *Arg : E->operands())
      C.emplace_back(cast<Constant>(Arg));

    if (Value *V = ConstantFoldInstOperands(I, C, DL, TLI))
      if (auto Simplified = checkExprResults(E, I, V))
        return Simplified;
  }
  return ExprResult::some(E);
}

const AggregateValueExpression *
NewGVN::createAggregateValueExpression(Instruction *I) const {
  if (auto *II = dyn_cast<InsertValueInst>(I)) {
    auto *E = new (ExpressionAllocator)
        AggregateValueExpression(I->getNumOperands(), II->getNumIndices());
    setBasicExpressionInfo(I, E);
    E->allocateIntOperands(ExpressionAllocator);
    std::copy(II->idx_begin(), II->idx_end(), int_op_inserter(E));
    return E;
  } else if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
    auto *E = new (ExpressionAllocator)
        AggregateValueExpression(I->getNumOperands(), EI->getNumIndices());
    setBasicExpressionInfo(EI, E);
    E->allocateIntOperands(ExpressionAllocator);
    std::copy(EI->idx_begin(), EI->idx_end(), int_op_inserter(E));
    return E;
  }
  llvm_unreachable("Unhandled type of aggregate value operation");
}

const DeadExpression *NewGVN::createDeadExpression() const {
  // DeadExpression has no arguments and all DeadExpressions are the same,
  // so we only need one of them.
  return SingletonDeadExpression;
}

const VariableExpression *NewGVN::createVariableExpression(Value *V) const {
  auto *E = new (ExpressionAllocator) VariableExpression(V);
  E->setOpcode(V->getValueID());
  return E;
}

const Expression *NewGVN::createVariableOrConstant(Value *V) const {
  if (auto *C = dyn_cast<Constant>(V))
    return createConstantExpression(C);
  return createVariableExpression(V);
}

const ConstantExpression *NewGVN::createConstantExpression(Constant *C) const {
  auto *E = new (ExpressionAllocator) ConstantExpression(C);
  E->setOpcode(C->getValueID());
  return E;
}

const UnknownExpression *NewGVN::createUnknownExpression(Instruction *I) const {
  auto *E = new (ExpressionAllocator) UnknownExpression(I);
  E->setOpcode(I->getOpcode());
  return E;
}

const CallExpression *
NewGVN::createCallExpression(CallInst *CI, const MemoryAccess *MA) const {
  // FIXME: Add operand bundles for calls.
  auto *E =
      new (ExpressionAllocator) CallExpression(CI->getNumOperands(), CI, MA);
  setBasicExpressionInfo(CI, E);
  if (CI->isCommutative()) {
    // Ensure that commutative intrinsics that only differ by a permutation
    // of their operands get the same value number by sorting the operand value
    // numbers.
    assert(CI->getNumOperands() >= 2 && "Unsupported commutative intrinsic!");
    if (shouldSwapOperands(E->getOperand(0), E->getOperand(1)))
      E->swapOperands(0, 1);
  }
  return E;
}

// Return true if some equivalent of instruction Inst dominates instruction U.
bool NewGVN::someEquivalentDominates(const Instruction *Inst,
                                     const Instruction *U) const {
  auto *CC = ValueToClass.lookup(Inst);
  // This must be an instruction because we are only called from phi nodes
  // in the case that the value it needs to check against is an instruction.

  // The most likely candidates for dominance are the leader and the next
  // leader.
  // The leader or nextleader will dominate in all cases where there is an
  // equivalent that is higher up in the dom tree.
  // We can't *only* check them, however, because the
  // dominator tree could have an infinite number of non-dominating siblings
  // with instructions that are in the right congruence class.
  // A
  // B C D E F G
  // |
  // H
  // Instruction U could be in H, with equivalents in every other sibling.
  // Depending on the rpo order picked, the leader could be the equivalent in
  // any of these siblings.
  if (!CC)
    return false;
  if (alwaysAvailable(CC->getLeader()))
    return true;
  if (DT->dominates(cast<Instruction>(CC->getLeader()), U))
    return true;
  if (CC->getNextLeader().first &&
      DT->dominates(cast<Instruction>(CC->getNextLeader().first), U))
    return true;
  return llvm::any_of(*CC, [&](const Value *Member) {
    return Member != CC->getLeader() &&
           DT->dominates(cast<Instruction>(Member), U);
  });
}

// See if we have a congruence class and leader for this operand, and if so,
// return it. Otherwise, return the operand itself.
Value *NewGVN::lookupOperandLeader(Value *V) const {
  CongruenceClass *CC = ValueToClass.lookup(V);
  if (CC) {
    // Everything in TOP is represented by poison, as it can be any value.
    // We do have to make sure we get the type right though, so we can't set
    // the RepLeader to poison.
    if (CC == TOPClass)
      return PoisonValue::get(V->getType());
    return CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
  }

  return V;
}

const MemoryAccess *NewGVN::lookupMemoryLeader(const MemoryAccess *MA) const {
  auto *CC = getMemoryClass(MA);
  assert(CC->getMemoryLeader() &&
         "Every MemoryAccess should be mapped to a congruence class with a "
         "representative memory access");
  return CC->getMemoryLeader();
}

// Return true if the MemoryAccess is really equivalent to everything. This is
// equivalent to the lattice value "TOP" in most lattices. This is the initial
// state of all MemoryAccesses.
bool NewGVN::isMemoryAccessTOP(const MemoryAccess *MA) const {
  return getMemoryClass(MA) == TOPClass;
}

LoadExpression *NewGVN::createLoadExpression(Type *LoadType, Value *PointerOp,
                                             LoadInst *LI,
                                             const MemoryAccess *MA) const {
  auto *E =
      new (ExpressionAllocator) LoadExpression(1, LI, lookupMemoryLeader(MA));
  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  E->setType(LoadType);

  // Give stores and loads the same opcode so they value number together.
  E->setOpcode(0);
  E->op_push_back(PointerOp);

  // TODO: Value number heap versions. We may be able to discover
  // things alias analysis can't on its own (IE that a store and a
  // load have the same value, and thus, it isn't clobbering the load).
  return E;
}

const StoreExpression *
NewGVN::createStoreExpression(StoreInst *SI, const MemoryAccess *MA) const {
  auto *StoredValueLeader = lookupOperandLeader(SI->getValueOperand());
  auto *E = new (ExpressionAllocator)
      StoreExpression(SI->getNumOperands(), SI, StoredValueLeader, MA);
  E->allocateOperands(ArgRecycler, ExpressionAllocator);
  E->setType(SI->getValueOperand()->getType());

  // Give stores and loads the same opcode so they value number together.
1411 E->setOpcode(0); 1412 E->op_push_back(lookupOperandLeader(SI->getPointerOperand())); 1413 1414 // TODO: Value number heap versions. We may be able to discover 1415 // things alias analysis can't on it's own (IE that a store and a 1416 // load have the same value, and thus, it isn't clobbering the load). 1417 return E; 1418 } 1419 1420 const Expression *NewGVN::performSymbolicStoreEvaluation(Instruction *I) const { 1421 // Unlike loads, we never try to eliminate stores, so we do not check if they 1422 // are simple and avoid value numbering them. 1423 auto *SI = cast<StoreInst>(I); 1424 auto *StoreAccess = getMemoryAccess(SI); 1425 // Get the expression, if any, for the RHS of the MemoryDef. 1426 const MemoryAccess *StoreRHS = StoreAccess->getDefiningAccess(); 1427 if (EnableStoreRefinement) 1428 StoreRHS = MSSAWalker->getClobberingMemoryAccess(StoreAccess); 1429 // If we bypassed the use-def chains, make sure we add a use. 1430 StoreRHS = lookupMemoryLeader(StoreRHS); 1431 if (StoreRHS != StoreAccess->getDefiningAccess()) 1432 addMemoryUsers(StoreRHS, StoreAccess); 1433 // If we are defined by ourselves, use the live on entry def. 1434 if (StoreRHS == StoreAccess) 1435 StoreRHS = MSSA->getLiveOnEntryDef(); 1436 1437 if (SI->isSimple()) { 1438 // See if we are defined by a previous store expression, it already has a 1439 // value, and it's the same value as our current store. FIXME: Right now, we 1440 // only do this for simple stores, we should expand to cover memcpys, etc. 1441 const auto *LastStore = createStoreExpression(SI, StoreRHS); 1442 const auto *LastCC = ExpressionToClass.lookup(LastStore); 1443 // We really want to check whether the expression we matched was a store. No 1444 // easy way to do that. However, we can check that the class we found has a 1445 // store, which, assuming the value numbering state is not corrupt, is 1446 // sufficient, because we must also be equivalent to that store's expression 1447 // for it to be in the same class as the load. 1448 if (LastCC && LastCC->getStoredValue() == LastStore->getStoredValue()) 1449 return LastStore; 1450 // Also check if our value operand is defined by a load of the same memory 1451 // location, and the memory state is the same as it was then (otherwise, it 1452 // could have been overwritten later. See test32 in 1453 // transforms/DeadStoreElimination/simple.ll). 1454 if (auto *LI = dyn_cast<LoadInst>(LastStore->getStoredValue())) 1455 if ((lookupOperandLeader(LI->getPointerOperand()) == 1456 LastStore->getOperand(0)) && 1457 (lookupMemoryLeader(getMemoryAccess(LI)->getDefiningAccess()) == 1458 StoreRHS)) 1459 return LastStore; 1460 deleteExpression(LastStore); 1461 } 1462 1463 // If the store is not equivalent to anything, value number it as a store that 1464 // produces a unique memory state (instead of using it's MemoryUse, we use 1465 // it's MemoryDef). 1466 return createStoreExpression(SI, StoreAccess); 1467 } 1468 1469 // See if we can extract the value of a loaded pointer from a load, a store, or 1470 // a memory instruction. 1471 const Expression * 1472 NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr, 1473 LoadInst *LI, Instruction *DepInst, 1474 MemoryAccess *DefiningAccess) const { 1475 assert((!LI || LI->isSimple()) && "Not a simple load"); 1476 if (auto *DepSI = dyn_cast<StoreInst>(DepInst)) { 1477 // Can't forward from non-atomic to atomic without violating memory model. 1478 // Also don't need to coerce if they are the same type, we will just 1479 // propagate. 
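    // Illustrative (hypothetical) example of the coercion attempted below:
    //   store i32 305419896, ptr %p   ; 0x12345678
    //   %v = load i8, ptr %p
    // Here analyzeLoadFromClobberingStore yields a non-negative offset, and
    // getConstantValueForLoad extracts the loaded byte, so %v can be valued
    // as a constant rather than as a load expression.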
1480 if (LI->isAtomic() > DepSI->isAtomic() || 1481 LoadType == DepSI->getValueOperand()->getType()) 1482 return nullptr; 1483 int Offset = analyzeLoadFromClobberingStore(LoadType, LoadPtr, DepSI, DL); 1484 if (Offset >= 0) { 1485 if (auto *C = dyn_cast<Constant>( 1486 lookupOperandLeader(DepSI->getValueOperand()))) { 1487 if (Constant *Res = getConstantValueForLoad(C, Offset, LoadType, DL)) { 1488 LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI 1489 << " to constant " << *Res << "\n"); 1490 return createConstantExpression(Res); 1491 } 1492 } 1493 } 1494 } else if (auto *DepLI = dyn_cast<LoadInst>(DepInst)) { 1495 // Can't forward from non-atomic to atomic without violating memory model. 1496 if (LI->isAtomic() > DepLI->isAtomic()) 1497 return nullptr; 1498 int Offset = analyzeLoadFromClobberingLoad(LoadType, LoadPtr, DepLI, DL); 1499 if (Offset >= 0) { 1500 // We can coerce a constant load into a load. 1501 if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI))) 1502 if (auto *PossibleConstant = 1503 getConstantValueForLoad(C, Offset, LoadType, DL)) { 1504 LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI 1505 << " to constant " << *PossibleConstant << "\n"); 1506 return createConstantExpression(PossibleConstant); 1507 } 1508 } 1509 } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) { 1510 int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL); 1511 if (Offset >= 0) { 1512 if (auto *PossibleConstant = 1513 getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) { 1514 LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI 1515 << " to constant " << *PossibleConstant << "\n"); 1516 return createConstantExpression(PossibleConstant); 1517 } 1518 } 1519 } 1520 1521 // All of the below are only true if the loaded pointer is produced 1522 // by the dependent instruction. 1523 if (LoadPtr != lookupOperandLeader(DepInst) && 1524 !AA->isMustAlias(LoadPtr, DepInst)) 1525 return nullptr; 1526 // If this load really doesn't depend on anything, then we must be loading an 1527 // undef value. This can happen when loading for a fresh allocation with no 1528 // intervening stores, for example. Note that this is only true in the case 1529 // that the result of the allocation is pointer equal to the load ptr. 1530 if (isa<AllocaInst>(DepInst)) { 1531 return createConstantExpression(UndefValue::get(LoadType)); 1532 } 1533 // If this load occurs either right after a lifetime begin, 1534 // then the loaded value is undefined. 1535 else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) { 1536 if (II->getIntrinsicID() == Intrinsic::lifetime_start) 1537 return createConstantExpression(UndefValue::get(LoadType)); 1538 } else if (auto *InitVal = 1539 getInitialValueOfAllocation(DepInst, TLI, LoadType)) 1540 return createConstantExpression(InitVal); 1541 1542 return nullptr; 1543 } 1544 1545 const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const { 1546 auto *LI = cast<LoadInst>(I); 1547 1548 // We can eliminate in favor of non-simple loads, but we won't be able to 1549 // eliminate the loads themselves. 1550 if (!LI->isSimple()) 1551 return nullptr; 1552 1553 Value *LoadAddressLeader = lookupOperandLeader(LI->getPointerOperand()); 1554 // Load of undef is UB. 
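  // (Illustrative, hypothetical example: "%v = load i32, ptr undef" is value
  // numbered to poison below instead of producing a load expression.)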
  if (isa<UndefValue>(LoadAddressLeader))
    return createConstantExpression(PoisonValue::get(LI->getType()));
  MemoryAccess *OriginalAccess = getMemoryAccess(I);
  MemoryAccess *DefiningAccess =
      MSSAWalker->getClobberingMemoryAccess(OriginalAccess);

  if (!MSSA->isLiveOnEntryDef(DefiningAccess)) {
    if (auto *MD = dyn_cast<MemoryDef>(DefiningAccess)) {
      Instruction *DefiningInst = MD->getMemoryInst();
      // If the defining instruction is not reachable, replace with poison.
      if (!ReachableBlocks.count(DefiningInst->getParent()))
        return createConstantExpression(PoisonValue::get(LI->getType()));
      // This will handle stores and memory insts. We only do this if the
      // defining access has a different type, or it is a pointer produced by
      // certain memory operations that cause the memory to have a fixed value
      // (IE things like calloc).
      if (const auto *CoercionResult =
              performSymbolicLoadCoercion(LI->getType(), LoadAddressLeader, LI,
                                          DefiningInst, DefiningAccess))
        return CoercionResult;
    }
  }

  const auto *LE = createLoadExpression(LI->getType(), LoadAddressLeader, LI,
                                        DefiningAccess);
  // If our MemoryLeader is not our defining access, add a use to the
  // MemoryLeader, so that we get reprocessed when it changes.
  if (LE->getMemoryLeader() != DefiningAccess)
    addMemoryUsers(LE->getMemoryLeader(), OriginalAccess);
  return LE;
}

NewGVN::ExprResult
NewGVN::performSymbolicPredicateInfoEvaluation(IntrinsicInst *I) const {
  auto *PI = PredInfo->getPredicateInfoFor(I);
  if (!PI)
    return ExprResult::none();

  LLVM_DEBUG(dbgs() << "Found predicate info from instruction!\n");

  const std::optional<PredicateConstraint> &Constraint = PI->getConstraint();
  if (!Constraint)
    return ExprResult::none();

  CmpInst::Predicate Predicate = Constraint->Predicate;
  Value *CmpOp0 = I->getOperand(0);
  Value *CmpOp1 = Constraint->OtherOp;

  Value *FirstOp = lookupOperandLeader(CmpOp0);
  Value *SecondOp = lookupOperandLeader(CmpOp1);
  Value *AdditionallyUsedValue = CmpOp0;

  // Sort the ops.
  if (shouldSwapOperandsForIntrinsic(FirstOp, SecondOp, I)) {
    std::swap(FirstOp, SecondOp);
    Predicate = CmpInst::getSwappedPredicate(Predicate);
    AdditionallyUsedValue = CmpOp1;
  }

  if (Predicate == CmpInst::ICMP_EQ)
    return ExprResult::some(createVariableOrConstant(FirstOp),
                            AdditionallyUsedValue, PI);

  // Handle the special case of floating point.
  if (Predicate == CmpInst::FCMP_OEQ && isa<ConstantFP>(FirstOp) &&
      !cast<ConstantFP>(FirstOp)->isZero())
    return ExprResult::some(createConstantExpression(cast<Constant>(FirstOp)),
                            AdditionallyUsedValue, PI);

  return ExprResult::none();
}

// Evaluate read-only and pure calls, and create an expression result.
NewGVN::ExprResult NewGVN::performSymbolicCallEvaluation(Instruction *I) const {
  auto *CI = cast<CallInst>(I);
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    // Intrinsics with the returned attribute are copies of arguments.
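    // Illustrative (hypothetical) example:
    //   %y = call i32 @llvm.ssa.copy.i32(i32 %x)
    // Here the returned argument is %x; below we first try to refine %y via
    // its predicate info, and otherwise value it the same as %x.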
1632 if (auto *ReturnedValue = II->getReturnedArgOperand()) { 1633 if (II->getIntrinsicID() == Intrinsic::ssa_copy) 1634 if (auto Res = performSymbolicPredicateInfoEvaluation(II)) 1635 return Res; 1636 return ExprResult::some(createVariableOrConstant(ReturnedValue)); 1637 } 1638 } 1639 1640 // FIXME: Currently the calls which may access the thread id may 1641 // be considered as not accessing the memory. But this is 1642 // problematic for coroutines, since coroutines may resume in a 1643 // different thread. So we disable the optimization here for the 1644 // correctness. However, it may block many other correct 1645 // optimizations. Revert this one when we detect the memory 1646 // accessing kind more precisely. 1647 if (CI->getFunction()->isPresplitCoroutine()) 1648 return ExprResult::none(); 1649 1650 // Do not combine convergent calls since they implicitly depend on the set of 1651 // threads that is currently executing, and they might be in different basic 1652 // blocks. 1653 if (CI->isConvergent()) 1654 return ExprResult::none(); 1655 1656 if (AA->doesNotAccessMemory(CI)) { 1657 return ExprResult::some( 1658 createCallExpression(CI, TOPClass->getMemoryLeader())); 1659 } else if (AA->onlyReadsMemory(CI)) { 1660 if (auto *MA = MSSA->getMemoryAccess(CI)) { 1661 auto *DefiningAccess = MSSAWalker->getClobberingMemoryAccess(MA); 1662 return ExprResult::some(createCallExpression(CI, DefiningAccess)); 1663 } else // MSSA determined that CI does not access memory. 1664 return ExprResult::some( 1665 createCallExpression(CI, TOPClass->getMemoryLeader())); 1666 } 1667 return ExprResult::none(); 1668 } 1669 1670 // Retrieve the memory class for a given MemoryAccess. 1671 CongruenceClass *NewGVN::getMemoryClass(const MemoryAccess *MA) const { 1672 auto *Result = MemoryAccessToClass.lookup(MA); 1673 assert(Result && "Should have found memory class"); 1674 return Result; 1675 } 1676 1677 // Update the MemoryAccess equivalence table to say that From is equal to To, 1678 // and return true if this is different from what already existed in the table. 1679 bool NewGVN::setMemoryClass(const MemoryAccess *From, 1680 CongruenceClass *NewClass) { 1681 assert(NewClass && 1682 "Every MemoryAccess should be getting mapped to a non-null class"); 1683 LLVM_DEBUG(dbgs() << "Setting " << *From); 1684 LLVM_DEBUG(dbgs() << " equivalent to congruence class "); 1685 LLVM_DEBUG(dbgs() << NewClass->getID() 1686 << " with current MemoryAccess leader "); 1687 LLVM_DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n"); 1688 1689 auto LookupResult = MemoryAccessToClass.find(From); 1690 bool Changed = false; 1691 // If it's already in the table, see if the value changed. 1692 if (LookupResult != MemoryAccessToClass.end()) { 1693 auto *OldClass = LookupResult->second; 1694 if (OldClass != NewClass) { 1695 // If this is a phi, we have to handle memory member updates. 
      if (auto *MP = dyn_cast<MemoryPhi>(From)) {
        OldClass->memory_erase(MP);
        NewClass->memory_insert(MP);
        // This may have killed the class if it had no non-memory members.
        if (OldClass->getMemoryLeader() == From) {
          if (OldClass->definesNoMemory()) {
            OldClass->setMemoryLeader(nullptr);
          } else {
            OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
            LLVM_DEBUG(dbgs() << "Memory class leader change for class "
                              << OldClass->getID() << " to "
                              << *OldClass->getMemoryLeader()
                              << " due to removal of a memory member " << *From
                              << "\n");
            markMemoryLeaderChangeTouched(OldClass);
          }
        }
      }
      // It wasn't equivalent before, and now it is.
      LookupResult->second = NewClass;
      Changed = true;
    }
  }

  return Changed;
}

// Determine if an instruction is cycle-free. That means the values in the
// instruction don't depend on any expressions that can change value as a
// result of the instruction. For example, a non-cycle-free instruction would
// be v = phi(0, v+1).
bool NewGVN::isCycleFree(const Instruction *I) const {
  // In order to compute cycle-freeness, we do SCC finding on the instruction,
  // and see what kind of SCC it ends up in. If it is a singleton, it is
  // cycle-free. If it is not in a singleton, it is only cycle-free if the
  // other members are all phi nodes (as they do not compute anything, they are
  // copies).
  auto ICS = InstCycleState.lookup(I);
  if (ICS == ICS_Unknown) {
    SCCFinder.Start(I);
    auto &SCC = SCCFinder.getComponentFor(I);
    // It's cycle-free if its size is 1 or the SCC contains *only* phi nodes.
    if (SCC.size() == 1)
      InstCycleState.insert({I, ICS_CycleFree});
    else {
      bool AllPhis = llvm::all_of(SCC, [](const Value *V) {
        return isa<PHINode>(V) || isCopyOfAPHI(V);
      });
      ICS = AllPhis ? ICS_CycleFree : ICS_Cycle;
      for (const auto *Member : SCC)
        if (auto *MemberPhi = dyn_cast<PHINode>(Member))
          InstCycleState.insert({MemberPhi, ICS});
    }
  }
  if (ICS == ICS_Cycle)
    return false;
  return true;
}

// Evaluate PHI nodes symbolically and create an expression result.
const Expression *
NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
                                     Instruction *I,
                                     BasicBlock *PHIBlock) const {
  // True if one of the incoming phi edges is a backedge.
  bool HasBackedge = false;
  // OriginalOpsConstant tracks whether all of the *original* phi operands were
  // constant. This is really shorthand for "this phi cannot cycle due to
  // forward propagation", as any change in value of the phi is guaranteed not
  // to later change the value of the phi. IE it can't be v = phi(undef, v+1).
  bool OriginalOpsConstant = true;
  auto *E = cast<PHIExpression>(createPHIExpression(
      PHIOps, I, PHIBlock, HasBackedge, OriginalOpsConstant));
  // We match the semantics of SimplifyPhiNode from InstructionSimplify here.
  // See if all arguments are the same.
  // We track if any were undef because they need special handling.
  bool HasUndef = false, HasPoison = false;
  auto Filtered = make_filter_range(E->operands(), [&](Value *Arg) {
    if (isa<PoisonValue>(Arg)) {
      HasPoison = true;
      return false;
    }
    if (isa<UndefValue>(Arg)) {
      HasUndef = true;
      return false;
    }
    return true;
  });
  // If we are left with no operands, it's dead.
1785 if (Filtered.empty()) { 1786 // If it has undef or poison at this point, it means there are no-non-undef 1787 // arguments, and thus, the value of the phi node must be undef. 1788 if (HasUndef) { 1789 LLVM_DEBUG( 1790 dbgs() << "PHI Node " << *I 1791 << " has no non-undef arguments, valuing it as undef\n"); 1792 return createConstantExpression(UndefValue::get(I->getType())); 1793 } 1794 if (HasPoison) { 1795 LLVM_DEBUG( 1796 dbgs() << "PHI Node " << *I 1797 << " has no non-poison arguments, valuing it as poison\n"); 1798 return createConstantExpression(PoisonValue::get(I->getType())); 1799 } 1800 1801 LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n"); 1802 deleteExpression(E); 1803 return createDeadExpression(); 1804 } 1805 Value *AllSameValue = *(Filtered.begin()); 1806 ++Filtered.begin(); 1807 // Can't use std::equal here, sadly, because filter.begin moves. 1808 if (llvm::all_of(Filtered, [&](Value *Arg) { return Arg == AllSameValue; })) { 1809 // Can't fold phi(undef, X) -> X unless X can't be poison (thus X is undef 1810 // in the worst case). 1811 if (HasUndef && !isGuaranteedNotToBePoison(AllSameValue, AC, nullptr, DT)) 1812 return E; 1813 1814 // In LLVM's non-standard representation of phi nodes, it's possible to have 1815 // phi nodes with cycles (IE dependent on other phis that are .... dependent 1816 // on the original phi node), especially in weird CFG's where some arguments 1817 // are unreachable, or uninitialized along certain paths. This can cause 1818 // infinite loops during evaluation. We work around this by not trying to 1819 // really evaluate them independently, but instead using a variable 1820 // expression to say if one is equivalent to the other. 1821 // We also special case undef/poison, so that if we have an undef, we can't 1822 // use the common value unless it dominates the phi block. 1823 if (HasPoison || HasUndef) { 1824 // If we have undef and at least one other value, this is really a 1825 // multivalued phi, and we need to know if it's cycle free in order to 1826 // evaluate whether we can ignore the undef. The other parts of this are 1827 // just shortcuts. If there is no backedge, or all operands are 1828 // constants, it also must be cycle free. 1829 if (HasBackedge && !OriginalOpsConstant && 1830 !isa<UndefValue>(AllSameValue) && !isCycleFree(I)) 1831 return E; 1832 1833 // Only have to check for instructions 1834 if (auto *AllSameInst = dyn_cast<Instruction>(AllSameValue)) 1835 if (!someEquivalentDominates(AllSameInst, I)) 1836 return E; 1837 } 1838 // Can't simplify to something that comes later in the iteration. 1839 // Otherwise, when and if it changes congruence class, we will never catch 1840 // up. We will always be a class behind it. 1841 if (isa<Instruction>(AllSameValue) && 1842 InstrToDFSNum(AllSameValue) > InstrToDFSNum(I)) 1843 return E; 1844 NumGVNPhisAllSame++; 1845 LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue 1846 << "\n"); 1847 deleteExpression(E); 1848 return createVariableOrConstant(AllSameValue); 1849 } 1850 return E; 1851 } 1852 1853 const Expression * 1854 NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const { 1855 if (auto *EI = dyn_cast<ExtractValueInst>(I)) { 1856 auto *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand()); 1857 if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0) 1858 // EI is an extract from one of our with.overflow intrinsics. 
Synthesize
      // a semantically equivalent expression instead of an extract value
      // expression.
      return createBinaryExpression(WO->getBinaryOp(), EI->getType(),
                                    WO->getLHS(), WO->getRHS(), I);
  }

  return createAggregateValueExpression(I);
}

NewGVN::ExprResult NewGVN::performSymbolicCmpEvaluation(Instruction *I) const {
  assert(isa<CmpInst>(I) && "Expected a cmp instruction.");

  auto *CI = cast<CmpInst>(I);
  // See if our operands are equal to those of a previous predicate, and if so,
  // if it implies true or false.
  auto Op0 = lookupOperandLeader(CI->getOperand(0));
  auto Op1 = lookupOperandLeader(CI->getOperand(1));
  auto OurPredicate = CI->getPredicate();
  if (shouldSwapOperands(Op0, Op1)) {
    std::swap(Op0, Op1);
    OurPredicate = CI->getSwappedPredicate();
  }

  // Avoid processing the same info twice.
  const PredicateBase *LastPredInfo = nullptr;
  // See if we know something about the comparison itself, like whether it is
  // the target of an assume.
  auto *CmpPI = PredInfo->getPredicateInfoFor(I);
  if (isa_and_nonnull<PredicateAssume>(CmpPI))
    return ExprResult::some(
        createConstantExpression(ConstantInt::getTrue(CI->getType())));

  if (Op0 == Op1) {
    // This condition does not depend on predicates, no need to add users.
    if (CI->isTrueWhenEqual())
      return ExprResult::some(
          createConstantExpression(ConstantInt::getTrue(CI->getType())));
    else if (CI->isFalseWhenEqual())
      return ExprResult::some(
          createConstantExpression(ConstantInt::getFalse(CI->getType())));
  }

  // NOTE: Because we are comparing both operands here and below, and using
  // previous comparisons, we rely on the fact that predicateinfo knows to mark
  // comparisons that use renamed operands as users of the earlier comparisons.
  // It is *not* enough to just mark predicateinfo renamed operands as users of
  // the earlier comparisons, because the *other* operand may have changed in a
  // previous iteration.
  // Example:
  // icmp slt %a, %b
  // %b.0 = ssa.copy(%b)
  // false branch:
  // icmp slt %c, %b.0

  // %c and %a may start out equal, and thus, the code below will say the second
  // icmp is false. %c may become equal to something else, and in that case the
  // second icmp *must* be reexamined, but would not be if only the renamed
  // operands are considered users of the icmp.

  // *Currently* we only check one level of comparisons back, and only mark one
  // level back as touched when changes happen. If you modify this code to look
  // back farther through comparisons, you *must* mark the appropriate
  // comparisons as users in PredicateInfo.cpp, or you will cause bugs.

  // See if we know something just from the operands themselves: check whether
  // our operands have predicate info, so that we may be able to derive
  // something from a previous comparison.
  for (const auto &Op : CI->operands()) {
    auto *PI = PredInfo->getPredicateInfoFor(Op);
    if (const auto *PBranch = dyn_cast_or_null<PredicateBranch>(PI)) {
      if (PI == LastPredInfo)
        continue;
      LastPredInfo = PI;
      // In phi of ops cases, we may have predicate info that we are evaluating
      // in a different context.
1934 if (!DT->dominates(PBranch->To, I->getParent())) 1935 continue; 1936 // TODO: Along the false edge, we may know more things too, like 1937 // icmp of 1938 // same operands is false. 1939 // TODO: We only handle actual comparison conditions below, not 1940 // and/or. 1941 auto *BranchCond = dyn_cast<CmpInst>(PBranch->Condition); 1942 if (!BranchCond) 1943 continue; 1944 auto *BranchOp0 = lookupOperandLeader(BranchCond->getOperand(0)); 1945 auto *BranchOp1 = lookupOperandLeader(BranchCond->getOperand(1)); 1946 auto BranchPredicate = BranchCond->getPredicate(); 1947 if (shouldSwapOperands(BranchOp0, BranchOp1)) { 1948 std::swap(BranchOp0, BranchOp1); 1949 BranchPredicate = BranchCond->getSwappedPredicate(); 1950 } 1951 if (BranchOp0 == Op0 && BranchOp1 == Op1) { 1952 if (PBranch->TrueEdge) { 1953 // If we know the previous predicate is true and we are in the true 1954 // edge then we may be implied true or false. 1955 if (CmpInst::isImpliedTrueByMatchingCmp(BranchPredicate, 1956 OurPredicate)) { 1957 return ExprResult::some( 1958 createConstantExpression(ConstantInt::getTrue(CI->getType())), 1959 PI); 1960 } 1961 1962 if (CmpInst::isImpliedFalseByMatchingCmp(BranchPredicate, 1963 OurPredicate)) { 1964 return ExprResult::some( 1965 createConstantExpression(ConstantInt::getFalse(CI->getType())), 1966 PI); 1967 } 1968 } else { 1969 // Just handle the ne and eq cases, where if we have the same 1970 // operands, we may know something. 1971 if (BranchPredicate == OurPredicate) { 1972 // Same predicate, same ops,we know it was false, so this is false. 1973 return ExprResult::some( 1974 createConstantExpression(ConstantInt::getFalse(CI->getType())), 1975 PI); 1976 } else if (BranchPredicate == 1977 CmpInst::getInversePredicate(OurPredicate)) { 1978 // Inverse predicate, we know the other was false, so this is true. 1979 return ExprResult::some( 1980 createConstantExpression(ConstantInt::getTrue(CI->getType())), 1981 PI); 1982 } 1983 } 1984 } 1985 } 1986 } 1987 // Create expression will take care of simplifyCmpInst 1988 return createExpression(I); 1989 } 1990 1991 // Substitute and symbolize the instruction before value numbering. 1992 NewGVN::ExprResult 1993 NewGVN::performSymbolicEvaluation(Instruction *I, 1994 SmallPtrSetImpl<Value *> &Visited) const { 1995 1996 const Expression *E = nullptr; 1997 // TODO: memory intrinsics. 1998 // TODO: Some day, we should do the forward propagation and reassociation 1999 // parts of the algorithm. 2000 switch (I->getOpcode()) { 2001 case Instruction::ExtractValue: 2002 case Instruction::InsertValue: 2003 E = performSymbolicAggrValueEvaluation(I); 2004 break; 2005 case Instruction::PHI: { 2006 SmallVector<ValPair, 3> Ops; 2007 auto *PN = cast<PHINode>(I); 2008 for (unsigned i = 0; i < PN->getNumOperands(); ++i) 2009 Ops.push_back({PN->getIncomingValue(i), PN->getIncomingBlock(i)}); 2010 // Sort to ensure the invariant createPHIExpression requires is met. 
2011 sortPHIOps(Ops); 2012 E = performSymbolicPHIEvaluation(Ops, I, getBlockForValue(I)); 2013 } break; 2014 case Instruction::Call: 2015 return performSymbolicCallEvaluation(I); 2016 break; 2017 case Instruction::Store: 2018 E = performSymbolicStoreEvaluation(I); 2019 break; 2020 case Instruction::Load: 2021 E = performSymbolicLoadEvaluation(I); 2022 break; 2023 case Instruction::BitCast: 2024 case Instruction::AddrSpaceCast: 2025 case Instruction::Freeze: 2026 return createExpression(I); 2027 break; 2028 case Instruction::ICmp: 2029 case Instruction::FCmp: 2030 return performSymbolicCmpEvaluation(I); 2031 break; 2032 case Instruction::FNeg: 2033 case Instruction::Add: 2034 case Instruction::FAdd: 2035 case Instruction::Sub: 2036 case Instruction::FSub: 2037 case Instruction::Mul: 2038 case Instruction::FMul: 2039 case Instruction::UDiv: 2040 case Instruction::SDiv: 2041 case Instruction::FDiv: 2042 case Instruction::URem: 2043 case Instruction::SRem: 2044 case Instruction::FRem: 2045 case Instruction::Shl: 2046 case Instruction::LShr: 2047 case Instruction::AShr: 2048 case Instruction::And: 2049 case Instruction::Or: 2050 case Instruction::Xor: 2051 case Instruction::Trunc: 2052 case Instruction::ZExt: 2053 case Instruction::SExt: 2054 case Instruction::FPToUI: 2055 case Instruction::FPToSI: 2056 case Instruction::UIToFP: 2057 case Instruction::SIToFP: 2058 case Instruction::FPTrunc: 2059 case Instruction::FPExt: 2060 case Instruction::PtrToInt: 2061 case Instruction::IntToPtr: 2062 case Instruction::Select: 2063 case Instruction::ExtractElement: 2064 case Instruction::InsertElement: 2065 case Instruction::GetElementPtr: 2066 return createExpression(I); 2067 break; 2068 case Instruction::ShuffleVector: 2069 // FIXME: Add support for shufflevector to createExpression. 2070 return ExprResult::none(); 2071 default: 2072 return ExprResult::none(); 2073 } 2074 return ExprResult::some(E); 2075 } 2076 2077 // Look up a container of values/instructions in a map, and touch all the 2078 // instructions in the container. Then erase value from the map. 2079 template <typename Map, typename KeyType> 2080 void NewGVN::touchAndErase(Map &M, const KeyType &Key) { 2081 const auto Result = M.find_as(Key); 2082 if (Result != M.end()) { 2083 for (const typename Map::mapped_type::value_type Mapped : Result->second) 2084 TouchedInstructions.set(InstrToDFSNum(Mapped)); 2085 M.erase(Result); 2086 } 2087 } 2088 2089 void NewGVN::addAdditionalUsers(Value *To, Value *User) const { 2090 assert(User && To != User); 2091 if (isa<Instruction>(To)) 2092 AdditionalUsers[To].insert(User); 2093 } 2094 2095 void NewGVN::addAdditionalUsers(ExprResult &Res, Instruction *User) const { 2096 if (Res.ExtraDep && Res.ExtraDep != User) 2097 addAdditionalUsers(Res.ExtraDep, User); 2098 Res.ExtraDep = nullptr; 2099 2100 if (Res.PredDep) { 2101 if (const auto *PBranch = dyn_cast<PredicateBranch>(Res.PredDep)) 2102 PredicateToUsers[PBranch->Condition].insert(User); 2103 else if (const auto *PAssume = dyn_cast<PredicateAssume>(Res.PredDep)) 2104 PredicateToUsers[PAssume->Condition].insert(User); 2105 } 2106 Res.PredDep = nullptr; 2107 } 2108 2109 void NewGVN::markUsersTouched(Value *V) { 2110 // Now mark the users as touched. 
2111 for (auto *User : V->users()) { 2112 assert(isa<Instruction>(User) && "Use of value not within an instruction?"); 2113 TouchedInstructions.set(InstrToDFSNum(User)); 2114 } 2115 touchAndErase(AdditionalUsers, V); 2116 } 2117 2118 void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const { 2119 LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n"); 2120 MemoryToUsers[To].insert(U); 2121 } 2122 2123 void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) { 2124 TouchedInstructions.set(MemoryToDFSNum(MA)); 2125 } 2126 2127 void NewGVN::markMemoryUsersTouched(const MemoryAccess *MA) { 2128 if (isa<MemoryUse>(MA)) 2129 return; 2130 for (const auto *U : MA->users()) 2131 TouchedInstructions.set(MemoryToDFSNum(U)); 2132 touchAndErase(MemoryToUsers, MA); 2133 } 2134 2135 // Touch all the predicates that depend on this instruction. 2136 void NewGVN::markPredicateUsersTouched(Instruction *I) { 2137 touchAndErase(PredicateToUsers, I); 2138 } 2139 2140 // Mark users affected by a memory leader change. 2141 void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) { 2142 for (const auto *M : CC->memory()) 2143 markMemoryDefTouched(M); 2144 } 2145 2146 // Touch the instructions that need to be updated after a congruence class has a 2147 // leader change, and mark changed values. 2148 void NewGVN::markValueLeaderChangeTouched(CongruenceClass *CC) { 2149 for (auto *M : *CC) { 2150 if (auto *I = dyn_cast<Instruction>(M)) 2151 TouchedInstructions.set(InstrToDFSNum(I)); 2152 LeaderChanges.insert(M); 2153 } 2154 } 2155 2156 // Give a range of things that have instruction DFS numbers, this will return 2157 // the member of the range with the smallest dfs number. 2158 template <class T, class Range> 2159 T *NewGVN::getMinDFSOfRange(const Range &R) const { 2160 std::pair<T *, unsigned> MinDFS = {nullptr, ~0U}; 2161 for (const auto X : R) { 2162 auto DFSNum = InstrToDFSNum(X); 2163 if (DFSNum < MinDFS.second) 2164 MinDFS = {X, DFSNum}; 2165 } 2166 return MinDFS.first; 2167 } 2168 2169 // This function returns the MemoryAccess that should be the next leader of 2170 // congruence class CC, under the assumption that the current leader is going to 2171 // disappear. 2172 const MemoryAccess *NewGVN::getNextMemoryLeader(CongruenceClass *CC) const { 2173 // TODO: If this ends up to slow, we can maintain a next memory leader like we 2174 // do for regular leaders. 2175 // Make sure there will be a leader to find. 2176 assert(!CC->definesNoMemory() && "Can't get next leader if there is none"); 2177 if (CC->getStoreCount() > 0) { 2178 if (auto *NL = dyn_cast_or_null<StoreInst>(CC->getNextLeader().first)) 2179 return getMemoryAccess(NL); 2180 // Find the store with the minimum DFS number. 2181 auto *V = getMinDFSOfRange<Value>(make_filter_range( 2182 *CC, [&](const Value *V) { return isa<StoreInst>(V); })); 2183 return getMemoryAccess(cast<StoreInst>(V)); 2184 } 2185 assert(CC->getStoreCount() == 0); 2186 2187 // Given our assertion, hitting this part must mean 2188 // !OldClass->memory_empty() 2189 if (CC->memory_size() == 1) 2190 return *CC->memory_begin(); 2191 return getMinDFSOfRange<const MemoryPhi>(CC->memory()); 2192 } 2193 2194 // This function returns the next value leader of a congruence class, under the 2195 // assumption that the current leader is going away. This should end up being 2196 // the next most dominating member. 
2197 Value *NewGVN::getNextValueLeader(CongruenceClass *CC) const { 2198 // We don't need to sort members if there is only 1, and we don't care about 2199 // sorting the TOP class because everything either gets out of it or is 2200 // unreachable. 2201 2202 if (CC->size() == 1 || CC == TOPClass) { 2203 return *(CC->begin()); 2204 } else if (CC->getNextLeader().first) { 2205 ++NumGVNAvoidedSortedLeaderChanges; 2206 return CC->getNextLeader().first; 2207 } else { 2208 ++NumGVNSortedLeaderChanges; 2209 // NOTE: If this ends up to slow, we can maintain a dual structure for 2210 // member testing/insertion, or keep things mostly sorted, and sort only 2211 // here, or use SparseBitVector or .... 2212 return getMinDFSOfRange<Value>(*CC); 2213 } 2214 } 2215 2216 // Move a MemoryAccess, currently in OldClass, to NewClass, including updates to 2217 // the memory members, etc for the move. 2218 // 2219 // The invariants of this function are: 2220 // 2221 // - I must be moving to NewClass from OldClass 2222 // - The StoreCount of OldClass and NewClass is expected to have been updated 2223 // for I already if it is a store. 2224 // - The OldClass memory leader has not been updated yet if I was the leader. 2225 void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I, 2226 MemoryAccess *InstMA, 2227 CongruenceClass *OldClass, 2228 CongruenceClass *NewClass) { 2229 // If the leader is I, and we had a representative MemoryAccess, it should 2230 // be the MemoryAccess of OldClass. 2231 assert((!InstMA || !OldClass->getMemoryLeader() || 2232 OldClass->getLeader() != I || 2233 MemoryAccessToClass.lookup(OldClass->getMemoryLeader()) == 2234 MemoryAccessToClass.lookup(InstMA)) && 2235 "Representative MemoryAccess mismatch"); 2236 // First, see what happens to the new class 2237 if (!NewClass->getMemoryLeader()) { 2238 // Should be a new class, or a store becoming a leader of a new class. 2239 assert(NewClass->size() == 1 || 2240 (isa<StoreInst>(I) && NewClass->getStoreCount() == 1)); 2241 NewClass->setMemoryLeader(InstMA); 2242 // Mark it touched if we didn't just create a singleton 2243 LLVM_DEBUG(dbgs() << "Memory class leader change for class " 2244 << NewClass->getID() 2245 << " due to new memory instruction becoming leader\n"); 2246 markMemoryLeaderChangeTouched(NewClass); 2247 } 2248 setMemoryClass(InstMA, NewClass); 2249 // Now, fixup the old class if necessary 2250 if (OldClass->getMemoryLeader() == InstMA) { 2251 if (!OldClass->definesNoMemory()) { 2252 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass)); 2253 LLVM_DEBUG(dbgs() << "Memory class leader change for class " 2254 << OldClass->getID() << " to " 2255 << *OldClass->getMemoryLeader() 2256 << " due to removal of old leader " << *InstMA << "\n"); 2257 markMemoryLeaderChangeTouched(OldClass); 2258 } else 2259 OldClass->setMemoryLeader(nullptr); 2260 } 2261 } 2262 2263 // Move a value, currently in OldClass, to be part of NewClass 2264 // Update OldClass and NewClass for the move (including changing leaders, etc). 2265 void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E, 2266 CongruenceClass *OldClass, 2267 CongruenceClass *NewClass) { 2268 if (I == OldClass->getNextLeader().first) 2269 OldClass->resetNextLeader(); 2270 2271 OldClass->erase(I); 2272 NewClass->insert(I); 2273 2274 // Ensure that the leader has the lowest RPO. If the leader changed notify all 2275 // members of the class. 
2276 if (NewClass->getLeader() != I && 2277 NewClass->addPossibleLeader({I, InstrToDFSNum(I)})) { 2278 markValueLeaderChangeTouched(NewClass); 2279 } 2280 2281 // Handle our special casing of stores. 2282 if (auto *SI = dyn_cast<StoreInst>(I)) { 2283 OldClass->decStoreCount(); 2284 // Okay, so when do we want to make a store a leader of a class? 2285 // If we have a store defined by an earlier load, we want the earlier load 2286 // to lead the class. 2287 // If we have a store defined by something else, we want the store to lead 2288 // the class so everything else gets the "something else" as a value. 2289 // If we have a store as the single member of the class, we want the store 2290 // as the leader 2291 if (NewClass->getStoreCount() == 0 && !NewClass->getStoredValue()) { 2292 // If it's a store expression we are using, it means we are not equivalent 2293 // to something earlier. 2294 if (auto *SE = dyn_cast<StoreExpression>(E)) { 2295 NewClass->setStoredValue(SE->getStoredValue()); 2296 markValueLeaderChangeTouched(NewClass); 2297 // Shift the new class leader to be the store 2298 LLVM_DEBUG(dbgs() << "Changing leader of congruence class " 2299 << NewClass->getID() << " from " 2300 << *NewClass->getLeader() << " to " << *SI 2301 << " because store joined class\n"); 2302 // If we changed the leader, we have to mark it changed because we don't 2303 // know what it will do to symbolic evaluation. 2304 NewClass->setLeader({SI, InstrToDFSNum(SI)}); 2305 } 2306 // We rely on the code below handling the MemoryAccess change. 2307 } 2308 NewClass->incStoreCount(); 2309 } 2310 // True if there is no memory instructions left in a class that had memory 2311 // instructions before. 2312 2313 // If it's not a memory use, set the MemoryAccess equivalence 2314 auto *InstMA = dyn_cast_or_null<MemoryDef>(getMemoryAccess(I)); 2315 if (InstMA) 2316 moveMemoryToNewCongruenceClass(I, InstMA, OldClass, NewClass); 2317 ValueToClass[I] = NewClass; 2318 // See if we destroyed the class or need to swap leaders. 2319 if (OldClass->empty() && OldClass != TOPClass) { 2320 if (OldClass->getDefiningExpr()) { 2321 LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr() 2322 << " from table\n"); 2323 // We erase it as an exact expression to make sure we don't just erase an 2324 // equivalent one. 2325 auto Iter = ExpressionToClass.find_as( 2326 ExactEqualsExpression(*OldClass->getDefiningExpr())); 2327 if (Iter != ExpressionToClass.end()) 2328 ExpressionToClass.erase(Iter); 2329 #ifdef EXPENSIVE_CHECKS 2330 assert( 2331 (*OldClass->getDefiningExpr() != *E || ExpressionToClass.lookup(E)) && 2332 "We erased the expression we just inserted, which should not happen"); 2333 #endif 2334 } 2335 } else if (OldClass->getLeader() == I) { 2336 // When the leader changes, the value numbering of 2337 // everything may change due to symbolization changes, so we need to 2338 // reprocess. 2339 LLVM_DEBUG(dbgs() << "Value class leader change for class " 2340 << OldClass->getID() << "\n"); 2341 ++NumGVNLeaderChanges; 2342 // Destroy the stored value if there are no more stores to represent it. 2343 // Note that this is basically clean up for the expression removal that 2344 // happens below. If we remove stores from a class, we may leave it as a 2345 // class of equivalent memory phis. 
2346 if (OldClass->getStoreCount() == 0) { 2347 if (OldClass->getStoredValue()) 2348 OldClass->setStoredValue(nullptr); 2349 } 2350 OldClass->setLeader({getNextValueLeader(OldClass), 2351 InstrToDFSNum(getNextValueLeader(OldClass))}); 2352 OldClass->resetNextLeader(); 2353 markValueLeaderChangeTouched(OldClass); 2354 } 2355 } 2356 2357 // For a given expression, mark the phi of ops instructions that could have 2358 // changed as a result. 2359 void NewGVN::markPhiOfOpsChanged(const Expression *E) { 2360 touchAndErase(ExpressionToPhiOfOps, E); 2361 } 2362 2363 // Perform congruence finding on a given value numbering expression. 2364 void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) { 2365 // This is guaranteed to return something, since it will at least find 2366 // TOP. 2367 2368 CongruenceClass *IClass = ValueToClass.lookup(I); 2369 assert(IClass && "Should have found a IClass"); 2370 // Dead classes should have been eliminated from the mapping. 2371 assert(!IClass->isDead() && "Found a dead class"); 2372 2373 CongruenceClass *EClass = nullptr; 2374 if (const auto *VE = dyn_cast<VariableExpression>(E)) { 2375 EClass = ValueToClass.lookup(VE->getVariableValue()); 2376 } else if (isa<DeadExpression>(E)) { 2377 EClass = TOPClass; 2378 } 2379 if (!EClass) { 2380 auto lookupResult = ExpressionToClass.insert({E, nullptr}); 2381 2382 // If it's not in the value table, create a new congruence class. 2383 if (lookupResult.second) { 2384 CongruenceClass *NewClass = createCongruenceClass(nullptr, E); 2385 auto place = lookupResult.first; 2386 place->second = NewClass; 2387 2388 // Constants and variables should always be made the leader. 2389 if (const auto *CE = dyn_cast<ConstantExpression>(E)) { 2390 NewClass->setLeader({CE->getConstantValue(), 0}); 2391 } else if (const auto *SE = dyn_cast<StoreExpression>(E)) { 2392 StoreInst *SI = SE->getStoreInst(); 2393 NewClass->setLeader({SI, InstrToDFSNum(SI)}); 2394 NewClass->setStoredValue(SE->getStoredValue()); 2395 // The RepMemoryAccess field will be filled in properly by the 2396 // moveValueToNewCongruenceClass call. 
2397 } else { 2398 NewClass->setLeader({I, InstrToDFSNum(I)}); 2399 } 2400 assert(!isa<VariableExpression>(E) && 2401 "VariableExpression should have been handled already"); 2402 2403 EClass = NewClass; 2404 LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I 2405 << " using expression " << *E << " at " 2406 << NewClass->getID() << " and leader " 2407 << *(NewClass->getLeader())); 2408 if (NewClass->getStoredValue()) 2409 LLVM_DEBUG(dbgs() << " and stored value " 2410 << *(NewClass->getStoredValue())); 2411 LLVM_DEBUG(dbgs() << "\n"); 2412 } else { 2413 EClass = lookupResult.first->second; 2414 if (isa<ConstantExpression>(E)) 2415 assert((isa<Constant>(EClass->getLeader()) || 2416 (EClass->getStoredValue() && 2417 isa<Constant>(EClass->getStoredValue()))) && 2418 "Any class with a constant expression should have a " 2419 "constant leader"); 2420 2421 assert(EClass && "Somehow don't have an eclass"); 2422 2423 assert(!EClass->isDead() && "We accidentally looked up a dead class"); 2424 } 2425 } 2426 bool ClassChanged = IClass != EClass; 2427 bool LeaderChanged = LeaderChanges.erase(I); 2428 if (ClassChanged || LeaderChanged) { 2429 LLVM_DEBUG(dbgs() << "New class " << EClass->getID() << " for expression " 2430 << *E << "\n"); 2431 if (ClassChanged) { 2432 moveValueToNewCongruenceClass(I, E, IClass, EClass); 2433 markPhiOfOpsChanged(E); 2434 } 2435 2436 markUsersTouched(I); 2437 if (MemoryAccess *MA = getMemoryAccess(I)) 2438 markMemoryUsersTouched(MA); 2439 if (auto *CI = dyn_cast<CmpInst>(I)) 2440 markPredicateUsersTouched(CI); 2441 } 2442 // If we changed the class of the store, we want to ensure nothing finds the 2443 // old store expression. In particular, loads do not compare against stored 2444 // value, so they will find old store expressions (and associated class 2445 // mappings) if we leave them in the table. 2446 if (ClassChanged && isa<StoreInst>(I)) { 2447 auto *OldE = ValueToExpression.lookup(I); 2448 // It could just be that the old class died. We don't want to erase it if we 2449 // just moved classes. 2450 if (OldE && isa<StoreExpression>(OldE) && *E != *OldE) { 2451 // Erase this as an exact expression to ensure we don't erase expressions 2452 // equivalent to it. 2453 auto Iter = ExpressionToClass.find_as(ExactEqualsExpression(*OldE)); 2454 if (Iter != ExpressionToClass.end()) 2455 ExpressionToClass.erase(Iter); 2456 } 2457 } 2458 ValueToExpression[I] = E; 2459 } 2460 2461 // Process the fact that Edge (from, to) is reachable, including marking 2462 // any newly reachable blocks and instructions for processing. 2463 void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) { 2464 // Check if the Edge was reachable before. 2465 if (ReachableEdges.insert({From, To}).second) { 2466 // If this block wasn't reachable before, all instructions are touched. 2467 if (ReachableBlocks.insert(To).second) { 2468 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To) 2469 << " marked reachable\n"); 2470 const auto &InstRange = BlockInstRange.lookup(To); 2471 TouchedInstructions.set(InstRange.first, InstRange.second); 2472 } else { 2473 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To) 2474 << " was reachable, but new edge {" 2475 << getBlockName(From) << "," << getBlockName(To) 2476 << "} to it found\n"); 2477 2478 // We've made an edge reachable to an existing block, which may 2479 // impact predicates. Otherwise, only mark the phi nodes as touched, as 2480 // they are the only thing that depend on new edges. 
Anything using their 2481 // values will get propagated to if necessary. 2482 if (MemoryAccess *MemPhi = getMemoryAccess(To)) 2483 TouchedInstructions.set(InstrToDFSNum(MemPhi)); 2484 2485 // FIXME: We should just add a union op on a Bitvector and 2486 // SparseBitVector. We can do it word by word faster than we are doing it 2487 // here. 2488 for (auto InstNum : RevisitOnReachabilityChange[To]) 2489 TouchedInstructions.set(InstNum); 2490 } 2491 } 2492 } 2493 2494 // Given a predicate condition (from a switch, cmp, or whatever) and a block, 2495 // see if we know some constant value for it already. 2496 Value *NewGVN::findConditionEquivalence(Value *Cond) const { 2497 auto Result = lookupOperandLeader(Cond); 2498 return isa<Constant>(Result) ? Result : nullptr; 2499 } 2500 2501 // Process the outgoing edges of a block for reachability. 2502 void NewGVN::processOutgoingEdges(Instruction *TI, BasicBlock *B) { 2503 // Evaluate reachability of terminator instruction. 2504 Value *Cond; 2505 BasicBlock *TrueSucc, *FalseSucc; 2506 if (match(TI, m_Br(m_Value(Cond), TrueSucc, FalseSucc))) { 2507 Value *CondEvaluated = findConditionEquivalence(Cond); 2508 if (!CondEvaluated) { 2509 if (auto *I = dyn_cast<Instruction>(Cond)) { 2510 SmallPtrSet<Value *, 4> Visited; 2511 auto Res = performSymbolicEvaluation(I, Visited); 2512 if (const auto *CE = dyn_cast_or_null<ConstantExpression>(Res.Expr)) { 2513 CondEvaluated = CE->getConstantValue(); 2514 addAdditionalUsers(Res, I); 2515 } else { 2516 // Did not use simplification result, no need to add the extra 2517 // dependency. 2518 Res.ExtraDep = nullptr; 2519 } 2520 } else if (isa<ConstantInt>(Cond)) { 2521 CondEvaluated = Cond; 2522 } 2523 } 2524 ConstantInt *CI; 2525 if (CondEvaluated && (CI = dyn_cast<ConstantInt>(CondEvaluated))) { 2526 if (CI->isOne()) { 2527 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI 2528 << " evaluated to true\n"); 2529 updateReachableEdge(B, TrueSucc); 2530 } else if (CI->isZero()) { 2531 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI 2532 << " evaluated to false\n"); 2533 updateReachableEdge(B, FalseSucc); 2534 } 2535 } else { 2536 updateReachableEdge(B, TrueSucc); 2537 updateReachableEdge(B, FalseSucc); 2538 } 2539 } else if (auto *SI = dyn_cast<SwitchInst>(TI)) { 2540 // For switches, propagate the case values into the case 2541 // destinations. 2542 2543 Value *SwitchCond = SI->getCondition(); 2544 Value *CondEvaluated = findConditionEquivalence(SwitchCond); 2545 // See if we were able to turn this switch statement into a constant. 2546 if (CondEvaluated && isa<ConstantInt>(CondEvaluated)) { 2547 auto *CondVal = cast<ConstantInt>(CondEvaluated); 2548 // We should be able to get case value for this. 2549 auto Case = *SI->findCaseValue(CondVal); 2550 if (Case.getCaseSuccessor() == SI->getDefaultDest()) { 2551 // We proved the value is outside of the range of the case. 2552 // We can't do anything other than mark the default dest as reachable, 2553 // and go home. 2554 updateReachableEdge(B, SI->getDefaultDest()); 2555 return; 2556 } 2557 // Now get where it goes and mark it reachable. 2558 BasicBlock *TargetBlock = Case.getCaseSuccessor(); 2559 updateReachableEdge(B, TargetBlock); 2560 } else { 2561 for (BasicBlock *TargetBlock : successors(SI->getParent())) 2562 updateReachableEdge(B, TargetBlock); 2563 } 2564 } else { 2565 // Otherwise this is either unconditional, or a type we have no 2566 // idea about. Just mark successors as reachable. 
    for (BasicBlock *TargetBlock : successors(TI->getParent()))
      updateReachableEdge(B, TargetBlock);

    // This also may be a memory defining terminator, in which case, set it
    // equivalent only to itself.
    //
    auto *MA = getMemoryAccess(TI);
    if (MA && !isa<MemoryUse>(MA)) {
      auto *CC = ensureLeaderOfMemoryClass(MA);
      if (setMemoryClass(MA, CC))
        markMemoryUsersTouched(MA);
    }
  }
}

// Remove the PHI of Ops PHI for I.
void NewGVN::removePhiOfOps(Instruction *I, PHINode *PHITemp) {
  InstrDFS.erase(PHITemp);
  // It's still a temp instruction. We keep it in the array so it gets erased.
  // However, it's no longer used by I, or in the block.
  TempToBlock.erase(PHITemp);
  RealToTemp.erase(I);
  // We don't remove the users from the phi node uses. This wastes a little
  // time, but such is life. We could use two sets to track which were there
  // at the start of NewGVN, and which were added, but right now the cost of
  // tracking is more than the cost of checking for more phi of ops.
}

// Add PHI Op in BB as a PHI of operations version of ExistingValue.
void NewGVN::addPhiOfOps(PHINode *Op, BasicBlock *BB,
                         Instruction *ExistingValue) {
  InstrDFS[Op] = InstrToDFSNum(ExistingValue);
  AllTempInstructions.insert(Op);
  TempToBlock[Op] = BB;
  RealToTemp[ExistingValue] = Op;
  // Add all users to the phi node uses, as they are now uses of the phi of ops
  // phis and may themselves be phi of ops.
  for (auto *U : ExistingValue->users())
    if (auto *UI = dyn_cast<Instruction>(U))
      PHINodeUses.insert(UI);
}

static bool okayForPHIOfOps(const Instruction *I) {
  if (!EnablePhiOfOps)
    return false;
  return isa<BinaryOperator>(I) || isa<SelectInst>(I) || isa<CmpInst>(I) ||
         isa<LoadInst>(I);
}

// Return true if this operand will be safe to use for phi of ops.
//
// The reason some operands are unsafe is that we are not trying to recursively
// translate everything back through phi nodes. We actually expect some lookups
// of expressions to fail; in particular, a lookup where the expression cannot
// exist in the predecessor. This is true even if the expression, as shown, can
// be determined to be constant.
bool NewGVN::OpIsSafeForPHIOfOps(Value *V, const BasicBlock *PHIBlock,
                                 SmallPtrSetImpl<const Value *> &Visited) {
  SmallVector<Value *, 4> Worklist;
  Worklist.push_back(V);
  while (!Worklist.empty()) {
    auto *I = Worklist.pop_back_val();
    if (!isa<Instruction>(I))
      continue;

    auto OISIt = OpSafeForPHIOfOps.find({I, CacheIdx});
    if (OISIt != OpSafeForPHIOfOps.end())
      return OISIt->second;

    // Keep walking until we either dominate the phi block, or hit a phi, or
    // run out of things to check.
    if (DT->properlyDominates(getBlockForValue(I), PHIBlock)) {
      OpSafeForPHIOfOps.insert({{I, CacheIdx}, true});
      continue;
    }
    // PHI in the same block.
    if (isa<PHINode>(I) && getBlockForValue(I) == PHIBlock) {
      OpSafeForPHIOfOps.insert({{I, CacheIdx}, false});
      return false;
    }

    auto *OrigI = cast<Instruction>(I);
    // When we hit an instruction that reads memory (load, call, etc.), we must
    // consider any store that may happen in the loop. For now, we assume the
    // worst: there is a store in the loop that aliases with this read.
2652 // The case where the load is outside the loop is already covered by the 2653 // dominator check above. 2654 // TODO: relax this condition 2655 if (OrigI->mayReadFromMemory()) 2656 return false; 2657 2658 // Check the operands of the current instruction. 2659 for (auto *Op : OrigI->operand_values()) { 2660 if (!isa<Instruction>(Op)) 2661 continue; 2662 // Stop now if we find an unsafe operand. 2663 auto OISIt = OpSafeForPHIOfOps.find({OrigI, CacheIdx}); 2664 if (OISIt != OpSafeForPHIOfOps.end()) { 2665 if (!OISIt->second) { 2666 OpSafeForPHIOfOps.insert({{I, CacheIdx}, false}); 2667 return false; 2668 } 2669 continue; 2670 } 2671 if (!Visited.insert(Op).second) 2672 continue; 2673 Worklist.push_back(cast<Instruction>(Op)); 2674 } 2675 } 2676 OpSafeForPHIOfOps.insert({{V, CacheIdx}, true}); 2677 return true; 2678 } 2679 2680 // Try to find a leader for instruction TransInst, which is a phi translated 2681 // version of something in our original program. Visited is used to ensure we 2682 // don't infinite loop during translations of cycles. OrigInst is the 2683 // instruction in the original program, and PredBB is the predecessor we 2684 // translated it through. 2685 Value *NewGVN::findLeaderForInst(Instruction *TransInst, 2686 SmallPtrSetImpl<Value *> &Visited, 2687 MemoryAccess *MemAccess, Instruction *OrigInst, 2688 BasicBlock *PredBB) { 2689 unsigned IDFSNum = InstrToDFSNum(OrigInst); 2690 // Make sure it's marked as a temporary instruction. 2691 AllTempInstructions.insert(TransInst); 2692 // and make sure anything that tries to add it's DFS number is 2693 // redirected to the instruction we are making a phi of ops 2694 // for. 2695 TempToBlock.insert({TransInst, PredBB}); 2696 InstrDFS.insert({TransInst, IDFSNum}); 2697 2698 auto Res = performSymbolicEvaluation(TransInst, Visited); 2699 const Expression *E = Res.Expr; 2700 addAdditionalUsers(Res, OrigInst); 2701 InstrDFS.erase(TransInst); 2702 AllTempInstructions.erase(TransInst); 2703 TempToBlock.erase(TransInst); 2704 if (MemAccess) 2705 TempToMemory.erase(TransInst); 2706 if (!E) 2707 return nullptr; 2708 auto *FoundVal = findPHIOfOpsLeader(E, OrigInst, PredBB); 2709 if (!FoundVal) { 2710 ExpressionToPhiOfOps[E].insert(OrigInst); 2711 LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst 2712 << " in block " << getBlockName(PredBB) << "\n"); 2713 return nullptr; 2714 } 2715 if (auto *SI = dyn_cast<StoreInst>(FoundVal)) 2716 FoundVal = SI->getValueOperand(); 2717 return FoundVal; 2718 } 2719 2720 // When we see an instruction that is an op of phis, generate the equivalent phi 2721 // of ops form. 2722 const Expression * 2723 NewGVN::makePossiblePHIOfOps(Instruction *I, 2724 SmallPtrSetImpl<Value *> &Visited) { 2725 if (!okayForPHIOfOps(I)) 2726 return nullptr; 2727 2728 if (!Visited.insert(I).second) 2729 return nullptr; 2730 // For now, we require the instruction be cycle free because we don't 2731 // *always* create a phi of ops for instructions that could be done as phi 2732 // of ops, we only do it if we think it is useful. If we did do it all the 2733 // time, we could remove the cycle free check. 2734 if (!isCycleFree(I)) 2735 return nullptr; 2736 2737 SmallPtrSet<const Value *, 8> ProcessedPHIs; 2738 // TODO: We don't do phi translation on memory accesses because it's 2739 // complicated. For a load, we'd need to be able to simulate a new memoryuse, 2740 // which we don't have a good way of doing ATM. 
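  // Illustrative (hypothetical) example of the transformation attempted here:
  //   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
  //   %r = add i32 %p, 1
  // If leaders exist for "add i32 %a, 1" in %bb1 and for "add i32 %b, 1" in
  // %bb2, then %r can be valued as a phi of those leaders placed in %p's
  // block, rather than as an opaque add of a phi.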
  auto *MemAccess = getMemoryAccess(I);
  // If the memory operation is defined by a memory operation in this block
  // that isn't a MemoryPhi, transforming the pointer backwards through a
  // scalar phi can't help, as it would still be killed by that memory
  // operation.
  if (MemAccess && !isa<MemoryPhi>(MemAccess->getDefiningAccess()) &&
      MemAccess->getDefiningAccess()->getBlock() == I->getParent())
    return nullptr;

  // Convert op of phis to phi of ops.
  SmallPtrSet<const Value *, 10> VisitedOps;
  SmallVector<Value *, 4> Ops(I->operand_values());
  BasicBlock *SamePHIBlock = nullptr;
  PHINode *OpPHI = nullptr;
  if (!DebugCounter::shouldExecute(PHIOfOpsCounter))
    return nullptr;
  for (auto *Op : Ops) {
    if (!isa<PHINode>(Op)) {
      auto *ValuePHI = RealToTemp.lookup(Op);
      if (!ValuePHI)
        continue;
      LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n");
      Op = ValuePHI;
    }
    OpPHI = cast<PHINode>(Op);
    if (!SamePHIBlock) {
      SamePHIBlock = getBlockForValue(OpPHI);
    } else if (SamePHIBlock != getBlockForValue(OpPHI)) {
      LLVM_DEBUG(
          dbgs()
          << "PHIs for operands are not all in the same block, aborting\n");
      return nullptr;
    }
    // No point in doing this for one-operand phis.
    // Since all PHIs for operands must be in the same block, they must have
    // the same number of operands, so we can just abort.
    if (OpPHI->getNumOperands() == 1)
      return nullptr;
  }

  if (!OpPHI)
    return nullptr;

  SmallVector<ValPair, 4> PHIOps;
  SmallPtrSet<Value *, 4> Deps;
  auto *PHIBlock = getBlockForValue(OpPHI);
  RevisitOnReachabilityChange[PHIBlock].reset(InstrToDFSNum(I));
  for (unsigned PredNum = 0; PredNum < OpPHI->getNumOperands(); ++PredNum) {
    auto *PredBB = OpPHI->getIncomingBlock(PredNum);
    Value *FoundVal = nullptr;
    SmallPtrSet<Value *, 4> CurrentDeps;
    // We could just skip unreachable edges entirely, but it's tricky to do
    // with rewriting existing phi nodes.
    if (ReachableEdges.count({PredBB, PHIBlock})) {
      // Clone the instruction, create an expression from it that is
      // translated back into the predecessor, and see if we have a leader.
      Instruction *ValueOp = I->clone();
      // Emit the temporary instruction in the predecessor basic block where
      // the corresponding value is defined.
      ValueOp->insertBefore(PredBB->getTerminator());
      if (MemAccess)
        TempToMemory.insert({ValueOp, MemAccess});
      bool SafeForPHIOfOps = true;
      VisitedOps.clear();
      for (auto &Op : ValueOp->operands()) {
        auto *OrigOp = &*Op;
        // When these operands change, it could change whether there is a
        // leader for us or not, so we have to add additional users.
        if (isa<PHINode>(Op)) {
          Op = Op->DoPHITranslation(PHIBlock, PredBB);
          if (Op != OrigOp && Op != I)
            CurrentDeps.insert(Op);
        } else if (auto *ValuePHI = RealToTemp.lookup(Op)) {
          if (getBlockForValue(ValuePHI) == PHIBlock)
            Op = ValuePHI->getIncomingValueForBlock(PredBB);
        }
        // If we phi-translated the op, it must be safe.
        SafeForPHIOfOps =
            SafeForPHIOfOps &&
            (Op != OrigOp || OpIsSafeForPHIOfOps(Op, PHIBlock, VisitedOps));
      }
      // FIXME: For those things that are not safe we could generate
      // expressions all the way down, and see if this comes out to a
      // constant.
For anything where that is true, and unsafe, we should 2824 // have made a phi-of-ops (or value numbered it equivalent to something) 2825 // for the pieces already. 2826 FoundVal = !SafeForPHIOfOps ? nullptr 2827 : findLeaderForInst(ValueOp, Visited, 2828 MemAccess, I, PredBB); 2829 ValueOp->eraseFromParent(); 2830 if (!FoundVal) { 2831 // We failed to find a leader for the current ValueOp, but this might 2832 // change in case of the translated operands change. 2833 if (SafeForPHIOfOps) 2834 for (auto *Dep : CurrentDeps) 2835 addAdditionalUsers(Dep, I); 2836 2837 return nullptr; 2838 } 2839 Deps.insert(CurrentDeps.begin(), CurrentDeps.end()); 2840 } else { 2841 LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block " 2842 << getBlockName(PredBB) 2843 << " because the block is unreachable\n"); 2844 FoundVal = PoisonValue::get(I->getType()); 2845 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I)); 2846 } 2847 2848 PHIOps.push_back({FoundVal, PredBB}); 2849 LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in " 2850 << getBlockName(PredBB) << "\n"); 2851 } 2852 for (auto *Dep : Deps) 2853 addAdditionalUsers(Dep, I); 2854 sortPHIOps(PHIOps); 2855 auto *E = performSymbolicPHIEvaluation(PHIOps, I, PHIBlock); 2856 if (isa<ConstantExpression>(E) || isa<VariableExpression>(E)) { 2857 LLVM_DEBUG( 2858 dbgs() 2859 << "Not creating real PHI of ops because it simplified to existing " 2860 "value or constant\n"); 2861 // We have leaders for all operands, but do not create a real PHI node with 2862 // those leaders as operands, so the link between the operands and the 2863 // PHI-of-ops is not materialized in the IR. If any of those leaders 2864 // changes, the PHI-of-op may change also, so we need to add the operands as 2865 // additional users. 2866 for (auto &O : PHIOps) 2867 addAdditionalUsers(O.first, I); 2868 2869 return E; 2870 } 2871 auto *ValuePHI = RealToTemp.lookup(I); 2872 bool NewPHI = false; 2873 if (!ValuePHI) { 2874 ValuePHI = 2875 PHINode::Create(I->getType(), OpPHI->getNumOperands(), "phiofops"); 2876 addPhiOfOps(ValuePHI, PHIBlock, I); 2877 NewPHI = true; 2878 NumGVNPHIOfOpsCreated++; 2879 } 2880 if (NewPHI) { 2881 for (auto PHIOp : PHIOps) 2882 ValuePHI->addIncoming(PHIOp.first, PHIOp.second); 2883 } else { 2884 TempToBlock[ValuePHI] = PHIBlock; 2885 unsigned int i = 0; 2886 for (auto PHIOp : PHIOps) { 2887 ValuePHI->setIncomingValue(i, PHIOp.first); 2888 ValuePHI->setIncomingBlock(i, PHIOp.second); 2889 ++i; 2890 } 2891 } 2892 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I)); 2893 LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I 2894 << "\n"); 2895 2896 return E; 2897 } 2898 2899 // The algorithm initially places the values of the routine in the TOP 2900 // congruence class. The leader of TOP is the undetermined value `poison`. 2901 // When the algorithm has finished, values still in TOP are unreachable. 2902 void NewGVN::initializeCongruenceClasses(Function &F) { 2903 NextCongruenceNum = 0; 2904 2905 // Note that even though we use the live on entry def as a representative 2906 // MemoryAccess, it is *not* the same as the actual live on entry def. We 2907 // have no real equivalent to poison for MemoryAccesses, and so we really 2908 // should be checking whether the MemoryAccess is top if we want to know if it 2909 // is equivalent to everything. 
Otherwise, what this really signifies is that
2910 // the access reaches all the way back to the beginning of the function.
2911
2912 // Initialize all other instructions to be in TOP class.
2913 TOPClass = createCongruenceClass(nullptr, nullptr);
2914 TOPClass->setMemoryLeader(MSSA->getLiveOnEntryDef());
2915 // The live on entry def gets put into its own class.
2916 MemoryAccessToClass[MSSA->getLiveOnEntryDef()] =
2917 createMemoryClass(MSSA->getLiveOnEntryDef());
2918
2919 for (auto *DTN : nodes(DT)) {
2920 BasicBlock *BB = DTN->getBlock();
2921 // All MemoryAccesses are equivalent to live on entry to start. They must
2922 // be initialized to something so that initial changes are noticed. For
2923 // the maximal answer, we initialize them all to be the same as
2924 // liveOnEntry.
2925 auto *MemoryBlockDefs = MSSA->getBlockDefs(BB);
2926 if (MemoryBlockDefs)
2927 for (const auto &Def : *MemoryBlockDefs) {
2928 MemoryAccessToClass[&Def] = TOPClass;
2929 auto *MD = dyn_cast<MemoryDef>(&Def);
2930 // Insert the memory phis into the member list.
2931 if (!MD) {
2932 const MemoryPhi *MP = cast<MemoryPhi>(&Def);
2933 TOPClass->memory_insert(MP);
2934 MemoryPhiState.insert({MP, MPS_TOP});
2935 }
2936
2937 if (MD && isa<StoreInst>(MD->getMemoryInst()))
2938 TOPClass->incStoreCount();
2939 }
2940
2941 // FIXME: This is trying to discover which instructions are uses of phi
2942 // nodes. We should move this into one of the myriad of places that walk
2943 // all the operands already.
2944 for (auto &I : *BB) {
2945 if (isa<PHINode>(&I))
2946 for (auto *U : I.users())
2947 if (auto *UInst = dyn_cast<Instruction>(U))
2948 if (InstrToDFSNum(UInst) != 0 && okayForPHIOfOps(UInst))
2949 PHINodeUses.insert(UInst);
2950 // Don't insert void terminators into the class. We don't value number
2951 // them, and they just end up sitting in TOP.
2952 if (I.isTerminator() && I.getType()->isVoidTy())
2953 continue;
2954 TOPClass->insert(&I);
2955 ValueToClass[&I] = TOPClass;
2956 }
2957 }
2958
2959 // Initialize arguments to be in their own unique congruence classes.
2960 for (auto &FA : F.args())
2961 createSingletonCongruenceClass(&FA);
2962 }
2963
2964 void NewGVN::cleanupTables() {
2965 for (CongruenceClass *&CC : CongruenceClasses) {
2966 LLVM_DEBUG(dbgs() << "Congruence class " << CC->getID() << " has "
2967 << CC->size() << " members\n");
2968 // Make sure we delete the congruence class (probably worth switching to
2969 // a unique_ptr at some point).
2970 delete CC;
2971 CC = nullptr;
2972 }
2973
2974 // Destroy the value expressions.
2975 SmallVector<Instruction *, 8> TempInst(AllTempInstructions.begin(),
2976 AllTempInstructions.end());
2977 AllTempInstructions.clear();
2978
2979 // We have to drop all references for everything first, so there are no uses
2980 // left as we delete them.
2981 for (auto *I : TempInst) { 2982 I->dropAllReferences(); 2983 } 2984 2985 while (!TempInst.empty()) { 2986 auto *I = TempInst.pop_back_val(); 2987 I->deleteValue(); 2988 } 2989 2990 ValueToClass.clear(); 2991 ArgRecycler.clear(ExpressionAllocator); 2992 ExpressionAllocator.Reset(); 2993 CongruenceClasses.clear(); 2994 ExpressionToClass.clear(); 2995 ValueToExpression.clear(); 2996 RealToTemp.clear(); 2997 AdditionalUsers.clear(); 2998 ExpressionToPhiOfOps.clear(); 2999 TempToBlock.clear(); 3000 TempToMemory.clear(); 3001 PHINodeUses.clear(); 3002 OpSafeForPHIOfOps.clear(); 3003 ReachableBlocks.clear(); 3004 ReachableEdges.clear(); 3005 #ifndef NDEBUG 3006 ProcessedCount.clear(); 3007 #endif 3008 InstrDFS.clear(); 3009 InstructionsToErase.clear(); 3010 DFSToInstr.clear(); 3011 BlockInstRange.clear(); 3012 TouchedInstructions.clear(); 3013 MemoryAccessToClass.clear(); 3014 PredicateToUsers.clear(); 3015 MemoryToUsers.clear(); 3016 RevisitOnReachabilityChange.clear(); 3017 IntrinsicInstPred.clear(); 3018 } 3019 3020 // Assign local DFS number mapping to instructions, and leave space for Value 3021 // PHI's. 3022 std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B, 3023 unsigned Start) { 3024 unsigned End = Start; 3025 if (MemoryAccess *MemPhi = getMemoryAccess(B)) { 3026 InstrDFS[MemPhi] = End++; 3027 DFSToInstr.emplace_back(MemPhi); 3028 } 3029 3030 // Then the real block goes next. 3031 for (auto &I : *B) { 3032 // There's no need to call isInstructionTriviallyDead more than once on 3033 // an instruction. Therefore, once we know that an instruction is dead 3034 // we change its DFS number so that it doesn't get value numbered. 3035 if (isInstructionTriviallyDead(&I, TLI)) { 3036 InstrDFS[&I] = 0; 3037 LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n"); 3038 markInstructionForDeletion(&I); 3039 continue; 3040 } 3041 if (isa<PHINode>(&I)) 3042 RevisitOnReachabilityChange[B].set(End); 3043 InstrDFS[&I] = End++; 3044 DFSToInstr.emplace_back(&I); 3045 } 3046 3047 // All of the range functions taken half-open ranges (open on the end side). 3048 // So we do not subtract one from count, because at this point it is one 3049 // greater than the last instruction. 3050 return std::make_pair(Start, End); 3051 } 3052 3053 void NewGVN::updateProcessedCount(const Value *V) { 3054 #ifndef NDEBUG 3055 if (ProcessedCount.count(V) == 0) { 3056 ProcessedCount.insert({V, 1}); 3057 } else { 3058 ++ProcessedCount[V]; 3059 assert(ProcessedCount[V] < 100 && 3060 "Seem to have processed the same Value a lot"); 3061 } 3062 #endif 3063 } 3064 3065 // Evaluate MemoryPhi nodes symbolically, just like PHI nodes 3066 void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) { 3067 // If all the arguments are the same, the MemoryPhi has the same value as the 3068 // argument. Filter out unreachable blocks and self phis from our operands. 3069 // TODO: We could do cycle-checking on the memory phis to allow valueizing for 3070 // self-phi checking. 3071 const BasicBlock *PHIBlock = MP->getBlock(); 3072 auto Filtered = make_filter_range(MP->operands(), [&](const Use &U) { 3073 return cast<MemoryAccess>(U) != MP && 3074 !isMemoryAccessTOP(cast<MemoryAccess>(U)) && 3075 ReachableEdges.count({MP->getIncomingBlock(U), PHIBlock}); 3076 }); 3077 // If all that is left is nothing, our memoryphi is poison. We keep it as 3078 // InitialClass. Note: The only case this should happen is if we have at 3079 // least one self-argument. 
3080 if (Filtered.begin() == Filtered.end()) { 3081 if (setMemoryClass(MP, TOPClass)) 3082 markMemoryUsersTouched(MP); 3083 return; 3084 } 3085 3086 // Transform the remaining operands into operand leaders. 3087 // FIXME: mapped_iterator should have a range version. 3088 auto LookupFunc = [&](const Use &U) { 3089 return lookupMemoryLeader(cast<MemoryAccess>(U)); 3090 }; 3091 auto MappedBegin = map_iterator(Filtered.begin(), LookupFunc); 3092 auto MappedEnd = map_iterator(Filtered.end(), LookupFunc); 3093 3094 // and now check if all the elements are equal. 3095 // Sadly, we can't use std::equals since these are random access iterators. 3096 const auto *AllSameValue = *MappedBegin; 3097 ++MappedBegin; 3098 bool AllEqual = std::all_of( 3099 MappedBegin, MappedEnd, 3100 [&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; }); 3101 3102 if (AllEqual) 3103 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue 3104 << "\n"); 3105 else 3106 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n"); 3107 // If it's equal to something, it's in that class. Otherwise, it has to be in 3108 // a class where it is the leader (other things may be equivalent to it, but 3109 // it needs to start off in its own class, which means it must have been the 3110 // leader, and it can't have stopped being the leader because it was never 3111 // removed). 3112 CongruenceClass *CC = 3113 AllEqual ? getMemoryClass(AllSameValue) : ensureLeaderOfMemoryClass(MP); 3114 auto OldState = MemoryPhiState.lookup(MP); 3115 assert(OldState != MPS_Invalid && "Invalid memory phi state"); 3116 auto NewState = AllEqual ? MPS_Equivalent : MPS_Unique; 3117 MemoryPhiState[MP] = NewState; 3118 if (setMemoryClass(MP, CC) || OldState != NewState) 3119 markMemoryUsersTouched(MP); 3120 } 3121 3122 // Value number a single instruction, symbolically evaluating, performing 3123 // congruence finding, and updating mappings. 3124 void NewGVN::valueNumberInstruction(Instruction *I) { 3125 LLVM_DEBUG(dbgs() << "Processing instruction " << *I << "\n"); 3126 if (!I->isTerminator()) { 3127 const Expression *Symbolized = nullptr; 3128 SmallPtrSet<Value *, 2> Visited; 3129 if (DebugCounter::shouldExecute(VNCounter)) { 3130 auto Res = performSymbolicEvaluation(I, Visited); 3131 Symbolized = Res.Expr; 3132 addAdditionalUsers(Res, I); 3133 3134 // Make a phi of ops if necessary 3135 if (Symbolized && !isa<ConstantExpression>(Symbolized) && 3136 !isa<VariableExpression>(Symbolized) && PHINodeUses.count(I)) { 3137 auto *PHIE = makePossiblePHIOfOps(I, Visited); 3138 // If we created a phi of ops, use it. 3139 // If we couldn't create one, make sure we don't leave one lying around 3140 if (PHIE) { 3141 Symbolized = PHIE; 3142 } else if (auto *Op = RealToTemp.lookup(I)) { 3143 removePhiOfOps(I, Op); 3144 } 3145 } 3146 } else { 3147 // Mark the instruction as unused so we don't value number it again. 3148 InstrDFS[I] = 0; 3149 } 3150 // If we couldn't come up with a symbolic expression, use the unknown 3151 // expression 3152 if (Symbolized == nullptr) 3153 Symbolized = createUnknownExpression(I); 3154 performCongruenceFinding(I, Symbolized); 3155 } else { 3156 // Handle terminators that return values. All of them produce values we 3157 // don't currently understand. We don't place non-value producing 3158 // terminators in a class. 
3159 if (!I->getType()->isVoidTy()) { 3160 auto *Symbolized = createUnknownExpression(I); 3161 performCongruenceFinding(I, Symbolized); 3162 } 3163 processOutgoingEdges(I, I->getParent()); 3164 } 3165 } 3166 3167 // Check if there is a path, using single or equal argument phi nodes, from 3168 // First to Second. 3169 bool NewGVN::singleReachablePHIPath( 3170 SmallPtrSet<const MemoryAccess *, 8> &Visited, const MemoryAccess *First, 3171 const MemoryAccess *Second) const { 3172 if (First == Second) 3173 return true; 3174 if (MSSA->isLiveOnEntryDef(First)) 3175 return false; 3176 3177 // This is not perfect, but as we're just verifying here, we can live with 3178 // the loss of precision. The real solution would be that of doing strongly 3179 // connected component finding in this routine, and it's probably not worth 3180 // the complexity for the time being. So, we just keep a set of visited 3181 // MemoryAccess and return true when we hit a cycle. 3182 if (!Visited.insert(First).second) 3183 return true; 3184 3185 const auto *EndDef = First; 3186 for (const auto *ChainDef : optimized_def_chain(First)) { 3187 if (ChainDef == Second) 3188 return true; 3189 if (MSSA->isLiveOnEntryDef(ChainDef)) 3190 return false; 3191 EndDef = ChainDef; 3192 } 3193 auto *MP = cast<MemoryPhi>(EndDef); 3194 auto ReachableOperandPred = [&](const Use &U) { 3195 return ReachableEdges.count({MP->getIncomingBlock(U), MP->getBlock()}); 3196 }; 3197 auto FilteredPhiArgs = 3198 make_filter_range(MP->operands(), ReachableOperandPred); 3199 SmallVector<const Value *, 32> OperandList; 3200 llvm::copy(FilteredPhiArgs, std::back_inserter(OperandList)); 3201 bool Okay = all_equal(OperandList); 3202 if (Okay) 3203 return singleReachablePHIPath(Visited, cast<MemoryAccess>(OperandList[0]), 3204 Second); 3205 return false; 3206 } 3207 3208 // Verify the that the memory equivalence table makes sense relative to the 3209 // congruence classes. Note that this checking is not perfect, and is currently 3210 // subject to very rare false negatives. It is only useful for 3211 // testing/debugging. 3212 void NewGVN::verifyMemoryCongruency() const { 3213 #ifndef NDEBUG 3214 // Verify that the memory table equivalence and memory member set match 3215 for (const auto *CC : CongruenceClasses) { 3216 if (CC == TOPClass || CC->isDead()) 3217 continue; 3218 if (CC->getStoreCount() != 0) { 3219 assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) && 3220 "Any class with a store as a leader should have a " 3221 "representative stored value"); 3222 assert(CC->getMemoryLeader() && 3223 "Any congruence class with a store should have a " 3224 "representative access"); 3225 } 3226 3227 if (CC->getMemoryLeader()) 3228 assert(MemoryAccessToClass.lookup(CC->getMemoryLeader()) == CC && 3229 "Representative MemoryAccess does not appear to be reverse " 3230 "mapped properly"); 3231 for (const auto *M : CC->memory()) 3232 assert(MemoryAccessToClass.lookup(M) == CC && 3233 "Memory member does not appear to be reverse mapped properly"); 3234 } 3235 3236 // Anything equivalent in the MemoryAccess table should be in the same 3237 // congruence class. 3238 3239 // Filter out the unreachable and trivially dead entries, because they may 3240 // never have been updated if the instructions were not processed. 
3241 auto ReachableAccessPred =
3242 [&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) {
3243 bool Result = ReachableBlocks.count(Pair.first->getBlock());
3244 if (!Result || MSSA->isLiveOnEntryDef(Pair.first) ||
3245 MemoryToDFSNum(Pair.first) == 0)
3246 return false;
3247 if (auto *MemDef = dyn_cast<MemoryDef>(Pair.first))
3248 return !isInstructionTriviallyDead(MemDef->getMemoryInst());
3249
3250 // We could have phi nodes whose operands are all trivially dead,
3251 // so we don't process them.
3252 if (auto *MemPHI = dyn_cast<MemoryPhi>(Pair.first)) {
3253 for (const auto &U : MemPHI->incoming_values()) {
3254 if (auto *I = dyn_cast<Instruction>(&*U)) {
3255 if (!isInstructionTriviallyDead(I))
3256 return true;
3257 }
3258 }
3259 return false;
3260 }
3261
3262 return true;
3263 };
3264
3265 auto Filtered = make_filter_range(MemoryAccessToClass, ReachableAccessPred);
3266 for (auto KV : Filtered) {
3267 if (auto *FirstMUD = dyn_cast<MemoryUseOrDef>(KV.first)) {
3268 auto *SecondMUD = dyn_cast<MemoryUseOrDef>(KV.second->getMemoryLeader());
3269 if (FirstMUD && SecondMUD) {
3270 SmallPtrSet<const MemoryAccess *, 8> VisitedMAS;
3271 assert((singleReachablePHIPath(VisitedMAS, FirstMUD, SecondMUD) ||
3272 ValueToClass.lookup(FirstMUD->getMemoryInst()) ==
3273 ValueToClass.lookup(SecondMUD->getMemoryInst())) &&
3274 "The instructions for these memory operations should have "
3275 "been in the same congruence class or reachable through "
3276 "a single argument phi");
3277 }
3278 } else if (auto *FirstMP = dyn_cast<MemoryPhi>(KV.first)) {
3279 // We can only sanely verify that MemoryDefs in the operand list all have
3280 // the same class.
3281 auto ReachableOperandPred = [&](const Use &U) {
3282 return ReachableEdges.count(
3283 {FirstMP->getIncomingBlock(U), FirstMP->getBlock()}) &&
3284 isa<MemoryDef>(U);
3285 };
3286 // All arguments should be in the same class, ignoring unreachable arguments.
3287 auto FilteredPhiArgs =
3288 make_filter_range(FirstMP->operands(), ReachableOperandPred);
3289 SmallVector<const CongruenceClass *, 16> PhiOpClasses;
3290 std::transform(FilteredPhiArgs.begin(), FilteredPhiArgs.end(),
3291 std::back_inserter(PhiOpClasses), [&](const Use &U) {
3292 const MemoryDef *MD = cast<MemoryDef>(U);
3293 return ValueToClass.lookup(MD->getMemoryInst());
3294 });
3295 assert(all_equal(PhiOpClasses) &&
3296 "All MemoryPhi arguments should be in the same class");
3297 }
3298 }
3299 #endif
3300 }
3301
3302 // Verify that the sparse propagation we did actually found the maximal fixpoint.
3303 // We do this by storing the value to class mapping, touching all instructions,
3304 // and redoing the iteration to see if anything changed.
3305 void NewGVN::verifyIterationSettled(Function &F) {
3306 #ifndef NDEBUG
3307 LLVM_DEBUG(dbgs() << "Beginning iteration verification\n");
3308 if (DebugCounter::isCounterSet(VNCounter))
3309 DebugCounter::setCounterState(VNCounter, StartingVNCounter);
3310
3311 // Note that we have to store the actual classes, as we may change existing
3312 // classes during iteration. This is because our memory iteration propagation
3313 // is not perfect, and so may waste a little work. But it should generate
3314 // exactly the same congruence classes we have now, with different IDs.
3315 std::map<const Value *, CongruenceClass> BeforeIteration;
3316
3317 for (auto &KV : ValueToClass) {
3318 if (auto *I = dyn_cast<Instruction>(KV.first))
3319 // Skip unused/dead instructions.
3320 if (InstrToDFSNum(I) == 0) 3321 continue; 3322 BeforeIteration.insert({KV.first, *KV.second}); 3323 } 3324 3325 TouchedInstructions.set(); 3326 TouchedInstructions.reset(0); 3327 OpSafeForPHIOfOps.clear(); 3328 CacheIdx = 0; 3329 iterateTouchedInstructions(); 3330 DenseSet<std::pair<const CongruenceClass *, const CongruenceClass *>> 3331 EqualClasses; 3332 for (const auto &KV : ValueToClass) { 3333 if (auto *I = dyn_cast<Instruction>(KV.first)) 3334 // Skip unused/dead instructions. 3335 if (InstrToDFSNum(I) == 0) 3336 continue; 3337 // We could sink these uses, but i think this adds a bit of clarity here as 3338 // to what we are comparing. 3339 auto *BeforeCC = &BeforeIteration.find(KV.first)->second; 3340 auto *AfterCC = KV.second; 3341 // Note that the classes can't change at this point, so we memoize the set 3342 // that are equal. 3343 if (!EqualClasses.count({BeforeCC, AfterCC})) { 3344 assert(BeforeCC->isEquivalentTo(AfterCC) && 3345 "Value number changed after main loop completed!"); 3346 EqualClasses.insert({BeforeCC, AfterCC}); 3347 } 3348 } 3349 #endif 3350 } 3351 3352 // Verify that for each store expression in the expression to class mapping, 3353 // only the latest appears, and multiple ones do not appear. 3354 // Because loads do not use the stored value when doing equality with stores, 3355 // if we don't erase the old store expressions from the table, a load can find 3356 // a no-longer valid StoreExpression. 3357 void NewGVN::verifyStoreExpressions() const { 3358 #ifndef NDEBUG 3359 // This is the only use of this, and it's not worth defining a complicated 3360 // densemapinfo hash/equality function for it. 3361 std::set< 3362 std::pair<const Value *, 3363 std::tuple<const Value *, const CongruenceClass *, Value *>>> 3364 StoreExpressionSet; 3365 for (const auto &KV : ExpressionToClass) { 3366 if (auto *SE = dyn_cast<StoreExpression>(KV.first)) { 3367 // Make sure a version that will conflict with loads is not already there 3368 auto Res = StoreExpressionSet.insert( 3369 {SE->getOperand(0), std::make_tuple(SE->getMemoryLeader(), KV.second, 3370 SE->getStoredValue())}); 3371 bool Okay = Res.second; 3372 // It's okay to have the same expression already in there if it is 3373 // identical in nature. 3374 // This can happen when the leader of the stored value changes over time. 3375 if (!Okay) 3376 Okay = (std::get<1>(Res.first->second) == KV.second) && 3377 (lookupOperandLeader(std::get<2>(Res.first->second)) == 3378 lookupOperandLeader(SE->getStoredValue())); 3379 assert(Okay && "Stored expression conflict exists in expression table"); 3380 auto *ValueExpr = ValueToExpression.lookup(SE->getStoreInst()); 3381 assert(ValueExpr && ValueExpr->equals(*SE) && 3382 "StoreExpression in ExpressionToClass is not latest " 3383 "StoreExpression for value"); 3384 } 3385 } 3386 #endif 3387 } 3388 3389 // This is the main value numbering loop, it iterates over the initial touched 3390 // instruction set, propagating value numbers, marking things touched, etc, 3391 // until the set of touched instructions is completely empty. 3392 void NewGVN::iterateTouchedInstructions() { 3393 uint64_t Iterations = 0; 3394 // Figure out where touchedinstructions starts 3395 int FirstInstr = TouchedInstructions.find_first(); 3396 // Nothing set, nothing to iterate, just return. 
3397 if (FirstInstr == -1) 3398 return; 3399 const BasicBlock *LastBlock = getBlockForValue(InstrFromDFSNum(FirstInstr)); 3400 while (TouchedInstructions.any()) { 3401 ++Iterations; 3402 // Walk through all the instructions in all the blocks in RPO. 3403 // TODO: As we hit a new block, we should push and pop equalities into a 3404 // table lookupOperandLeader can use, to catch things PredicateInfo 3405 // might miss, like edge-only equivalences. 3406 for (unsigned InstrNum : TouchedInstructions.set_bits()) { 3407 3408 // This instruction was found to be dead. We don't bother looking 3409 // at it again. 3410 if (InstrNum == 0) { 3411 TouchedInstructions.reset(InstrNum); 3412 continue; 3413 } 3414 3415 Value *V = InstrFromDFSNum(InstrNum); 3416 const BasicBlock *CurrBlock = getBlockForValue(V); 3417 3418 // If we hit a new block, do reachability processing. 3419 if (CurrBlock != LastBlock) { 3420 LastBlock = CurrBlock; 3421 bool BlockReachable = ReachableBlocks.count(CurrBlock); 3422 const auto &CurrInstRange = BlockInstRange.lookup(CurrBlock); 3423 3424 // If it's not reachable, erase any touched instructions and move on. 3425 if (!BlockReachable) { 3426 TouchedInstructions.reset(CurrInstRange.first, CurrInstRange.second); 3427 LLVM_DEBUG(dbgs() << "Skipping instructions in block " 3428 << getBlockName(CurrBlock) 3429 << " because it is unreachable\n"); 3430 continue; 3431 } 3432 // Use the appropriate cache for "OpIsSafeForPHIOfOps". 3433 CacheIdx = RPOOrdering.lookup(DT->getNode(CurrBlock)) - 1; 3434 updateProcessedCount(CurrBlock); 3435 } 3436 // Reset after processing (because we may mark ourselves as touched when 3437 // we propagate equalities). 3438 TouchedInstructions.reset(InstrNum); 3439 3440 if (auto *MP = dyn_cast<MemoryPhi>(V)) { 3441 LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n"); 3442 valueNumberMemoryPhi(MP); 3443 } else if (auto *I = dyn_cast<Instruction>(V)) { 3444 valueNumberInstruction(I); 3445 } else { 3446 llvm_unreachable("Should have been a MemoryPhi or Instruction"); 3447 } 3448 updateProcessedCount(V); 3449 } 3450 } 3451 NumGVNMaxIterations = std::max(NumGVNMaxIterations.getValue(), Iterations); 3452 } 3453 3454 // This is the main transformation entry point. 3455 bool NewGVN::runGVN() { 3456 if (DebugCounter::isCounterSet(VNCounter)) 3457 StartingVNCounter = DebugCounter::getCounterState(VNCounter); 3458 bool Changed = false; 3459 NumFuncArgs = F.arg_size(); 3460 MSSAWalker = MSSA->getWalker(); 3461 SingletonDeadExpression = new (ExpressionAllocator) DeadExpression(); 3462 3463 // Count number of instructions for sizing of hash tables, and come 3464 // up with a global dfs numbering for instructions. 3465 unsigned ICount = 1; 3466 // Add an empty instruction to account for the fact that we start at 1 3467 DFSToInstr.emplace_back(nullptr); 3468 // Note: We want ideal RPO traversal of the blocks, which is not quite the 3469 // same as dominator tree order, particularly with regard whether backedges 3470 // get visited first or second, given a block with multiple successors. 3471 // If we visit in the wrong order, we will end up performing N times as many 3472 // iterations. 3473 // The dominator tree does guarantee that, for a given dom tree node, it's 3474 // parent must occur before it in the RPO ordering. Thus, we only need to sort 3475 // the siblings. 
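// Illustrative sketch (assumed CFG): for a diamond
//   A -> B, A -> C, B -> D, C -> D
// the dominator tree children of A are {B, C, D}. Once each node's children
// are sorted by the RPO numbers computed below, a plain depth-first walk of
// the dominator tree visits blocks in RPO order (e.g. A, B, C, D).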
3476 ReversePostOrderTraversal<Function *> RPOT(&F); 3477 unsigned Counter = 0; 3478 for (auto &B : RPOT) { 3479 auto *Node = DT->getNode(B); 3480 assert(Node && "RPO and Dominator tree should have same reachability"); 3481 RPOOrdering[Node] = ++Counter; 3482 } 3483 // Sort dominator tree children arrays into RPO. 3484 for (auto &B : RPOT) { 3485 auto *Node = DT->getNode(B); 3486 if (Node->getNumChildren() > 1) 3487 llvm::sort(*Node, [&](const DomTreeNode *A, const DomTreeNode *B) { 3488 return RPOOrdering[A] < RPOOrdering[B]; 3489 }); 3490 } 3491 3492 // Now a standard depth first ordering of the domtree is equivalent to RPO. 3493 for (auto *DTN : depth_first(DT->getRootNode())) { 3494 BasicBlock *B = DTN->getBlock(); 3495 const auto &BlockRange = assignDFSNumbers(B, ICount); 3496 BlockInstRange.insert({B, BlockRange}); 3497 ICount += BlockRange.second - BlockRange.first; 3498 } 3499 initializeCongruenceClasses(F); 3500 3501 TouchedInstructions.resize(ICount); 3502 // Ensure we don't end up resizing the expressionToClass map, as 3503 // that can be quite expensive. At most, we have one expression per 3504 // instruction. 3505 ExpressionToClass.reserve(ICount); 3506 3507 // Initialize the touched instructions to include the entry block. 3508 const auto &InstRange = BlockInstRange.lookup(&F.getEntryBlock()); 3509 TouchedInstructions.set(InstRange.first, InstRange.second); 3510 LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock()) 3511 << " marked reachable\n"); 3512 ReachableBlocks.insert(&F.getEntryBlock()); 3513 // Use index corresponding to entry block. 3514 CacheIdx = 0; 3515 3516 iterateTouchedInstructions(); 3517 verifyMemoryCongruency(); 3518 verifyIterationSettled(F); 3519 verifyStoreExpressions(); 3520 3521 Changed |= eliminateInstructions(F); 3522 3523 // Delete all instructions marked for deletion. 3524 for (Instruction *ToErase : InstructionsToErase) { 3525 if (!ToErase->use_empty()) 3526 ToErase->replaceAllUsesWith(PoisonValue::get(ToErase->getType())); 3527 3528 assert(ToErase->getParent() && 3529 "BB containing ToErase deleted unexpectedly!"); 3530 ToErase->eraseFromParent(); 3531 } 3532 Changed |= !InstructionsToErase.empty(); 3533 3534 // Delete all unreachable blocks. 3535 auto UnreachableBlockPred = [&](const BasicBlock &BB) { 3536 return !ReachableBlocks.count(&BB); 3537 }; 3538 3539 for (auto &BB : make_filter_range(F, UnreachableBlockPred)) { 3540 LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB) 3541 << " is unreachable\n"); 3542 deleteInstructionsInBlock(&BB); 3543 Changed = true; 3544 } 3545 3546 cleanupTables(); 3547 return Changed; 3548 } 3549 3550 struct NewGVN::ValueDFS { 3551 int DFSIn = 0; 3552 int DFSOut = 0; 3553 int LocalNum = 0; 3554 3555 // Only one of Def and U will be set. 3556 // The bool in the Def tells us whether the Def is the stored value of a 3557 // store. 3558 PointerIntPair<Value *, 1, bool> Def; 3559 Use *U = nullptr; 3560 3561 bool operator<(const ValueDFS &Other) const { 3562 // It's not enough that any given field be less than - we have sets 3563 // of fields that need to be evaluated together to give a proper ordering. 3564 // For example, if you have; 3565 // DFS (1, 3) 3566 // Val 0 3567 // DFS (1, 2) 3568 // Val 50 3569 // We want the second to be less than the first, but if we just go field 3570 // by field, we will get to Val 0 < Val 50 and say the first is less than 3571 // the second. We only want it to be less than if the DFS orders are equal. 
3572 //
3573 // Each LLVM instruction only produces one value, and thus the lowest-level
3574 // differentiator that really matters for the stack (and what we use as a
3575 // replacement) is the local dfs number.
3576 // Everything else in the structure is instruction level, and only affects
3577 // the order in which we will replace operands of a given instruction.
3578 //
3579 // For a given instruction (IE things with equal dfsin, dfsout, localnum),
3580 // the order of replacement of uses does not matter.
3581 // IE given,
3582 // a = 5
3583 // b = a + a
3584 // When you hit b, you will have two valuedfs with the same dfsin, out, and
3585 // localnum.
3586 // The .val will be the same as well.
3587 // The .u's will be different.
3588 // You will replace both, and it does not matter what order you replace them
3589 // in (IE whether you replace operand 2, then operand 1, or operand 1, then
3590 // operand 2).
3591 // Similarly for the case of same dfsin, dfsout, localnum, but different
3592 // .val's
3593 // a = 5
3594 // b = 6
3595 // c = a + b
3596 // in c, we will have a valuedfs for a, and one for b, with everything the same
3597 // but .val and .u.
3598 // It does not matter what order we replace these operands in.
3599 // You will always end up with the same IR, and this is guaranteed.
3600 return std::tie(DFSIn, DFSOut, LocalNum, Def, U) <
3601 std::tie(Other.DFSIn, Other.DFSOut, Other.LocalNum, Other.Def,
3602 Other.U);
3603 }
3604 };
3605
3606 // This function converts the set of members for a congruence class from values,
3607 // to sets of defs and uses with associated DFS info. The total number of
3608 // reachable uses for each value is stored in UseCounts, and instructions that
3609 // seem
3610 // dead (have no non-dead uses) are stored in ProbablyDead.
3611 void NewGVN::convertClassToDFSOrdered(
3612 const CongruenceClass &Dense, SmallVectorImpl<ValueDFS> &DFSOrderedSet,
3613 DenseMap<const Value *, unsigned int> &UseCounts,
3614 SmallPtrSetImpl<Instruction *> &ProbablyDead) const {
3615 for (auto *D : Dense) {
3616 // First add the value.
3617 BasicBlock *BB = getBlockForValue(D);
3618 // Constants are handled prior to ever calling this function, so
3619 // we should only be left with instructions as members.
3620 assert(BB && "Should have figured out a basic block for value");
3621 ValueDFS VDDef;
3622 DomTreeNode *DomNode = DT->getNode(BB);
3623 VDDef.DFSIn = DomNode->getDFSNumIn();
3624 VDDef.DFSOut = DomNode->getDFSNumOut();
3625 // If it's a store, use the leader of the value operand, if it's always
3626 // available, or the value operand. TODO: We could do dominance checks to
3627 // find a dominating leader, but not worth it ATM.
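// For example (hypothetical IR): for "store i32 %v, ptr %p", if the class
// leader of %v is the constant 42 (always available), we record 42 as this
// def; otherwise we record %v itself and set the bool so that elimination
// later knows this def stands for a stored value.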
3628 if (auto *SI = dyn_cast<StoreInst>(D)) { 3629 auto Leader = lookupOperandLeader(SI->getValueOperand()); 3630 if (alwaysAvailable(Leader)) { 3631 VDDef.Def.setPointer(Leader); 3632 } else { 3633 VDDef.Def.setPointer(SI->getValueOperand()); 3634 VDDef.Def.setInt(true); 3635 } 3636 } else { 3637 VDDef.Def.setPointer(D); 3638 } 3639 assert(isa<Instruction>(D) && 3640 "The dense set member should always be an instruction"); 3641 Instruction *Def = cast<Instruction>(D); 3642 VDDef.LocalNum = InstrToDFSNum(D); 3643 DFSOrderedSet.push_back(VDDef); 3644 // If there is a phi node equivalent, add it 3645 if (auto *PN = RealToTemp.lookup(Def)) { 3646 auto *PHIE = 3647 dyn_cast_or_null<PHIExpression>(ValueToExpression.lookup(Def)); 3648 if (PHIE) { 3649 VDDef.Def.setInt(false); 3650 VDDef.Def.setPointer(PN); 3651 VDDef.LocalNum = 0; 3652 DFSOrderedSet.push_back(VDDef); 3653 } 3654 } 3655 3656 unsigned int UseCount = 0; 3657 // Now add the uses. 3658 for (auto &U : Def->uses()) { 3659 if (auto *I = dyn_cast<Instruction>(U.getUser())) { 3660 // Don't try to replace into dead uses 3661 if (InstructionsToErase.count(I)) 3662 continue; 3663 ValueDFS VDUse; 3664 // Put the phi node uses in the incoming block. 3665 BasicBlock *IBlock; 3666 if (auto *P = dyn_cast<PHINode>(I)) { 3667 IBlock = P->getIncomingBlock(U); 3668 // Make phi node users appear last in the incoming block 3669 // they are from. 3670 VDUse.LocalNum = InstrDFS.size() + 1; 3671 } else { 3672 IBlock = getBlockForValue(I); 3673 VDUse.LocalNum = InstrToDFSNum(I); 3674 } 3675 3676 // Skip uses in unreachable blocks, as we're going 3677 // to delete them. 3678 if (!ReachableBlocks.contains(IBlock)) 3679 continue; 3680 3681 DomTreeNode *DomNode = DT->getNode(IBlock); 3682 VDUse.DFSIn = DomNode->getDFSNumIn(); 3683 VDUse.DFSOut = DomNode->getDFSNumOut(); 3684 VDUse.U = &U; 3685 ++UseCount; 3686 DFSOrderedSet.emplace_back(VDUse); 3687 } 3688 } 3689 3690 // If there are no uses, it's probably dead (but it may have side-effects, 3691 // so not definitely dead. Otherwise, store the number of uses so we can 3692 // track if it becomes dead later). 3693 if (UseCount == 0) 3694 ProbablyDead.insert(Def); 3695 else 3696 UseCounts[Def] = UseCount; 3697 } 3698 } 3699 3700 // This function converts the set of members for a congruence class from values, 3701 // to the set of defs for loads and stores, with associated DFS info. 3702 void NewGVN::convertClassToLoadsAndStores( 3703 const CongruenceClass &Dense, 3704 SmallVectorImpl<ValueDFS> &LoadsAndStores) const { 3705 for (auto *D : Dense) { 3706 if (!isa<LoadInst>(D) && !isa<StoreInst>(D)) 3707 continue; 3708 3709 BasicBlock *BB = getBlockForValue(D); 3710 ValueDFS VD; 3711 DomTreeNode *DomNode = DT->getNode(BB); 3712 VD.DFSIn = DomNode->getDFSNumIn(); 3713 VD.DFSOut = DomNode->getDFSNumOut(); 3714 VD.Def.setPointer(D); 3715 3716 // If it's an instruction, use the real local dfs number. 
3717 if (auto *I = dyn_cast<Instruction>(D)) 3718 VD.LocalNum = InstrToDFSNum(I); 3719 else 3720 llvm_unreachable("Should have been an instruction"); 3721 3722 LoadsAndStores.emplace_back(VD); 3723 } 3724 } 3725 3726 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) { 3727 patchReplacementInstruction(I, Repl); 3728 I->replaceAllUsesWith(Repl); 3729 } 3730 3731 void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) { 3732 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB); 3733 ++NumGVNBlocksDeleted; 3734 3735 // Delete the instructions backwards, as it has a reduced likelihood of having 3736 // to update as many def-use and use-def chains. Start after the terminator. 3737 auto StartPoint = BB->rbegin(); 3738 ++StartPoint; 3739 // Note that we explicitly recalculate BB->rend() on each iteration, 3740 // as it may change when we remove the first instruction. 3741 for (BasicBlock::reverse_iterator I(StartPoint); I != BB->rend();) { 3742 Instruction &Inst = *I++; 3743 if (!Inst.use_empty()) 3744 Inst.replaceAllUsesWith(PoisonValue::get(Inst.getType())); 3745 if (isa<LandingPadInst>(Inst)) 3746 continue; 3747 salvageKnowledge(&Inst, AC); 3748 3749 Inst.eraseFromParent(); 3750 ++NumGVNInstrDeleted; 3751 } 3752 // Now insert something that simplifycfg will turn into an unreachable. 3753 Type *Int8Ty = Type::getInt8Ty(BB->getContext()); 3754 new StoreInst( 3755 PoisonValue::get(Int8Ty), 3756 Constant::getNullValue(PointerType::getUnqual(BB->getContext())), 3757 BB->getTerminator()->getIterator()); 3758 } 3759 3760 void NewGVN::markInstructionForDeletion(Instruction *I) { 3761 LLVM_DEBUG(dbgs() << "Marking " << *I << " for deletion\n"); 3762 InstructionsToErase.insert(I); 3763 } 3764 3765 void NewGVN::replaceInstruction(Instruction *I, Value *V) { 3766 LLVM_DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n"); 3767 patchAndReplaceAllUsesWith(I, V); 3768 // We save the actual erasing to avoid invalidating memory 3769 // dependencies until we are done with everything. 3770 markInstructionForDeletion(I); 3771 } 3772 3773 namespace { 3774 3775 // This is a stack that contains both the value and dfs info of where 3776 // that value is valid. 3777 class ValueDFSStack { 3778 public: 3779 Value *back() const { return ValueStack.back(); } 3780 std::pair<int, int> dfs_back() const { return DFSStack.back(); } 3781 3782 void push_back(Value *V, int DFSIn, int DFSOut) { 3783 ValueStack.emplace_back(V); 3784 DFSStack.emplace_back(DFSIn, DFSOut); 3785 } 3786 3787 bool empty() const { return DFSStack.empty(); } 3788 3789 bool isInScope(int DFSIn, int DFSOut) const { 3790 if (empty()) 3791 return false; 3792 return DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second; 3793 } 3794 3795 void popUntilDFSScope(int DFSIn, int DFSOut) { 3796 3797 // These two should always be in sync at this point. 3798 assert(ValueStack.size() == DFSStack.size() && 3799 "Mismatch between ValueStack and DFSStack"); 3800 while ( 3801 !DFSStack.empty() && 3802 !(DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second)) { 3803 DFSStack.pop_back(); 3804 ValueStack.pop_back(); 3805 } 3806 } 3807 3808 private: 3809 SmallVector<Value *, 8> ValueStack; 3810 SmallVector<std::pair<int, int>, 8> DFSStack; 3811 }; 3812 3813 } // end anonymous namespace 3814 3815 // Given an expression, get the congruence class for it. 
3816 CongruenceClass *NewGVN::getClassForExpression(const Expression *E) const { 3817 if (auto *VE = dyn_cast<VariableExpression>(E)) 3818 return ValueToClass.lookup(VE->getVariableValue()); 3819 else if (isa<DeadExpression>(E)) 3820 return TOPClass; 3821 return ExpressionToClass.lookup(E); 3822 } 3823 3824 // Given a value and a basic block we are trying to see if it is available in, 3825 // see if the value has a leader available in that block. 3826 Value *NewGVN::findPHIOfOpsLeader(const Expression *E, 3827 const Instruction *OrigInst, 3828 const BasicBlock *BB) const { 3829 // It would already be constant if we could make it constant 3830 if (auto *CE = dyn_cast<ConstantExpression>(E)) 3831 return CE->getConstantValue(); 3832 if (auto *VE = dyn_cast<VariableExpression>(E)) { 3833 auto *V = VE->getVariableValue(); 3834 if (alwaysAvailable(V) || DT->dominates(getBlockForValue(V), BB)) 3835 return VE->getVariableValue(); 3836 } 3837 3838 auto *CC = getClassForExpression(E); 3839 if (!CC) 3840 return nullptr; 3841 if (alwaysAvailable(CC->getLeader())) 3842 return CC->getLeader(); 3843 3844 for (auto *Member : *CC) { 3845 auto *MemberInst = dyn_cast<Instruction>(Member); 3846 if (MemberInst == OrigInst) 3847 continue; 3848 // Anything that isn't an instruction is always available. 3849 if (!MemberInst) 3850 return Member; 3851 if (DT->dominates(getBlockForValue(MemberInst), BB)) 3852 return Member; 3853 } 3854 return nullptr; 3855 } 3856 3857 // Return true iff V1 can be replaced with V2. 3858 static bool canBeReplacedBy(Value *V1, Value *V2) { 3859 if (auto *CB1 = dyn_cast<CallBase>(V1)) 3860 if (auto *CB2 = dyn_cast<CallBase>(V2)) 3861 return CB1->getAttributes() 3862 .intersectWith(CB2->getContext(), CB2->getAttributes()) 3863 .has_value(); 3864 return true; 3865 } 3866 3867 bool NewGVN::eliminateInstructions(Function &F) { 3868 // This is a non-standard eliminator. The normal way to eliminate is 3869 // to walk the dominator tree in order, keeping track of available 3870 // values, and eliminating them. However, this is mildly 3871 // pointless. It requires doing lookups on every instruction, 3872 // regardless of whether we will ever eliminate it. For 3873 // instructions part of most singleton congruence classes, we know we 3874 // will never eliminate them. 3875 3876 // Instead, this eliminator looks at the congruence classes directly, sorts 3877 // them into a DFS ordering of the dominator tree, and then we just 3878 // perform elimination straight on the sets by walking the congruence 3879 // class member uses in order, and eliminate the ones dominated by the 3880 // last member. This is worst case O(E log E) where E = number of 3881 // instructions in a single congruence class. In theory, this is all 3882 // instructions. In practice, it is much faster, as most instructions are 3883 // either in singleton congruence classes or can't possibly be eliminated 3884 // anyway (if there are no overlapping DFS ranges in class). 3885 // When we find something not dominated, it becomes the new leader 3886 // for elimination purposes. 3887 // TODO: If we wanted to be faster, We could remove any members with no 3888 // overlapping ranges while sorting, as we will never eliminate anything 3889 // with those members, as they don't dominate anything else in our set. 3890 3891 bool AnythingReplaced = false; 3892 3893 // Since we are going to walk the domtree anyway, and we can't guarantee the 3894 // DFS numbers are updated, we compute some ourselves. 
3895 DT->updateDFSNumbers(); 3896 3897 // Go through all of our phi nodes, and kill the arguments associated with 3898 // unreachable edges. 3899 auto ReplaceUnreachablePHIArgs = [&](PHINode *PHI, BasicBlock *BB) { 3900 for (auto &Operand : PHI->incoming_values()) 3901 if (!ReachableEdges.count({PHI->getIncomingBlock(Operand), BB})) { 3902 LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI 3903 << " for block " 3904 << getBlockName(PHI->getIncomingBlock(Operand)) 3905 << " with poison due to it being unreachable\n"); 3906 Operand.set(PoisonValue::get(PHI->getType())); 3907 } 3908 }; 3909 // Replace unreachable phi arguments. 3910 // At this point, RevisitOnReachabilityChange only contains: 3911 // 3912 // 1. PHIs 3913 // 2. Temporaries that will convert to PHIs 3914 // 3. Operations that are affected by an unreachable edge but do not fit into 3915 // 1 or 2 (rare). 3916 // So it is a slight overshoot of what we want. We could make it exact by 3917 // using two SparseBitVectors per block. 3918 DenseMap<const BasicBlock *, unsigned> ReachablePredCount; 3919 for (auto &KV : ReachableEdges) 3920 ReachablePredCount[KV.getEnd()]++; 3921 for (auto &BBPair : RevisitOnReachabilityChange) { 3922 for (auto InstNum : BBPair.second) { 3923 auto *Inst = InstrFromDFSNum(InstNum); 3924 auto *PHI = dyn_cast<PHINode>(Inst); 3925 PHI = PHI ? PHI : dyn_cast_or_null<PHINode>(RealToTemp.lookup(Inst)); 3926 if (!PHI) 3927 continue; 3928 auto *BB = BBPair.first; 3929 if (ReachablePredCount.lookup(BB) != PHI->getNumIncomingValues()) 3930 ReplaceUnreachablePHIArgs(PHI, BB); 3931 } 3932 } 3933 3934 // Map to store the use counts 3935 DenseMap<const Value *, unsigned int> UseCounts; 3936 for (auto *CC : reverse(CongruenceClasses)) { 3937 LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID() 3938 << "\n"); 3939 // Track the equivalent store info so we can decide whether to try 3940 // dead store elimination. 3941 SmallVector<ValueDFS, 8> PossibleDeadStores; 3942 SmallPtrSet<Instruction *, 8> ProbablyDead; 3943 if (CC->isDead() || CC->empty()) 3944 continue; 3945 // Everything still in the TOP class is unreachable or dead. 3946 if (CC == TOPClass) { 3947 for (auto *M : *CC) { 3948 auto *VTE = ValueToExpression.lookup(M); 3949 if (VTE && isa<DeadExpression>(VTE)) 3950 markInstructionForDeletion(cast<Instruction>(M)); 3951 assert((!ReachableBlocks.count(cast<Instruction>(M)->getParent()) || 3952 InstructionsToErase.count(cast<Instruction>(M))) && 3953 "Everything in TOP should be unreachable or dead at this " 3954 "point"); 3955 } 3956 continue; 3957 } 3958 3959 assert(CC->getLeader() && "We should have had a leader"); 3960 // If this is a leader that is always available, and it's a 3961 // constant or has no equivalences, just replace everything with 3962 // it. We then update the congruence class with whatever members 3963 // are left. 3964 Value *Leader = 3965 CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader(); 3966 if (alwaysAvailable(Leader)) { 3967 CongruenceClass::MemberSet MembersLeft; 3968 for (auto *M : *CC) { 3969 Value *Member = M; 3970 // Void things have no uses we can replace. 
3971 if (Member == Leader || !isa<Instruction>(Member) || 3972 Member->getType()->isVoidTy()) { 3973 MembersLeft.insert(Member); 3974 continue; 3975 } 3976 if (!canBeReplacedBy(Member, Leader)) 3977 continue; 3978 3979 LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader) << " for " 3980 << *Member << "\n"); 3981 auto *I = cast<Instruction>(Member); 3982 assert(Leader != I && "About to accidentally remove our leader"); 3983 replaceInstruction(I, Leader); 3984 AnythingReplaced = true; 3985 } 3986 CC->swap(MembersLeft); 3987 } else { 3988 // If this is a singleton, we can skip it. 3989 if (CC->size() != 1 || RealToTemp.count(Leader)) { 3990 // This is a stack because equality replacement/etc may place 3991 // constants in the middle of the member list, and we want to use 3992 // those constant values in preference to the current leader, over 3993 // the scope of those constants. 3994 ValueDFSStack EliminationStack; 3995 3996 // Convert the members to DFS ordered sets and then merge them. 3997 SmallVector<ValueDFS, 8> DFSOrderedSet; 3998 convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead); 3999 4000 // Sort the whole thing. 4001 llvm::sort(DFSOrderedSet); 4002 for (auto &VD : DFSOrderedSet) { 4003 int MemberDFSIn = VD.DFSIn; 4004 int MemberDFSOut = VD.DFSOut; 4005 Value *Def = VD.Def.getPointer(); 4006 bool FromStore = VD.Def.getInt(); 4007 Use *U = VD.U; 4008 // We ignore void things because we can't get a value from them. 4009 if (Def && Def->getType()->isVoidTy()) 4010 continue; 4011 auto *DefInst = dyn_cast_or_null<Instruction>(Def); 4012 if (DefInst && AllTempInstructions.count(DefInst)) { 4013 auto *PN = cast<PHINode>(DefInst); 4014 4015 // If this is a value phi and that's the expression we used, insert 4016 // it into the program 4017 // remove from temp instruction list. 4018 AllTempInstructions.erase(PN); 4019 auto *DefBlock = getBlockForValue(Def); 4020 LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def 4021 << " into block " 4022 << getBlockName(getBlockForValue(Def)) << "\n"); 4023 PN->insertBefore(&DefBlock->front()); 4024 Def = PN; 4025 NumGVNPHIOfOpsEliminations++; 4026 } 4027 4028 if (EliminationStack.empty()) { 4029 LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n"); 4030 } else { 4031 LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are (" 4032 << EliminationStack.dfs_back().first << "," 4033 << EliminationStack.dfs_back().second << ")\n"); 4034 } 4035 4036 LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << "," 4037 << MemberDFSOut << ")\n"); 4038 // First, we see if we are out of scope or empty. If so, 4039 // and there equivalences, we try to replace the top of 4040 // stack with equivalences (if it's on the stack, it must 4041 // not have been eliminated yet). 4042 // Then we synchronize to our current scope, by 4043 // popping until we are back within a DFS scope that 4044 // dominates the current member. 4045 // Then, what happens depends on a few factors 4046 // If the stack is now empty, we need to push 4047 // If we have a constant or a local equivalence we want to 4048 // start using, we also push. 4049 // Otherwise, we walk along, processing members who are 4050 // dominated by this scope, and eliminate them. 4051 bool ShouldPush = Def && EliminationStack.empty(); 4052 bool OutOfScope = 4053 !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut); 4054 4055 if (OutOfScope || ShouldPush) { 4056 // Sync to our current scope. 
4057 EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut); 4058 bool ShouldPush = Def && EliminationStack.empty(); 4059 if (ShouldPush) { 4060 EliminationStack.push_back(Def, MemberDFSIn, MemberDFSOut); 4061 } 4062 } 4063 4064 // Skip the Def's, we only want to eliminate on their uses. But mark 4065 // dominated defs as dead. 4066 if (Def) { 4067 // For anything in this case, what and how we value number 4068 // guarantees that any side-effects that would have occurred (ie 4069 // throwing, etc) can be proven to either still occur (because it's 4070 // dominated by something that has the same side-effects), or never 4071 // occur. Otherwise, we would not have been able to prove it value 4072 // equivalent to something else. For these things, we can just mark 4073 // it all dead. Note that this is different from the "ProbablyDead" 4074 // set, which may not be dominated by anything, and thus, are only 4075 // easy to prove dead if they are also side-effect free. Note that 4076 // because stores are put in terms of the stored value, we skip 4077 // stored values here. If the stored value is really dead, it will 4078 // still be marked for deletion when we process it in its own class. 4079 auto *DefI = dyn_cast<Instruction>(Def); 4080 if (!EliminationStack.empty() && DefI && !FromStore) { 4081 Value *DominatingLeader = EliminationStack.back(); 4082 if (DominatingLeader != Def) { 4083 // Even if the instruction is removed, we still need to update 4084 // flags/metadata due to downstreams users of the leader. 4085 if (!match(DefI, m_Intrinsic<Intrinsic::ssa_copy>())) { 4086 if (!canBeReplacedBy(DefI, DominatingLeader)) 4087 continue; 4088 patchReplacementInstruction(DefI, DominatingLeader); 4089 } 4090 4091 markInstructionForDeletion(DefI); 4092 } 4093 } 4094 continue; 4095 } 4096 // At this point, we know it is a Use we are trying to possibly 4097 // replace. 4098 4099 assert(isa<Instruction>(U->get()) && 4100 "Current def should have been an instruction"); 4101 assert(isa<Instruction>(U->getUser()) && 4102 "Current user should have been an instruction"); 4103 4104 // If the thing we are replacing into is already marked to be dead, 4105 // this use is dead. Note that this is true regardless of whether 4106 // we have anything dominating the use or not. We do this here 4107 // because we are already walking all the uses anyway. 4108 Instruction *InstUse = cast<Instruction>(U->getUser()); 4109 if (InstructionsToErase.count(InstUse)) { 4110 auto &UseCount = UseCounts[U->get()]; 4111 if (--UseCount == 0) { 4112 ProbablyDead.insert(cast<Instruction>(U->get())); 4113 } 4114 } 4115 4116 // If we get to this point, and the stack is empty we must have a use 4117 // with nothing we can use to eliminate this use, so just skip it. 4118 if (EliminationStack.empty()) 4119 continue; 4120 4121 Value *DominatingLeader = EliminationStack.back(); 4122 4123 auto *II = dyn_cast<IntrinsicInst>(DominatingLeader); 4124 bool isSSACopy = II && II->getIntrinsicID() == Intrinsic::ssa_copy; 4125 if (isSSACopy) 4126 DominatingLeader = II->getOperand(0); 4127 4128 // Don't replace our existing users with ourselves. 4129 if (U->get() == DominatingLeader) 4130 continue; 4131 4132 // If we replaced something in an instruction, handle the patching of 4133 // metadata. Skip this if we are replacing predicateinfo with its 4134 // original operand, as we already know we can just drop it. 
4135 auto *ReplacedInst = cast<Instruction>(U->get());
4136 auto *PI = PredInfo->getPredicateInfoFor(ReplacedInst);
4137 if (!PI || DominatingLeader != PI->OriginalOp) {
4138 if (!canBeReplacedBy(ReplacedInst, DominatingLeader))
4139 continue;
4140 patchReplacementInstruction(ReplacedInst, DominatingLeader);
4141 }
4142
4143 LLVM_DEBUG(dbgs()
4144 << "Found replacement " << *DominatingLeader << " for "
4145 << *U->get() << " in " << *(U->getUser()) << "\n");
4146 U->set(DominatingLeader);
4147 // This is now a use of the dominating leader, which means if the
4148 // dominating leader was dead, it's now live!
4149 auto &LeaderUseCount = UseCounts[DominatingLeader];
4150 // It's about to be alive again.
4151 if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader))
4152 ProbablyDead.erase(cast<Instruction>(DominatingLeader));
4153 // For copy instructions, we use their operand as a leader,
4154 // which means we remove a user of the copy and it may become dead.
4155 if (isSSACopy) {
4156 auto It = UseCounts.find(II);
4157 if (It != UseCounts.end()) {
4158 unsigned &IIUseCount = It->second;
4159 if (--IIUseCount == 0)
4160 ProbablyDead.insert(II);
4161 }
4162 }
4163 ++LeaderUseCount;
4164 AnythingReplaced = true;
4165 }
4166 }
4167 }
4168
4169 // At this point, anything still in the ProbablyDead set is actually dead if
4170 // it would be trivially dead.
4171 for (auto *I : ProbablyDead)
4172 if (wouldInstructionBeTriviallyDead(I))
4173 markInstructionForDeletion(I);
4174
4175 // Cleanup the congruence class.
4176 CongruenceClass::MemberSet MembersLeft;
4177 for (auto *Member : *CC)
4178 if (!isa<Instruction>(Member) ||
4179 !InstructionsToErase.count(cast<Instruction>(Member)))
4180 MembersLeft.insert(Member);
4181 CC->swap(MembersLeft);
4182
4183 // If we have possible dead stores to look at, try to eliminate them.
4184 if (CC->getStoreCount() > 0) {
4185 convertClassToLoadsAndStores(*CC, PossibleDeadStores);
4186 llvm::sort(PossibleDeadStores);
4187 ValueDFSStack EliminationStack;
4188 for (auto &VD : PossibleDeadStores) {
4189 int MemberDFSIn = VD.DFSIn;
4190 int MemberDFSOut = VD.DFSOut;
4191 Instruction *Member = cast<Instruction>(VD.Def.getPointer());
4192 if (EliminationStack.empty() ||
4193 !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut)) {
4194 // Sync to our current scope.
4195 EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
4196 if (EliminationStack.empty()) {
4197 EliminationStack.push_back(Member, MemberDFSIn, MemberDFSOut);
4198 continue;
4199 }
4200 }
4201 // We already did load elimination, so nothing to do here.
4202 if (isa<LoadInst>(Member))
4203 continue;
4204 assert(!EliminationStack.empty());
4205 Instruction *Leader = cast<Instruction>(EliminationStack.back());
4206 (void)Leader;
4207 assert(DT->dominates(Leader->getParent(), Member->getParent()));
4208 // Member is dominated by Leader, and thus dead.
4209 LLVM_DEBUG(dbgs() << "Marking dead store " << *Member
4210 << " that is dominated by " << *Leader << "\n");
4211 markInstructionForDeletion(Member);
4212 CC->erase(Member);
4213 ++NumGVNDeadStores;
4214 }
4215 }
4216 }
4217 return AnythingReplaced;
4218 }
4219
4220 // This function provides global ranking of operations so that we can place them
4221 // in a canonical order. Note that rank alone is not necessarily enough for a
4222 // complete ordering, as constants all have the same rank. However, generally,
4223 // we will simplify an operation with all constants so that it doesn't matter
4224 // what order they appear in.
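// For example, with the ranks computed below: plain constants rank 0, poison
// ranks 1, undef ranks 2, constant expressions rank 3, arguments rank 4 plus
// their argument number, and instructions rank after all arguments, ordered
// by their DFS number.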
4225 unsigned int NewGVN::getRank(const Value *V) const { 4226 // Prefer constants to undef to anything else 4227 // Undef is a constant, have to check it first. 4228 // Prefer poison to undef as it's less defined. 4229 // Prefer smaller constants to constantexprs 4230 // Note that the order here matters because of class inheritance 4231 if (isa<ConstantExpr>(V)) 4232 return 3; 4233 if (isa<PoisonValue>(V)) 4234 return 1; 4235 if (isa<UndefValue>(V)) 4236 return 2; 4237 if (isa<Constant>(V)) 4238 return 0; 4239 if (auto *A = dyn_cast<Argument>(V)) 4240 return 4 + A->getArgNo(); 4241 4242 // Need to shift the instruction DFS by number of arguments + 5 to account for 4243 // the constant and argument ranking above. 4244 unsigned Result = InstrToDFSNum(V); 4245 if (Result > 0) 4246 return 5 + NumFuncArgs + Result; 4247 // Unreachable or something else, just return a really large number. 4248 return ~0; 4249 } 4250 4251 // This is a function that says whether two commutative operations should 4252 // have their order swapped when canonicalizing. 4253 bool NewGVN::shouldSwapOperands(const Value *A, const Value *B) const { 4254 // Because we only care about a total ordering, and don't rewrite expressions 4255 // in this order, we order by rank, which will give a strict weak ordering to 4256 // everything but constants, and then we order by pointer address. 4257 return std::make_pair(getRank(A), A) > std::make_pair(getRank(B), B); 4258 } 4259 4260 bool NewGVN::shouldSwapOperandsForIntrinsic(const Value *A, const Value *B, 4261 const IntrinsicInst *I) const { 4262 auto LookupResult = IntrinsicInstPred.find(I); 4263 if (shouldSwapOperands(A, B)) { 4264 if (LookupResult == IntrinsicInstPred.end()) 4265 IntrinsicInstPred.insert({I, B}); 4266 else 4267 LookupResult->second = B; 4268 return true; 4269 } 4270 4271 if (LookupResult != IntrinsicInstPred.end()) { 4272 auto *SeenPredicate = LookupResult->second; 4273 if (SeenPredicate) { 4274 if (SeenPredicate == B) 4275 return true; 4276 else 4277 LookupResult->second = nullptr; 4278 } 4279 } 4280 return false; 4281 } 4282 4283 PreservedAnalyses NewGVNPass::run(Function &F, AnalysisManager<Function> &AM) { 4284 // Apparently the order in which we get these results matter for 4285 // the old GVN (see Chandler's comment in GVN.cpp). I'll keep 4286 // the same order here, just in case. 4287 auto &AC = AM.getResult<AssumptionAnalysis>(F); 4288 auto &DT = AM.getResult<DominatorTreeAnalysis>(F); 4289 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F); 4290 auto &AA = AM.getResult<AAManager>(F); 4291 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA(); 4292 bool Changed = 4293 NewGVN(F, &DT, &AC, &TLI, &AA, &MSSA, F.getDataLayout()) 4294 .runGVN(); 4295 if (!Changed) 4296 return PreservedAnalyses::all(); 4297 PreservedAnalyses PA; 4298 PA.preserve<DominatorTreeAnalysis>(); 4299 return PA; 4300 } 4301