//===- GVNHoist.cpp - Hoist scalar and load expressions ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass hoists expressions from branches to a common dominator. It uses
// GVN (global value numbering) to discover expressions computing the same
// values. The primary goal is to reduce code size, and in some cases to
// reduce the critical path (by exposing more ILP).
// Hoisting may affect performance in some cases. To mitigate that, hoisting
// is disabled in the following cases:
// 1. Scalars across calls.
// 2. GEPs when the corresponding load/store cannot be hoisted.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/GVN.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemorySSA.h"

using namespace llvm;

#define DEBUG_TYPE "gvn-hoist"

STATISTIC(NumHoisted, "Number of instructions hoisted");
STATISTIC(NumRemoved, "Number of instructions removed");
STATISTIC(NumLoadsHoisted, "Number of loads hoisted");
STATISTIC(NumLoadsRemoved, "Number of loads removed");
STATISTIC(NumStoresHoisted, "Number of stores hoisted");
STATISTIC(NumStoresRemoved, "Number of stores removed");
STATISTIC(NumCallsHoisted, "Number of calls hoisted");
STATISTIC(NumCallsRemoved, "Number of calls removed");

static cl::opt<int>
    MaxHoistedThreshold("gvn-max-hoisted", cl::Hidden, cl::init(-1),
                        cl::desc("Max number of instructions to hoist "
                                 "(default unlimited = -1)"));

static cl::opt<int> MaxNumberOfBBSInPath(
    "gvn-hoist-max-bbs", cl::Hidden, cl::init(4),
    cl::desc("Max number of basic blocks on the path between "
             "hoisting locations (default = 4, unlimited = -1)"));

static cl::opt<int> MaxDepthInBB(
    "gvn-hoist-max-depth", cl::Hidden, cl::init(100),
    cl::desc("Hoist instructions from the beginning of the BB up to the "
             "maximum specified depth (default = 100, unlimited = -1)"));

namespace {

// Provides a sorting function based on the execution order of two
// instructions.
struct SortByDFSIn {
private:
  DenseMap<const Value *, unsigned> &DFSNumber;

public:
  SortByDFSIn(DenseMap<const Value *, unsigned> &D) : DFSNumber(D) {}

  // Returns true when A executes before B.
  bool operator()(const Instruction *A, const Instruction *B) const {
    // FIXME: libc++ has a std::sort() algorithm that will call the compare
    // function on the same element. Once PR20837 is fixed and some more years
    // pass by and all the buildbots have moved to a corrected std::sort(),
    // enable the following assert:
    //
    // assert(A != B);

    unsigned ADFS = DFSNumber.lookup(A);
    unsigned BDFS = DFSNumber.lookup(B);
    assert(ADFS && BDFS);
    return ADFS < BDFS;
  }
};

// A map from a pair of VNs to all the instructions with those VNs.
typedef DenseMap<std::pair<unsigned, unsigned>, SmallVector<Instruction *, 4>>
    VNtoInsns;
// An invalid value number used when inserting a single value number into
// VNtoInsns.
enum : unsigned { InvalidVN = ~2U };
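// Illustrative note (summary, not an exhaustive contract): the pair of value
// numbers lets a single table describe several kinds of candidates. Scalars,
// loads, and calls are keyed as {VN, InvalidVN}; stores are keyed as
// {VN(pointer operand), VN(stored value)}, so two stores are only grouped
// when both the address and the stored value are known to compute the same
// values.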
// Records all scalar instruction candidates for code hoisting.
class InsnInfo {
  VNtoInsns VNtoScalars;

public:
  // Inserts I and its value number in VNtoScalars.
  void insert(Instruction *I, GVN::ValueTable &VN) {
    // Scalar instruction.
    unsigned V = VN.lookupOrAdd(I);
    VNtoScalars[{V, InvalidVN}].push_back(I);
  }

  const VNtoInsns &getVNTable() const { return VNtoScalars; }
};

// Records all load instruction candidates for code hoisting.
class LoadInfo {
  VNtoInsns VNtoLoads;

public:
  // Insert Load and the value number of its memory address in VNtoLoads.
  void insert(LoadInst *Load, GVN::ValueTable &VN) {
    if (Load->isSimple()) {
      unsigned V = VN.lookupOrAdd(Load->getPointerOperand());
      VNtoLoads[{V, InvalidVN}].push_back(Load);
    }
  }

  const VNtoInsns &getVNTable() const { return VNtoLoads; }
};

// Records all store instruction candidates for code hoisting.
class StoreInfo {
  VNtoInsns VNtoStores;

public:
  // Insert the Store and a hash number of the store address and the stored
  // value in VNtoStores.
  void insert(StoreInst *Store, GVN::ValueTable &VN) {
    if (!Store->isSimple())
      return;
    // Hash the store address and the stored value.
    Value *Ptr = Store->getPointerOperand();
    Value *Val = Store->getValueOperand();
    VNtoStores[{VN.lookupOrAdd(Ptr), VN.lookupOrAdd(Val)}].push_back(Store);
  }

  const VNtoInsns &getVNTable() const { return VNtoStores; }
};

// Records all call instruction candidates for code hoisting.
class CallInfo {
  VNtoInsns VNtoCallsScalars;
  VNtoInsns VNtoCallsLoads;
  VNtoInsns VNtoCallsStores;

public:
  // Insert Call and its value number in one of the VNtoCalls* containers.
  void insert(CallInst *Call, GVN::ValueTable &VN) {
    // A call that doesNotAccessMemory is handled as a Scalar,
    // a call that onlyReadsMemory is handled as a Load instruction,
    // all other calls are handled as stores.
    unsigned V = VN.lookupOrAdd(Call);
    auto Entry = std::make_pair(V, InvalidVN);

    if (Call->doesNotAccessMemory())
      VNtoCallsScalars[Entry].push_back(Call);
    else if (Call->onlyReadsMemory())
      VNtoCallsLoads[Entry].push_back(Call);
    else
      VNtoCallsStores[Entry].push_back(Call);
  }

  const VNtoInsns &getScalarVNTable() const { return VNtoCallsScalars; }

  const VNtoInsns &getLoadVNTable() const { return VNtoCallsLoads; }

  const VNtoInsns &getStoreVNTable() const { return VNtoCallsStores; }
};

typedef DenseMap<const BasicBlock *, bool> BBSideEffectsSet;
typedef SmallVector<Instruction *, 4> SmallVecInsn;
typedef SmallVectorImpl<Instruction *> SmallVecImplInsn;

static void combineKnownMetadata(Instruction *ReplInst, Instruction *I) {
  static const unsigned KnownIDs[] = {
      LLVMContext::MD_tbaa,    LLVMContext::MD_alias_scope,
      LLVMContext::MD_noalias, LLVMContext::MD_range,
      LLVMContext::MD_fpmath,  LLVMContext::MD_invariant_load,
      LLVMContext::MD_invariant_group};
  combineMetadata(ReplInst, I, KnownIDs);
}

// This pass hoists common computations across branches sharing a common
// dominator. The primary goal is to reduce code size, and in some cases to
// reduce the critical path (by exposing more ILP).
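// Illustrative sketch (hypothetical IR, not taken from an actual test case):
// before hoisting, both branches compute the same value:
//
//   if.then:
//     %a = add i32 %x, %y
//     ...
//   if.else:
//     %b = add i32 %x, %y
//     ...
//
// GVN gives %a and %b the same value number, so the pass moves one copy into
// the common dominator (the block ending in the conditional branch) and
// erases the other copy after renaming its uses.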
class GVNHoist {
public:
  GVNHoist(DominatorTree *Dt, AliasAnalysis *Aa, MemoryDependenceResults *Md,
           bool OptForMinSize)
      : DT(Dt), AA(Aa), MD(Md), OptForMinSize(OptForMinSize),
        HoistingGeps(OptForMinSize), HoistedCtr(0) {}

  bool run(Function &F) {
    VN.setDomTree(DT);
    VN.setAliasAnalysis(AA);
    VN.setMemDep(MD);
    bool Res = false;
    MemorySSA M(F, AA, DT);
    MSSA = &M;

    // FIXME: use lazy evaluation of VN to avoid the fix-point computation.
    while (true) {
      // Perform DFS numbering of instructions.
      unsigned I = 0;
      for (const BasicBlock *BB : depth_first(&F.getEntryBlock()))
        for (auto &Inst : *BB)
          DFSNumber.insert({&Inst, ++I});

      auto HoistStat = hoistExpressions(F);
      if (HoistStat.first + HoistStat.second == 0)
        return Res;

      if (HoistStat.second > 0)
        // To address a limitation of the current GVN, we need to rerun the
        // hoisting after we hoisted loads or stores in order to be able to
        // hoist all scalars dependent on the hoisted ld/st.
        VN.clear();

      Res = true;

      // DFS numbers change when instructions are hoisted: clear and recompute.
      DFSNumber.clear();
    }

    return Res;
  }

private:
  GVN::ValueTable VN;
  DominatorTree *DT;
  AliasAnalysis *AA;
  MemoryDependenceResults *MD;
  const bool OptForMinSize;
  const bool HoistingGeps;
  DenseMap<const Value *, unsigned> DFSNumber;
  BBSideEffectsSet BBSideEffects;
  MemorySSA *MSSA;
  int HoistedCtr;

  enum InsKind { Unknown, Scalar, Load, Store };

  // Return true when there is exception handling in BB.
  bool hasEH(const BasicBlock *BB) {
    auto It = BBSideEffects.find(BB);
    if (It != BBSideEffects.end())
      return It->second;

    if (BB->isEHPad() || BB->hasAddressTaken()) {
      BBSideEffects[BB] = true;
      return true;
    }

    if (BB->getTerminator()->mayThrow()) {
      BBSideEffects[BB] = true;
      return true;
    }

    BBSideEffects[BB] = false;
    return false;
  }

  // Return true when all paths from A to the end of the function pass through
  // either B or C.
  bool hoistingFromAllPaths(const BasicBlock *A, const BasicBlock *B,
                            const BasicBlock *C) {
    // We fully copy the WL in order to be able to remove items from it.
    SmallPtrSet<const BasicBlock *, 2> WL;
    WL.insert(B);
    WL.insert(C);

    for (auto It = df_begin(A), E = df_end(A); It != E;) {
      // There exists a path from A to the exit of the function if we are
      // still iterating in DF traversal and we removed all blocks from the
      // work list.
      if (WL.empty())
        return false;

      const BasicBlock *BB = *It;
      if (WL.erase(BB)) {
        // Stop DFS traversal when BB is in the work list.
        It.skipChildren();
        continue;
      }

      // Check for end of function, calls that do not return, etc.
      if (!isGuaranteedToTransferExecutionToSuccessor(BB->getTerminator()))
        return false;

      // Increment DFS traversal when not skipping children.
      ++It;
    }

    return true;
  }

  // Return true when I1 appears before I2 in the instructions of BB.
  bool firstInBB(const Instruction *I1, const Instruction *I2) {
    assert(I1->getParent() == I2->getParent());
    unsigned I1DFS = DFSNumber.lookup(I1);
    unsigned I2DFS = DFSNumber.lookup(I2);
    assert(I1DFS && I2DFS);
    return I1DFS < I2DFS;
  }
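  // Illustrative note (simplified sketch): for a diamond CFG
  //
  //        A
  //       / \
  //      B   C
  //       \ /
  //        D ... ret
  //
  // hoistingFromAllPaths(A, B, C) returns true because every path from A to
  // the function exit goes through B or C, so an expression computed in both
  // B and C is needed on all paths and may be hoisted into A.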
  // Return true when there are memory uses of Def in BB.
  bool hasMemoryUseOnPath(MemoryAccess *Def, const BasicBlock *BB,
                          const Instruction *OldPt) {
    const BasicBlock *DefBB = Def->getBlock();
    const BasicBlock *OldBB = OldPt->getParent();

    for (User *U : Def->users())
      if (auto *MU = dyn_cast<MemoryUse>(U)) {
        // FIXME: MU->getBlock() does not get updated when we move the
        // instruction.
        BasicBlock *UBB = MU->getMemoryInst()->getParent();
        // Only analyze uses in BB.
        if (BB != UBB)
          continue;

        // A use in the same block as the Def is on the path.
        if (UBB == DefBB) {
          assert(MSSA->locallyDominates(Def, MU) && "def not dominating use");
          return true;
        }

        if (UBB != OldBB)
          return true;

        // It is only harmful to hoist when the use is before OldPt.
        if (firstInBB(MU->getMemoryInst(), OldPt))
          return true;
      }

    return false;
  }

  // Return true when there is exception handling or a load of memory Def
  // between OldPt and NewPt.

  // Decrement by 1 NBBsOnAllPaths for each block between NewPt and OldPt, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOrLoadsOnPath(const Instruction *NewPt, const Instruction *OldPt,
                          MemoryAccess *Def, int &NBBsOnAllPaths) {
    const BasicBlock *NewBB = NewPt->getParent();
    const BasicBlock *OldBB = OldPt->getParent();
    assert(DT->dominates(NewBB, OldBB) && "invalid path");
    assert(DT->dominates(Def->getBlock(), NewBB) &&
           "def does not dominate new hoisting point");

    // Walk all basic blocks reachable in depth-first iteration on the inverse
    // CFG from OldBB to NewBB. These blocks are all the blocks that may be
    // executed between the execution of NewBB and OldBB. Hoisting an
    // expression from OldBB into NewBB has to be safe on all execution paths.
    for (auto I = idf_begin(OldBB), E = idf_end(OldBB); I != E;) {
      if (*I == NewBB) {
        // Stop traversal when reaching HoistPt.
        I.skipChildren();
        continue;
      }

      // Impossible to hoist with exceptions on the path.
      if (hasEH(*I))
        return true;

      // Check that we do not move a store past loads.
      if (hasMemoryUseOnPath(Def, *I, OldPt))
        return true;

      // Stop walk once the limit is reached.
      if (NBBsOnAllPaths == 0)
        return true;

      // -1 is unlimited number of blocks on all paths.
      if (NBBsOnAllPaths != -1)
        --NBBsOnAllPaths;

      ++I;
    }

    return false;
  }
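  // Illustrative note (hypothetical IR): hoisting a store past a load of the
  // same memory would change the loaded value, e.g.
  //
  //   bb1:  %v = load i32, i32* %p     ; MemoryUse of %p
  //   bb2:  store i32 1, i32* %p       ; candidate to be hoisted above bb1
  //
  // hasEHOrLoadsOnPath() rejects hoisting the store above bb1 because the
  // MemoryUse in bb1 lies on a path between the new and the old position.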
  // Return true when there is exception handling between HoistPt and BB.
  // Decrement by 1 NBBsOnAllPaths for each block between HoistPt and BB, and
  // return true when the counter NBBsOnAllPaths reaches 0, except when it is
  // initialized to -1 which is unlimited.
  bool hasEHOnPath(const BasicBlock *HoistPt, const BasicBlock *BB,
                   int &NBBsOnAllPaths) {
    assert(DT->dominates(HoistPt, BB) && "Invalid path");

    // Walk all basic blocks reachable in depth-first iteration on
    // the inverse CFG from BBInsn to NewHoistPt. These blocks are all the
    // blocks that may be executed between the execution of NewHoistPt and
    // BBInsn. Hoisting an expression from BBInsn into NewHoistPt has to be
    // safe on all execution paths.
    for (auto I = idf_begin(BB), E = idf_end(BB); I != E;) {
      if (*I == HoistPt) {
        // Stop traversal when reaching NewHoistPt.
        I.skipChildren();
        continue;
      }

      // Impossible to hoist with exceptions on the path.
      if (hasEH(*I))
        return true;

      // Stop walk once the limit is reached.
      if (NBBsOnAllPaths == 0)
        return true;

      // -1 is unlimited number of blocks on all paths.
      if (NBBsOnAllPaths != -1)
        --NBBsOnAllPaths;

      ++I;
    }

    return false;
  }

  // Return true when it is safe to hoist a memory load or store U from OldPt
  // to NewPt.
  bool safeToHoistLdSt(const Instruction *NewPt, const Instruction *OldPt,
                       MemoryUseOrDef *U, InsKind K, int &NBBsOnAllPaths) {
    // In place hoisting is safe.
    if (NewPt == OldPt)
      return true;

    const BasicBlock *NewBB = NewPt->getParent();
    const BasicBlock *OldBB = OldPt->getParent();
    const BasicBlock *UBB = U->getBlock();

    // Check for dependences on the Memory SSA.
    MemoryAccess *D = U->getDefiningAccess();
    BasicBlock *DBB = D->getBlock();
    if (DT->properlyDominates(NewBB, DBB))
      // Cannot move the load or store to NewBB above its definition in DBB.
      return false;

    if (NewBB == DBB && !MSSA->isLiveOnEntryDef(D))
      if (auto *UD = dyn_cast<MemoryUseOrDef>(D))
        if (firstInBB(NewPt, UD->getMemoryInst()))
          // Cannot move the load or store to NewPt above its definition in D.
          return false;

    // Check for unsafe hoistings due to side effects.
    if (K == InsKind::Store) {
      if (hasEHOrLoadsOnPath(NewPt, OldPt, D, NBBsOnAllPaths))
        return false;
    } else if (hasEHOnPath(NewBB, OldBB, NBBsOnAllPaths))
      return false;

    if (UBB == NewBB) {
      if (DT->properlyDominates(DBB, NewBB))
        return true;
      assert(UBB == DBB);
      assert(MSSA->locallyDominates(D, U));
    }

    // No side effects: it is safe to hoist.
    return true;
  }

  // Return true when it is safe to hoist scalar instructions from BB1 and BB2
  // to HoistBB.
  bool safeToHoistScalar(const BasicBlock *HoistBB, const BasicBlock *BB1,
                         const BasicBlock *BB2, int &NBBsOnAllPaths) {
    // Check that the hoisted expression is needed on all paths. When HoistBB
    // already contains an instruction to be hoisted, the expression is needed
    // on all paths. Enable scalar hoisting at -Oz as it is safe to hoist
    // scalars to a place where they are partially needed.
    if (!OptForMinSize && BB1 != HoistBB &&
        !hoistingFromAllPaths(HoistBB, BB1, BB2))
      return false;

    if (hasEHOnPath(HoistBB, BB1, NBBsOnAllPaths) ||
        hasEHOnPath(HoistBB, BB2, NBBsOnAllPaths))
      return false;

    // Safe to hoist scalars from BB1 and BB2 to HoistBB.
    return true;
  }

  // Each element of a hoisting list contains the basic block where to hoist
  // and a list of instructions to be hoisted.
  typedef std::pair<BasicBlock *, SmallVecInsn> HoistingPointInfo;
  typedef SmallVector<HoistingPointInfo, 4> HoistingPointList;
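  // Illustrative note (simplified): a HoistingPointInfo pairs the chosen
  // hoisting block with the candidates merged there. For example,
  // {entry, [%a, %b]} means %a stays in (or is moved to) entry, and %b is
  // erased after its uses are renamed to %a.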
  // Partition InstructionsToHoist into a set of candidates which can share a
  // common hoisting point. The partitions are collected in HPL. K tells
  // whether the instructions in InstructionsToHoist are scalars, loads, or
  // stores.
  void partitionCandidates(SmallVecImplInsn &InstructionsToHoist,
                           HoistingPointList &HPL, InsKind K) {
    // No need to sort for two instructions.
    if (InstructionsToHoist.size() > 2) {
      SortByDFSIn Pred(DFSNumber);
      std::sort(InstructionsToHoist.begin(), InstructionsToHoist.end(), Pred);
    }

    int NBBsOnAllPaths = MaxNumberOfBBSInPath;

    SmallVecImplInsn::iterator II = InstructionsToHoist.begin();
    SmallVecImplInsn::iterator Start = II;
    Instruction *HoistPt = *II;
    BasicBlock *HoistBB = HoistPt->getParent();
    MemoryUseOrDef *UD = nullptr;
    if (K != InsKind::Scalar)
      UD = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(HoistPt));

    for (++II; II != InstructionsToHoist.end(); ++II) {
      Instruction *Insn = *II;
      BasicBlock *BB = Insn->getParent();
      BasicBlock *NewHoistBB;
      Instruction *NewHoistPt;

      if (BB == HoistBB) {
        NewHoistBB = HoistBB;
        NewHoistPt = firstInBB(Insn, HoistPt) ? Insn : HoistPt;
      } else {
        NewHoistBB = DT->findNearestCommonDominator(HoistBB, BB);
        if (NewHoistBB == BB)
          NewHoistPt = Insn;
        else if (NewHoistBB == HoistBB)
          NewHoistPt = HoistPt;
        else
          NewHoistPt = NewHoistBB->getTerminator();
      }

      if (K == InsKind::Scalar) {
        if (safeToHoistScalar(NewHoistBB, HoistBB, BB, NBBsOnAllPaths)) {
          // Extend HoistPt to NewHoistPt.
          HoistPt = NewHoistPt;
          HoistBB = NewHoistBB;
          continue;
        }
      } else {
        // When NewHoistBB already contains an instruction to be hoisted, the
        // expression is needed on all paths.
        // Check that the hoisted expression is needed on all paths: it is
        // unsafe to hoist loads to a place where there may be a path not
        // loading from the same address: for instance there may be a branch
        // on which the address of the load may not be initialized.
        if ((HoistBB == NewHoistBB || BB == NewHoistBB ||
             hoistingFromAllPaths(NewHoistBB, HoistBB, BB)) &&
            // Also check that it is safe to move the load or store from
            // HoistPt to NewHoistPt, and from Insn to NewHoistPt.
            safeToHoistLdSt(NewHoistPt, HoistPt, UD, K, NBBsOnAllPaths) &&
            safeToHoistLdSt(NewHoistPt, Insn,
                            cast<MemoryUseOrDef>(MSSA->getMemoryAccess(Insn)),
                            K, NBBsOnAllPaths)) {
          // Extend HoistPt to NewHoistPt.
          HoistPt = NewHoistPt;
          HoistBB = NewHoistBB;
          continue;
        }
      }

      // At this point it is not safe to extend the current hoisting to
      // NewHoistPt: save the hoisting list so far.
      if (std::distance(Start, II) > 1)
        HPL.push_back({HoistBB, SmallVecInsn(Start, II)});

      // Start over from BB.
      Start = II;
      if (K != InsKind::Scalar)
        UD = cast<MemoryUseOrDef>(MSSA->getMemoryAccess(*Start));
      HoistPt = Insn;
      HoistBB = BB;
      NBBsOnAllPaths = MaxNumberOfBBSInPath;
    }

    // Save the last partition.
    if (std::distance(Start, II) > 1)
      HPL.push_back({HoistBB, SmallVecInsn(Start, II)});
  }

  // Initialize HPL from Map.
  void computeInsertionPoints(const VNtoInsns &Map, HoistingPointList &HPL,
                              InsKind K) {
    for (const auto &Entry : Map) {
      if (MaxHoistedThreshold != -1 && ++HoistedCtr > MaxHoistedThreshold)
        return;

      const SmallVecInsn &V = Entry.second;
      if (V.size() < 2)
        continue;

      // Compute the insertion point and the list of expressions to be
      // hoisted.
      SmallVecInsn InstructionsToHoist;
      for (auto I : V)
        if (!hasEH(I->getParent()))
          InstructionsToHoist.push_back(I);

      if (!InstructionsToHoist.empty())
        partitionCandidates(InstructionsToHoist, HPL, K);
    }
  }
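  // Illustrative note (simplified): for a VNtoInsns bucket holding three
  // loads of the same address in blocks B1, B2, and B3, partitionCandidates()
  // visits them in DFS order and repeatedly extends the hoisting point to the
  // nearest common dominator, e.g. NCD(NCD(B1, B2), B3), as long as the
  // safety checks above hold; otherwise it closes the current partition and
  // starts a new one at the failing instruction.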
  // Return true when all operands of I are available at insertion point
  // HoistPt. When limiting the number of hoisted expressions, one could hoist
  // a load without hoisting its access function. So before hoisting any
  // expression, make sure that all its operands are available at the insert
  // point.
  bool allOperandsAvailable(const Instruction *I,
                            const BasicBlock *HoistPt) const {
    for (const Use &Op : I->operands())
      if (const auto *Inst = dyn_cast<Instruction>(&Op))
        if (!DT->dominates(Inst->getParent(), HoistPt))
          return false;

    return true;
  }

  // Same as allOperandsAvailable with recursive check for GEP operands.
  bool allGepOperandsAvailable(const Instruction *I,
                               const BasicBlock *HoistPt) const {
    for (const Use &Op : I->operands())
      if (const auto *Inst = dyn_cast<Instruction>(&Op))
        if (!DT->dominates(Inst->getParent(), HoistPt)) {
          if (const GetElementPtrInst *GepOp =
                  dyn_cast<GetElementPtrInst>(Inst)) {
            if (!allGepOperandsAvailable(GepOp, HoistPt))
              return false;
            // Gep is available if all operands of GepOp are available.
          } else {
            // Gep is not available if it has operands other than GEPs that
            // are defined in blocks not dominating HoistPt.
            return false;
          }
        }
    return true;
  }

  // Make all operands of the GEP available.
  void makeGepsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                         const SmallVecInsn &InstructionsToHoist,
                         Instruction *Gep) const {
    assert(allGepOperandsAvailable(Gep, HoistPt) &&
           "GEP operands not available");

    Instruction *ClonedGep = Gep->clone();
    for (unsigned i = 0, e = Gep->getNumOperands(); i != e; ++i)
      if (Instruction *Op = dyn_cast<Instruction>(Gep->getOperand(i))) {
        // Check whether the operand is already available.
        if (DT->dominates(Op->getParent(), HoistPt))
          continue;

        // As a GEP can refer to other GEPs, recursively make all the operands
        // of this GEP available at HoistPt.
        if (GetElementPtrInst *GepOp = dyn_cast<GetElementPtrInst>(Op))
          makeGepsAvailable(ClonedGep, HoistPt, InstructionsToHoist, GepOp);
      }

    // Insert the copy of Gep at the end of HoistPt.
    ClonedGep->insertBefore(HoistPt->getTerminator());

    // Conservatively discard any optimization hints, they may differ on the
    // other paths.
    ClonedGep->dropUnknownNonDebugMetadata();

    // If we have optimization hints which agree with each other along
    // different paths, preserve them.
    for (const Instruction *OtherInst : InstructionsToHoist) {
      const GetElementPtrInst *OtherGep;
      if (auto *OtherLd = dyn_cast<LoadInst>(OtherInst))
        OtherGep = cast<GetElementPtrInst>(OtherLd->getPointerOperand());
      else
        OtherGep = cast<GetElementPtrInst>(
            cast<StoreInst>(OtherInst)->getPointerOperand());
      ClonedGep->intersectOptionalDataWith(OtherGep);
    }

    // Replace uses of Gep with ClonedGep in Repl.
    Repl->replaceUsesOfWith(Gep, ClonedGep);
  }
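  // Illustrative note (hypothetical IR): when a hoisted load reads through a
  // GEP defined in the original block,
  //
  //   if.then:
  //     %g = getelementptr inbounds i32, i32* %base, i64 %i
  //     %v = load i32, i32* %g
  //
  // the GEP itself is not hoisted by default; instead a clone of %g is
  // inserted at the hoisting point so that the moved load still has all of
  // its operands available there.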
  // In the case Repl is a load or a store, we make all its GEPs
  // available: GEPs are not hoisted by default to avoid hoisting the address
  // computation without the associated load or store.
  bool makeGepOperandsAvailable(Instruction *Repl, BasicBlock *HoistPt,
                                const SmallVecInsn &InstructionsToHoist) const {
    // Check whether the GEP of a ld/st can be synthesized at HoistPt.
    GetElementPtrInst *Gep = nullptr;
    Instruction *Val = nullptr;
    if (auto *Ld = dyn_cast<LoadInst>(Repl)) {
      Gep = dyn_cast<GetElementPtrInst>(Ld->getPointerOperand());
    } else if (auto *St = dyn_cast<StoreInst>(Repl)) {
      Gep = dyn_cast<GetElementPtrInst>(St->getPointerOperand());
      Val = dyn_cast<Instruction>(St->getValueOperand());
      // Check that the stored value is available.
      if (Val) {
        if (isa<GetElementPtrInst>(Val)) {
          // Check whether we can compute the GEP at HoistPt.
          if (!allGepOperandsAvailable(Val, HoistPt))
            return false;
        } else if (!DT->dominates(Val->getParent(), HoistPt))
          return false;
      }
    }

    // Check whether we can compute the Gep at HoistPt.
    if (!Gep || !allGepOperandsAvailable(Gep, HoistPt))
      return false;

    makeGepsAvailable(Repl, HoistPt, InstructionsToHoist, Gep);

    if (Val && isa<GetElementPtrInst>(Val))
      makeGepsAvailable(Repl, HoistPt, InstructionsToHoist, Val);

    return true;
  }
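  // Illustrative note (simplified): hoist() processes each {HoistBB,
  // candidates} pair in two steps: pick (or move) one representative Repl
  // into HoistBB, then erase every other candidate after renaming its uses to
  // Repl, merging metadata and alignment conservatively. MemorySSA is updated
  // in place so that the next fix-point iteration sees the new accesses.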
  std::pair<unsigned, unsigned> hoist(HoistingPointList &HPL) {
    unsigned NI = 0, NL = 0, NS = 0, NC = 0, NR = 0;
    for (const HoistingPointInfo &HP : HPL) {
      // Find out whether we already have one of the instructions in HoistPt,
      // in which case we do not have to move it.
      BasicBlock *HoistPt = HP.first;
      const SmallVecInsn &InstructionsToHoist = HP.second;
      Instruction *Repl = nullptr;
      for (Instruction *I : InstructionsToHoist)
        if (I->getParent() == HoistPt)
          // If there are two instructions in HoistPt to be hoisted in place:
          // update Repl to be the first one, such that we can rename the uses
          // of the second based on the first.
          if (!Repl || firstInBB(I, Repl))
            Repl = I;

      if (Repl) {
        // Repl is already in HoistPt: it remains in place.
        assert(allOperandsAvailable(Repl, HoistPt) &&
               "instruction depends on operands that are not available");
      } else {
        // When we do not find Repl in HoistPt, select the first in the list
        // and move it to HoistPt.
        Repl = InstructionsToHoist.front();

        // We can move Repl in HoistPt only when all operands are available.
        // When not HoistingGeps we need to copy the GEPs now.
        // The order in which hoistings are done may influence the
        // availability of operands.
        if (!allOperandsAvailable(Repl, HoistPt) && !HoistingGeps &&
            !makeGepOperandsAvailable(Repl, HoistPt, InstructionsToHoist))
          continue;

        // Move the instruction to the end of HoistPt.
        Repl->moveBefore(HoistPt->getTerminator());
      }

      MemoryAccess *NewMemAcc = nullptr;
      if (MemoryAccess *MA = MSSA->getMemoryAccess(Repl)) {
        if (MemoryUseOrDef *OldMemAcc = dyn_cast<MemoryUseOrDef>(MA)) {
          // The definition of this ld/st will not change: ld/st hoisting is
          // legal when the ld/st is not moved past its current definition.
          MemoryAccess *Def = OldMemAcc->getDefiningAccess();
          NewMemAcc =
              MSSA->createMemoryAccessInBB(Repl, Def, HoistPt, MemorySSA::End);
          OldMemAcc->replaceAllUsesWith(NewMemAcc);
          MSSA->removeMemoryAccess(OldMemAcc);
        }
      }

      if (isa<LoadInst>(Repl))
        ++NL;
      else if (isa<StoreInst>(Repl))
        ++NS;
      else if (isa<CallInst>(Repl))
        ++NC;
      else // Scalar
        ++NI;

      // Remove and rename all other instructions.
      for (Instruction *I : InstructionsToHoist)
        if (I != Repl) {
          ++NR;
          if (auto *ReplacementLoad = dyn_cast<LoadInst>(Repl)) {
            ReplacementLoad->setAlignment(
                std::min(ReplacementLoad->getAlignment(),
                         cast<LoadInst>(I)->getAlignment()));
            ++NumLoadsRemoved;
          } else if (auto *ReplacementStore = dyn_cast<StoreInst>(Repl)) {
            ReplacementStore->setAlignment(
                std::min(ReplacementStore->getAlignment(),
                         cast<StoreInst>(I)->getAlignment()));
            ++NumStoresRemoved;
          } else if (auto *ReplacementAlloca = dyn_cast<AllocaInst>(Repl)) {
            ReplacementAlloca->setAlignment(
                std::max(ReplacementAlloca->getAlignment(),
                         cast<AllocaInst>(I)->getAlignment()));
          } else if (isa<CallInst>(Repl)) {
            ++NumCallsRemoved;
          }

          if (NewMemAcc) {
            // Update the uses of the old MSSA access with NewMemAcc.
            MemoryAccess *OldMA = MSSA->getMemoryAccess(I);
            OldMA->replaceAllUsesWith(NewMemAcc);
            MSSA->removeMemoryAccess(OldMA);
          }

          Repl->intersectOptionalDataWith(I);
          combineKnownMetadata(Repl, I);
          I->replaceAllUsesWith(Repl);
          I->eraseFromParent();
        }

      // Remove MemorySSA phi nodes with the same arguments.
      if (NewMemAcc) {
        SmallPtrSet<MemoryPhi *, 4> UsePhis;
        for (User *U : NewMemAcc->users())
          if (MemoryPhi *Phi = dyn_cast<MemoryPhi>(U))
            UsePhis.insert(Phi);

        for (auto *Phi : UsePhis) {
          auto In = Phi->incoming_values();
          if (std::all_of(In.begin(), In.end(),
                          [&](Use &U) { return U == NewMemAcc; })) {
            Phi->replaceAllUsesWith(NewMemAcc);
            MSSA->removeMemoryAccess(Phi);
          }
        }
      }
    }

    NumHoisted += NL + NS + NC + NI;
    NumRemoved += NR;
    NumLoadsHoisted += NL;
    NumStoresHoisted += NS;
    NumCallsHoisted += NC;
    return {NI, NL + NC + NS};
  }
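  // Design note (summary of the loop above): when several loads or stores are
  // merged into one, the surviving instruction conservatively keeps the
  // minimum alignment of the merged accesses, and metadata is intersected via
  // combineKnownMetadata() so that it never claims more than what was true of
  // every removed copy.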
  // Hoist all expressions. Returns the number of scalars hoisted and the
  // number of non-scalars hoisted.
  std::pair<unsigned, unsigned> hoistExpressions(Function &F) {
    InsnInfo II;
    LoadInfo LI;
    StoreInfo SI;
    CallInfo CI;
    for (BasicBlock *BB : depth_first(&F.getEntryBlock())) {
      int InstructionNb = 0;
      for (Instruction &I1 : *BB) {
        // Only hoist the first instructions in BB up to MaxDepthInBB. Hoisting
        // deeper may increase the register pressure and compilation time.
        if (MaxDepthInBB != -1 && InstructionNb++ >= MaxDepthInBB)
          break;

        if (auto *Load = dyn_cast<LoadInst>(&I1))
          LI.insert(Load, VN);
        else if (auto *Store = dyn_cast<StoreInst>(&I1))
          SI.insert(Store, VN);
        else if (auto *Call = dyn_cast<CallInst>(&I1)) {
          if (auto *Intr = dyn_cast<IntrinsicInst>(Call)) {
            if (isa<DbgInfoIntrinsic>(Intr) ||
                Intr->getIntrinsicID() == Intrinsic::assume)
              continue;
          }
          if (Call->mayHaveSideEffects()) {
            if (!OptForMinSize)
              break;
            // We may continue hoisting across calls which write to memory.
            if (Call->mayThrow())
              break;
          }
          CI.insert(Call, VN);
        } else if (HoistingGeps || !isa<GetElementPtrInst>(&I1))
          // Do not hoist scalars past calls that may write to memory because
          // that could result in spills later. GEPs are handled separately.
          // TODO: We can relax this for targets like AArch64 as they have more
          // registers than X86.
          II.insert(&I1, VN);
      }
    }

    HoistingPointList HPL;
    computeInsertionPoints(II.getVNTable(), HPL, InsKind::Scalar);
    computeInsertionPoints(LI.getVNTable(), HPL, InsKind::Load);
    computeInsertionPoints(SI.getVNTable(), HPL, InsKind::Store);
    computeInsertionPoints(CI.getScalarVNTable(), HPL, InsKind::Scalar);
    computeInsertionPoints(CI.getLoadVNTable(), HPL, InsKind::Load);
    computeInsertionPoints(CI.getStoreVNTable(), HPL, InsKind::Store);
    return hoist(HPL);
  }
};

class GVNHoistLegacyPass : public FunctionPass {
public:
  static char ID;

  GVNHoistLegacyPass() : FunctionPass(ID) {
    initializeGVNHoistLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    auto &MD = getAnalysis<MemoryDependenceWrapperPass>().getMemDep();

    GVNHoist G(&DT, &AA, &MD, F.optForMinSize());
    return G.run(F);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
  }
};
} // namespace

PreservedAnalyses GVNHoistPass::run(Function &F,
                                    AnalysisManager<Function> &AM) {
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  MemoryDependenceResults &MD = AM.getResult<MemoryDependenceAnalysis>(F);

  GVNHoist G(&DT, &AA, &MD, F.optForMinSize());
  if (!G.run(F))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<DominatorTreeAnalysis>();
  return PA;
}

char GVNHoistLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(GVNHoistLegacyPass, "gvn-hoist",
                      "Early GVN Hoisting of Expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(GVNHoistLegacyPass, "gvn-hoist",
                    "Early GVN Hoisting of Expressions", false, false)

FunctionPass *llvm::createGVNHoistPass() { return new GVNHoistLegacyPass(); }