//===- PromoteMemoryToRegister.cpp - Convert allocas to registers ---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file promotes memory references to be register references. It promotes
// alloca instructions which only have loads and stores as uses. An alloca is
// transformed by using iterated dominator frontiers to place PHI nodes, then
// traversing the function in depth-first order to rewrite loads and stores as
// appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "mem2reg"

STATISTIC(NumLocalPromoted, "Number of alloca's promoted within one block");
STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");

bool llvm::isAllocaPromotable(const AllocaInst *AI) {
  // Only allow direct and non-volatile loads and stores...
  for (const User *U : AI->users()) {
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      // Note that atomic loads can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (LI->isVolatile())
        return false;
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getValueOperand() == AI ||
          SI->getValueOperand()->getType() != AI->getAllocatedType())
        return false; // Don't allow a store OF the AI, only INTO the AI.
      // Note that atomic stores can be transformed; atomic semantics do
      // not have any meaning for a local alloca.
      if (SI->isVolatile())
        return false;
    } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      if (!II->isLifetimeStartOrEnd() && !II->isDroppable())
        return false;
    } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
      if (!onlyUsedByLifetimeMarkersOrDroppableInsts(BCI))
        return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!GEPI->hasAllZeroIndices())
        return false;
      if (!onlyUsedByLifetimeMarkersOrDroppableInsts(GEPI))
        return false;
    } else if (const AddrSpaceCastInst *ASCI = dyn_cast<AddrSpaceCastInst>(U)) {
      if (!onlyUsedByLifetimeMarkers(ASCI))
        return false;
    } else {
      return false;
    }
  }

  return true;
}

namespace {

struct AllocaInfo {
  using DbgUserVec = SmallVector<DbgVariableIntrinsic *, 1>;

  SmallVector<BasicBlock *, 32> DefiningBlocks;
  SmallVector<BasicBlock *, 32> UsingBlocks;

  StoreInst *OnlyStore;
  BasicBlock *OnlyBlock;
  bool OnlyUsedInOneBlock;

  DbgUserVec DbgUsers;

  void clear() {
    DefiningBlocks.clear();
    UsingBlocks.clear();
    OnlyStore = nullptr;
    OnlyBlock = nullptr;
    OnlyUsedInOneBlock = true;
    DbgUsers.clear();
  }

  /// Scan the uses of the specified alloca, filling in the AllocaInfo used
  /// by the rest of the pass to reason about the uses of this alloca.
  void AnalyzeAlloca(AllocaInst *AI) {
    clear();

    // As we scan the uses of the alloca instruction, keep track of stores,
    // and decide whether all of the loads and stores to the alloca are within
    // the same basic block.
    for (User *U : AI->users()) {
      Instruction *User = cast<Instruction>(U);

      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
        // Remember the basic blocks which define new values for the alloca
        DefiningBlocks.push_back(SI->getParent());
        OnlyStore = SI;
      } else {
        LoadInst *LI = cast<LoadInst>(User);
        // Otherwise it must be a load instruction, keep track of variable
        // reads.
        UsingBlocks.push_back(LI->getParent());
      }

      if (OnlyUsedInOneBlock) {
        if (!OnlyBlock)
          OnlyBlock = User->getParent();
        else if (OnlyBlock != User->getParent())
          OnlyUsedInOneBlock = false;
      }
    }

    findDbgUsers(DbgUsers, AI);
  }
};

/// Data package used by RenamePass().
struct RenamePassData {
  using ValVector = std::vector<Value *>;
  using LocationVector = std::vector<DebugLoc>;

  RenamePassData(BasicBlock *B, BasicBlock *P, ValVector V, LocationVector L)
      : BB(B), Pred(P), Values(std::move(V)), Locations(std::move(L)) {}

  BasicBlock *BB;
  BasicBlock *Pred;
  ValVector Values;
  LocationVector Locations;
};

/// This assigns and keeps a per-bb relative ordering of load/store
/// instructions in the block that directly load or store an alloca.
///
/// This functionality is important because it avoids scanning large basic
/// blocks multiple times when promoting many allocas in the same block.
class LargeBlockInfo {
  /// For each instruction that we track, keep the index of the
  /// instruction.
  ///
  /// The index starts out as the number of the instruction from the start of
  /// the block.
  DenseMap<const Instruction *, unsigned> InstNumbers;

public:

  /// This code only looks at accesses to allocas.
  static bool isInterestingInstruction(const Instruction *I) {
    return (isa<LoadInst>(I) && isa<AllocaInst>(I->getOperand(0))) ||
           (isa<StoreInst>(I) && isa<AllocaInst>(I->getOperand(1)));
  }

  /// Get or calculate the index of the specified instruction.
  unsigned getInstructionIndex(const Instruction *I) {
    assert(isInterestingInstruction(I) &&
           "Not a load/store to/from an alloca?");

    // If we already have this instruction number, return it.
    DenseMap<const Instruction *, unsigned>::iterator It = InstNumbers.find(I);
    if (It != InstNumbers.end())
      return It->second;

    // Scan the whole block to get the instruction. This accumulates
    // information for every interesting instruction in the block, in order to
    // avoid gratuitous rescans.
    const BasicBlock *BB = I->getParent();
    unsigned InstNo = 0;
    for (const Instruction &BBI : *BB)
      if (isInterestingInstruction(&BBI))
        InstNumbers[&BBI] = InstNo++;
    It = InstNumbers.find(I);

    assert(It != InstNumbers.end() && "Didn't insert instruction?");
    return It->second;
  }

  void deleteValue(const Instruction *I) { InstNumbers.erase(I); }

  void clear() { InstNumbers.clear(); }
};

struct PromoteMem2Reg {
  /// The alloca instructions being promoted.
  std::vector<AllocaInst *> Allocas;

  DominatorTree &DT;
  DIBuilder DIB;

  /// A cache of @llvm.assume intrinsics used by SimplifyInstruction.
  AssumptionCache *AC;

  const SimplifyQuery SQ;

  /// Reverse mapping of Allocas.
  DenseMap<AllocaInst *, unsigned> AllocaLookup;

  /// The PhiNodes we're adding.
  ///
  /// That map is used to simplify some Phi nodes as we iterate over it, so
  /// it should have deterministic iterators. We could use a MapVector, but
  /// since we already maintain a map from BasicBlock* to a stable numbering
  /// (BBNumbers), the DenseMap is more efficient (also supports removal).
  DenseMap<std::pair<unsigned, unsigned>, PHINode *> NewPhiNodes;

  /// For each PHI node, keep track of which entry in Allocas it corresponds
  /// to.
  DenseMap<PHINode *, unsigned> PhiToAllocaMap;

  /// For each alloca, we keep track of the dbg.declare intrinsic that
  /// describes it, if any, so that we can convert it to a dbg.value
  /// intrinsic if the alloca gets promoted.
  SmallVector<AllocaInfo::DbgUserVec, 8> AllocaDbgUsers;

  /// The set of basic blocks the renamer has already visited.
  SmallPtrSet<BasicBlock *, 16> Visited;

  /// Contains a stable numbering of basic blocks to avoid non-deterministic
  /// behavior.
  DenseMap<BasicBlock *, unsigned> BBNumbers;

  /// Lazily compute the number of predecessors a block has.
  DenseMap<const BasicBlock *, unsigned> BBNumPreds;

public:
  PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                 AssumptionCache *AC)
      : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
        DIB(*DT.getRoot()->getParent()->getParent(), /*AllowUnresolved*/ false),
        AC(AC), SQ(DT.getRoot()->getParent()->getParent()->getDataLayout(),
                   nullptr, &DT, AC) {}

  void run();

private:
  void RemoveFromAllocasList(unsigned &AllocaIdx) {
    Allocas[AllocaIdx] = Allocas.back();
    Allocas.pop_back();
    --AllocaIdx;
  }

  unsigned getNumPreds(const BasicBlock *BB) {
    unsigned &NP = BBNumPreds[BB];
    if (NP == 0)
      NP = pred_size(BB) + 1;
    return NP - 1;
  }

  void ComputeLiveInBlocks(AllocaInst *AI, AllocaInfo &Info,
                           const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
                           SmallPtrSetImpl<BasicBlock *> &LiveInBlocks);
  void RenamePass(BasicBlock *BB, BasicBlock *Pred,
                  RenamePassData::ValVector &IncVals,
                  RenamePassData::LocationVector &IncLocs,
                  std::vector<RenamePassData> &Worklist);
  bool QueuePhiNode(BasicBlock *BB, unsigned AllocaIdx, unsigned &Version);
};

} // end anonymous namespace

/// Given a LoadInst LI this adds assume(LI != null) after it.
static void addAssumeNonNull(AssumptionCache *AC, LoadInst *LI) {
  Function *AssumeIntrinsic =
      Intrinsic::getDeclaration(LI->getModule(), Intrinsic::assume);
  ICmpInst *LoadNotNull = new ICmpInst(ICmpInst::ICMP_NE, LI,
                                       Constant::getNullValue(LI->getType()));
  LoadNotNull->insertAfter(LI);
  CallInst *CI = CallInst::Create(AssumeIntrinsic, {LoadNotNull});
  CI->insertAfter(LoadNotNull);
  AC->registerAssumption(cast<AssumeInst>(CI));
}

static void removeIntrinsicUsers(AllocaInst *AI) {
  // Knowing that this alloca is promotable, we know that it's safe to kill all
  // instructions except for load and store.

  for (Use &U : llvm::make_early_inc_range(AI->uses())) {
    Instruction *I = cast<Instruction>(U.getUser());
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      continue;

    // Drop the use of AI in droppable instructions.
    if (I->isDroppable()) {
      I->dropDroppableUse(U);
      continue;
    }

    if (!I->getType()->isVoidTy()) {
      // The only users of this bitcast/GEP instruction are lifetime intrinsics.
      // Follow the use/def chain to erase them now instead of leaving it for
      // dead code elimination later.
      for (Use &UU : llvm::make_early_inc_range(I->uses())) {
        Instruction *Inst = cast<Instruction>(UU.getUser());

        // Drop the use of I in droppable instructions.
        if (Inst->isDroppable()) {
          Inst->dropDroppableUse(UU);
          continue;
        }
        Inst->eraseFromParent();
      }
    }
    I->eraseFromParent();
  }
}

/// Rewrite as many loads as possible given a single store.
///
/// When there is only a single store, we can use the domtree to trivially
/// replace all of the dominated loads with the stored value. Do so, and return
/// true if this has successfully promoted the alloca entirely. If this returns
/// false there were some loads which were not dominated by the single store
/// and thus must be phi-ed with undef. We fall back to the standard alloca
/// promotion algorithm in that case.
static bool rewriteSingleStoreAlloca(AllocaInst *AI, AllocaInfo &Info,
                                     LargeBlockInfo &LBI, const DataLayout &DL,
                                     DominatorTree &DT, AssumptionCache *AC) {
  StoreInst *OnlyStore = Info.OnlyStore;
  bool StoringGlobalVal = !isa<Instruction>(OnlyStore->getOperand(0));
  BasicBlock *StoreBB = OnlyStore->getParent();
  int StoreIndex = -1;

  // Clear out UsingBlocks. We will reconstruct it here if needed.
  Info.UsingBlocks.clear();

  for (User *U : make_early_inc_range(AI->users())) {
    Instruction *UserInst = cast<Instruction>(U);
    if (UserInst == OnlyStore)
      continue;
    LoadInst *LI = cast<LoadInst>(UserInst);

    // Okay, if we have a load from the alloca, we want to replace it with the
    // only value stored to the alloca. We can do this if the value is
    // dominated by the store. If not, we use the rest of the mem2reg machinery
    // to insert the phi nodes as needed.
    if (!StoringGlobalVal) { // Non-instructions are always dominated.
      if (LI->getParent() == StoreBB) {
        // If we have a use that is in the same block as the store, compare the
        // indices of the two instructions to see which one came first. If the
        // load came before the store, we can't handle it.
        if (StoreIndex == -1)
          StoreIndex = LBI.getInstructionIndex(OnlyStore);

        if (unsigned(StoreIndex) > LBI.getInstructionIndex(LI)) {
          // Can't handle this load, bail out.
          Info.UsingBlocks.push_back(StoreBB);
          continue;
        }
      } else if (!DT.dominates(StoreBB, LI->getParent())) {
        // If the load and store are in different blocks, use BB dominance to
        // check their relationships. If the store doesn't dom the use, bail
        // out.
        Info.UsingBlocks.push_back(LI->getParent());
        continue;
      }
    }

    // Otherwise, we *can* safely rewrite this load.
    Value *ReplVal = OnlyStore->getOperand(0);
    // If the replacement value is the load, this must occur in unreachable
    // code.
    if (ReplVal == LI)
      ReplVal = PoisonValue::get(LI->getType());

    // If the load was marked as nonnull we don't want to lose
    // that information when we erase this Load. So we preserve
    // it with an assume.
    if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
        !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
      addAssumeNonNull(AC, LI);

    LI->replaceAllUsesWith(ReplVal);
    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Finally, after the scan, check to see if the store is all that is left.
  if (!Info.UsingBlocks.empty())
    return false; // If not, we'll have to fall back for the remainder.

  // Record debuginfo for the store and remove the declaration's
  // debuginfo.
  for (DbgVariableIntrinsic *DII : Info.DbgUsers) {
    if (DII->isAddressOfVariable()) {
      DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
      ConvertDebugDeclareToDebugValue(DII, Info.OnlyStore, DIB);
      DII->eraseFromParent();
    } else if (DII->getExpression()->startsWithDeref()) {
      DII->eraseFromParent();
    }
  }
  // Remove the (now dead) store and alloca.
  Info.OnlyStore->eraseFromParent();
  LBI.deleteValue(Info.OnlyStore);

  AI->eraseFromParent();
  return true;
}

/// Many allocas are only used within a single basic block. If this is the
/// case, avoid traversing the CFG and inserting a lot of potentially useless
/// PHI nodes by just performing a single linear pass over the basic block
/// using the Alloca.
///
/// If we cannot promote this alloca (because it is read before it is written),
/// return false. This is necessary in cases where, due to control flow, the
/// alloca is undefined only on some control flow paths. e.g. code like
/// this is correct in LLVM IR:
///  // A is an alloca with no stores so far
///  for (...) {
///    int t = *A;
///    if (!first_iteration)
///      use(t);
///    *A = 42;
///  }
static bool promoteSingleBlockAlloca(AllocaInst *AI, const AllocaInfo &Info,
                                     LargeBlockInfo &LBI,
                                     const DataLayout &DL,
                                     DominatorTree &DT,
                                     AssumptionCache *AC) {
  // The trickiest case to handle is when we have large blocks. Because of this,
  // this code is optimized assuming that large blocks happen. This does not
  // significantly pessimize the small block case. This uses LargeBlockInfo to
  // make it efficient to get the index of various operations in the block.

  // Walk the use-def list of the alloca, getting the locations of all stores.
  using StoresByIndexTy = SmallVector<std::pair<unsigned, StoreInst *>, 64>;
  StoresByIndexTy StoresByIndex;

  for (User *U : AI->users())
    if (StoreInst *SI = dyn_cast<StoreInst>(U))
      StoresByIndex.push_back(std::make_pair(LBI.getInstructionIndex(SI), SI));

  // Sort the stores by their index, making it efficient to do a lookup with a
  // binary search.
  llvm::sort(StoresByIndex, less_first());

  // Walk all of the loads from this alloca, replacing them with the nearest
  // store above them, if any.
  for (User *U : make_early_inc_range(AI->users())) {
    LoadInst *LI = dyn_cast<LoadInst>(U);
    if (!LI)
      continue;

    unsigned LoadIdx = LBI.getInstructionIndex(LI);

    // Find the nearest store that has a lower index than this load.
    StoresByIndexTy::iterator I = llvm::lower_bound(
        StoresByIndex,
        std::make_pair(LoadIdx, static_cast<StoreInst *>(nullptr)),
        less_first());
    if (I == StoresByIndex.begin()) {
      if (StoresByIndex.empty())
        // If there are no stores, the load takes the undef value.
        LI->replaceAllUsesWith(UndefValue::get(LI->getType()));
      else
        // There is no store before this load, bail out (load may be affected
        // by the following stores - see main comment).
        return false;
    } else {
      // Otherwise, there was a store before this load, the load takes its value.
      // Note, if the load was marked as nonnull we don't want to lose that
      // information when we erase it. So we preserve it with an assume.
      Value *ReplVal = std::prev(I)->second->getOperand(0);
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
          !isKnownNonZero(ReplVal, DL, 0, AC, LI, &DT))
        addAssumeNonNull(AC, LI);

      // If the replacement value is the load, this must occur in unreachable
      // code.
      if (ReplVal == LI)
        ReplVal = PoisonValue::get(LI->getType());

      LI->replaceAllUsesWith(ReplVal);
    }

    LI->eraseFromParent();
    LBI.deleteValue(LI);
  }

  // Remove the (now dead) stores and alloca.
  while (!AI->use_empty()) {
    StoreInst *SI = cast<StoreInst>(AI->user_back());
    // Record debuginfo for the store before removing it.
    for (DbgVariableIntrinsic *DII : Info.DbgUsers) {
      if (DII->isAddressOfVariable()) {
        DIBuilder DIB(*AI->getModule(), /*AllowUnresolved*/ false);
        ConvertDebugDeclareToDebugValue(DII, SI, DIB);
      }
    }
    SI->eraseFromParent();
    LBI.deleteValue(SI);
  }

  AI->eraseFromParent();

  // The alloca's debuginfo can be removed as well.
  for (DbgVariableIntrinsic *DII : Info.DbgUsers)
    if (DII->isAddressOfVariable() || DII->getExpression()->startsWithDeref())
      DII->eraseFromParent();

  ++NumLocalPromoted;
  return true;
}

void PromoteMem2Reg::run() {
  Function &F = *DT.getRoot()->getParent();

  AllocaDbgUsers.resize(Allocas.size());

  AllocaInfo Info;
  LargeBlockInfo LBI;
  ForwardIDFCalculator IDF(DT);

  for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
    AllocaInst *AI = Allocas[AllocaNum];

    assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
    assert(AI->getParent()->getParent() == &F &&
           "All allocas should be in the same function, which is the same as DF!");

    removeIntrinsicUsers(AI);

    if (AI->use_empty()) {
      // If there are no uses of the alloca, just delete it now.
      AI->eraseFromParent();

      // Remove the alloca from the Allocas list, since it has been processed
      RemoveFromAllocasList(AllocaNum);
      ++NumDeadAlloca;
      continue;
    }

    // Calculate the set of read and write-locations for each alloca. This is
    // analogous to finding the 'uses' and 'definitions' of each variable.
    Info.AnalyzeAlloca(AI);

    // If there is only a single store to this value, replace any loads of
    // it that are directly dominated by the definition with the value stored.
    if (Info.DefiningBlocks.size() == 1) {
      if (rewriteSingleStoreAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
        // The alloca has been processed, move on.
        RemoveFromAllocasList(AllocaNum);
        ++NumSingleStore;
        continue;
      }
    }

    // If the alloca is only read and written in one basic block, just perform a
    // linear sweep over the block to eliminate it.
    if (Info.OnlyUsedInOneBlock &&
        promoteSingleBlockAlloca(AI, Info, LBI, SQ.DL, DT, AC)) {
      // The alloca has been processed, move on.
      RemoveFromAllocasList(AllocaNum);
      continue;
    }

    // If we haven't computed a numbering for the BB's in the function, do so
    // now.
    if (BBNumbers.empty()) {
      unsigned ID = 0;
      for (auto &BB : F)
        BBNumbers[&BB] = ID++;
    }

    // Remember the dbg.declare intrinsic describing this alloca, if any.
    if (!Info.DbgUsers.empty())
      AllocaDbgUsers[AllocaNum] = Info.DbgUsers;

    // Keep the reverse mapping of the 'Allocas' array for the rename pass.
    AllocaLookup[Allocas[AllocaNum]] = AllocaNum;

    // Unique the set of defining blocks for efficient lookup.
    SmallPtrSet<BasicBlock *, 32> DefBlocks(Info.DefiningBlocks.begin(),
                                            Info.DefiningBlocks.end());

    // Determine which blocks the value is live in. These are blocks which lead
    // to uses.
    SmallPtrSet<BasicBlock *, 32> LiveInBlocks;
    ComputeLiveInBlocks(AI, Info, DefBlocks, LiveInBlocks);

    // At this point, we're committed to promoting the alloca using IDF's, and
    // the standard SSA construction algorithm. Determine which blocks need phi
    // nodes and see if we can optimize out some work by avoiding insertion of
    // dead phi nodes.
    IDF.setLiveInBlocks(LiveInBlocks);
    IDF.setDefiningBlocks(DefBlocks);
    SmallVector<BasicBlock *, 32> PHIBlocks;
    IDF.calculate(PHIBlocks);
    llvm::sort(PHIBlocks, [this](BasicBlock *A, BasicBlock *B) {
      return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
    });

    unsigned CurrentVersion = 0;
    for (BasicBlock *BB : PHIBlocks)
      QueuePhiNode(BB, AllocaNum, CurrentVersion);
  }

  if (Allocas.empty())
    return; // All of the allocas must have been trivial!

  LBI.clear();

  // Set the incoming values for the basic block to be null values for all of
  // the alloca's. We do this in case there is a load of a value that has not
  // been stored yet. In this case, it will get this null value.
  RenamePassData::ValVector Values(Allocas.size());
  for (unsigned i = 0, e = Allocas.size(); i != e; ++i)
    Values[i] = UndefValue::get(Allocas[i]->getAllocatedType());

  // When handling debug info, treat all incoming values as if they have unknown
  // locations until proven otherwise.
  RenamePassData::LocationVector Locations(Allocas.size());

  // Walks all basic blocks in the function performing the SSA rename algorithm
  // and inserting the phi nodes we marked as necessary
  std::vector<RenamePassData> RenamePassWorkList;
  RenamePassWorkList.emplace_back(&F.front(), nullptr, std::move(Values),
                                  std::move(Locations));
  do {
    RenamePassData RPD = std::move(RenamePassWorkList.back());
    RenamePassWorkList.pop_back();
    // RenamePass may add new worklist entries.
    RenamePass(RPD.BB, RPD.Pred, RPD.Values, RPD.Locations, RenamePassWorkList);
  } while (!RenamePassWorkList.empty());

  // The renamer uses the Visited set to avoid infinite loops. Clear it now.
  Visited.clear();

  // Remove the allocas themselves from the function.
  for (Instruction *A : Allocas) {
    // If there are any uses of the alloca instructions left, they must be in
    // unreachable basic blocks that were not processed by walking the dominator
    // tree. Just delete the users now.
    if (!A->use_empty())
      A->replaceAllUsesWith(PoisonValue::get(A->getType()));
    A->eraseFromParent();
  }

  // Remove alloca's dbg.declare intrinsics from the function.
  for (auto &DbgUsers : AllocaDbgUsers) {
    for (auto *DII : DbgUsers)
      if (DII->isAddressOfVariable() || DII->getExpression()->startsWithDeref())
        DII->eraseFromParent();
  }

  // Loop over all of the PHI nodes and see if there are any that we can get
  // rid of because they merge all of the same incoming values. This can
  // happen due to undef values coming into the PHI nodes. This process is
  // iterative, because eliminating one PHI node can cause others to be removed.
  bool EliminatedAPHI = true;
  while (EliminatedAPHI) {
    EliminatedAPHI = false;

    // Iterating over NewPhiNodes is deterministic, so it is safe to try to
    // simplify and RAUW them as we go. If it was not, we could add uses to
    // the values we replace with in a non-deterministic order, thus creating
    // non-deterministic def->use chains.
    for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
             I = NewPhiNodes.begin(),
             E = NewPhiNodes.end();
         I != E;) {
      PHINode *PN = I->second;

      // If this PHI node merges one value and/or undefs, get the value.
      if (Value *V = SimplifyInstruction(PN, SQ)) {
        PN->replaceAllUsesWith(V);
        PN->eraseFromParent();
        NewPhiNodes.erase(I++);
        EliminatedAPHI = true;
        continue;
      }
      ++I;
    }
  }

  // At this point, the renamer has added entries to PHI nodes for all reachable
  // code. Unfortunately, there may be unreachable blocks which the renamer
  // hasn't traversed. If this is the case, the PHI nodes may not
  // have incoming values for all predecessors. Loop over all PHI nodes we have
  // created, inserting undef values if they are missing any incoming values.
  for (DenseMap<std::pair<unsigned, unsigned>, PHINode *>::iterator
           I = NewPhiNodes.begin(),
           E = NewPhiNodes.end();
       I != E; ++I) {
    // We want to do this once per basic block. As such, only process a block
    // when we find the PHI that is the first entry in the block.
    PHINode *SomePHI = I->second;
    BasicBlock *BB = SomePHI->getParent();
    if (&BB->front() != SomePHI)
      continue;

    // Only do work here if the PHI nodes are missing incoming values. We
    // know that all PHI nodes that were inserted in a block will have the same
    // number of incoming values, so we can just check any of them.
    if (SomePHI->getNumIncomingValues() == getNumPreds(BB))
      continue;

    // Get the preds for BB.
    SmallVector<BasicBlock *, 16> Preds(predecessors(BB));

    // Ok, now we know that all of the PHI nodes are missing entries for some
    // basic blocks. Start by sorting the incoming predecessors for efficient
    // access.
    auto CompareBBNumbers = [this](BasicBlock *A, BasicBlock *B) {
      return BBNumbers.find(A)->second < BBNumbers.find(B)->second;
    };
    llvm::sort(Preds, CompareBBNumbers);

    // Now we loop through all BB's which have entries in SomePHI and remove
    // them from the Preds list.
    for (unsigned i = 0, e = SomePHI->getNumIncomingValues(); i != e; ++i) {
      // Do a log(n) search of the Preds list for the entry we want.
      SmallVectorImpl<BasicBlock *>::iterator EntIt = llvm::lower_bound(
          Preds, SomePHI->getIncomingBlock(i), CompareBBNumbers);
      assert(EntIt != Preds.end() && *EntIt == SomePHI->getIncomingBlock(i) &&
             "PHI node has entry for a block which is not a predecessor!");

      // Remove the entry
      Preds.erase(EntIt);
    }

    // At this point, the blocks left in the preds list must have dummy
    // entries inserted into every PHI node for the block. Update all the phi
    // nodes in this block that we are inserting (there could be phis before
    // mem2reg runs).
    unsigned NumBadPreds = SomePHI->getNumIncomingValues();
    BasicBlock::iterator BBI = BB->begin();
    while ((SomePHI = dyn_cast<PHINode>(BBI++)) &&
           SomePHI->getNumIncomingValues() == NumBadPreds) {
      Value *UndefVal = UndefValue::get(SomePHI->getType());
      for (BasicBlock *Pred : Preds)
        SomePHI->addIncoming(UndefVal, Pred);
    }
  }

  NewPhiNodes.clear();
}

/// Determine which blocks the value is live in.
///
/// These are blocks which lead to uses. Knowing this allows us to avoid
/// inserting PHI nodes into blocks which don't lead to uses (thus, the
/// inserted phi nodes would be dead).
void PromoteMem2Reg::ComputeLiveInBlocks(
    AllocaInst *AI, AllocaInfo &Info,
    const SmallPtrSetImpl<BasicBlock *> &DefBlocks,
    SmallPtrSetImpl<BasicBlock *> &LiveInBlocks) {
  // To determine liveness, we must iterate through the predecessors of blocks
  // where the def is live. Blocks are added to the worklist if we need to
  // check their predecessors. Start with all the using blocks.
  SmallVector<BasicBlock *, 64> LiveInBlockWorklist(Info.UsingBlocks.begin(),
                                                    Info.UsingBlocks.end());

  // If any of the using blocks is also a definition block, check to see if the
  // definition occurs before or after the use. If it happens before the use,
  // the value isn't really live-in.
  for (unsigned i = 0, e = LiveInBlockWorklist.size(); i != e; ++i) {
    BasicBlock *BB = LiveInBlockWorklist[i];
    if (!DefBlocks.count(BB))
      continue;

    // Okay, this is a block that both uses and defines the value. If the first
    // reference to the alloca is a def (store), then we know it isn't live-in.
    for (BasicBlock::iterator I = BB->begin();; ++I) {
      if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
        if (SI->getOperand(1) != AI)
          continue;

        // We found a store to the alloca before a load. The alloca is not
        // actually live-in here.
        LiveInBlockWorklist[i] = LiveInBlockWorklist.back();
        LiveInBlockWorklist.pop_back();
        --i;
        --e;
        break;
      }

      if (LoadInst *LI = dyn_cast<LoadInst>(I))
        // Okay, we found a load before a store to the alloca. It is actually
        // live into this block.
        if (LI->getOperand(0) == AI)
          break;
    }
  }

  // Now that we have a set of blocks where the phi is live-in, recursively add
  // their predecessors until we find the full region the value is live.
  while (!LiveInBlockWorklist.empty()) {
    BasicBlock *BB = LiveInBlockWorklist.pop_back_val();

    // The block really is live in here, insert it into the set. If already in
    // the set, then it has already been processed.
    if (!LiveInBlocks.insert(BB).second)
      continue;

    // Since the value is live into BB, it is either defined in a predecessor or
    // live into it too. Add the preds to the worklist unless they are a
    // defining block.
    for (BasicBlock *P : predecessors(BB)) {
      // The value is not live into a predecessor if it defines the value.
      if (DefBlocks.count(P))
        continue;

      // Otherwise it is, add to the worklist.
      LiveInBlockWorklist.push_back(P);
    }
  }
}

/// Queue a phi-node to be added to a basic-block for a specific Alloca.
///
/// Returns true if there wasn't already a phi-node for that variable.
bool PromoteMem2Reg::QueuePhiNode(BasicBlock *BB, unsigned AllocaNo,
                                  unsigned &Version) {
  // Look up the basic-block in question.
  PHINode *&PN = NewPhiNodes[std::make_pair(BBNumbers[BB], AllocaNo)];

  // If the BB already has a phi node added for the i'th alloca then we're done!
  if (PN)
    return false;

  // Create a PhiNode using the dereferenced type... and add the phi-node to the
  // BasicBlock.
  PN = PHINode::Create(Allocas[AllocaNo]->getAllocatedType(), getNumPreds(BB),
                       Allocas[AllocaNo]->getName() + "." + Twine(Version++),
                       &BB->front());
  ++NumPHIInsert;
  PhiToAllocaMap[PN] = AllocaNo;
  return true;
}

/// Update the debug location of a phi. \p ApplyMergedLoc indicates whether to
/// create a merged location incorporating \p DL, or to set \p DL directly.
static void updateForIncomingValueLocation(PHINode *PN, DebugLoc DL,
                                           bool ApplyMergedLoc) {
  if (ApplyMergedLoc)
    PN->applyMergedLocation(PN->getDebugLoc(), DL);
  else
    PN->setDebugLoc(DL);
}

/// Recursively traverse the CFG of the function, renaming loads and
/// stores to the allocas which we are promoting.
///
/// IncomingVals indicates what value each Alloca contains on exit from the
/// predecessor block Pred.
void PromoteMem2Reg::RenamePass(BasicBlock *BB, BasicBlock *Pred,
                                RenamePassData::ValVector &IncomingVals,
                                RenamePassData::LocationVector &IncomingLocs,
                                std::vector<RenamePassData> &Worklist) {
NextIteration:
  // If we are inserting any phi nodes into this BB, they will already be in the
  // block.
  if (PHINode *APN = dyn_cast<PHINode>(BB->begin())) {
    // If we have PHI nodes to update, compute the number of edges from Pred to
    // BB.
    if (PhiToAllocaMap.count(APN)) {
      // We want to be able to distinguish PHI nodes being inserted by
      // this invocation of mem2reg from those phi nodes that already existed in
      // the IR before mem2reg was run. We determine that APN is being inserted
      // because it is missing incoming edges. All other PHI nodes being
      // inserted by this pass of mem2reg will have the same number of incoming
      // operands so far. Remember this count.
      unsigned NewPHINumOperands = APN->getNumOperands();

      unsigned NumEdges = llvm::count(successors(Pred), BB);
      assert(NumEdges && "Must be at least one edge from Pred to BB!");

      // Add entries for all the phis.
      BasicBlock::iterator PNI = BB->begin();
      do {
        unsigned AllocaNo = PhiToAllocaMap[APN];

        // Update the location of the phi node.
        updateForIncomingValueLocation(APN, IncomingLocs[AllocaNo],
                                       APN->getNumIncomingValues() > 0);

        // Add N incoming values to the PHI node.
        for (unsigned i = 0; i != NumEdges; ++i)
          APN->addIncoming(IncomingVals[AllocaNo], Pred);

        // The currently active variable for this block is now the PHI.
        IncomingVals[AllocaNo] = APN;
        for (DbgVariableIntrinsic *DII : AllocaDbgUsers[AllocaNo])
          if (DII->isAddressOfVariable())
            ConvertDebugDeclareToDebugValue(DII, APN, DIB);

        // Get the next phi node.
        ++PNI;
        APN = dyn_cast<PHINode>(PNI);
        if (!APN)
          break;

        // Verify that it is missing entries. If not, it is not being inserted
        // by this mem2reg invocation so we want to ignore it.
      } while (APN->getNumOperands() == NewPHINumOperands);
    }
  }

  // Don't revisit blocks.
  if (!Visited.insert(BB).second)
    return;

  for (BasicBlock::iterator II = BB->begin(); !II->isTerminator();) {
    Instruction *I = &*II++; // get the instruction, increment iterator

    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      AllocaInst *Src = dyn_cast<AllocaInst>(LI->getPointerOperand());
      if (!Src)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator AI = AllocaLookup.find(Src);
      if (AI == AllocaLookup.end())
        continue;

      Value *V = IncomingVals[AI->second];

      // If the load was marked as nonnull we don't want to lose
      // that information when we erase this Load. So we preserve
      // it with an assume.
      if (AC && LI->getMetadata(LLVMContext::MD_nonnull) &&
          !isKnownNonZero(V, SQ.DL, 0, AC, LI, &DT))
        addAssumeNonNull(AC, LI);

      // Anything using the load now uses the current value.
      LI->replaceAllUsesWith(V);
      BB->getInstList().erase(LI);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Delete this instruction and mark the name as the current holder of the
      // value
      AllocaInst *Dest = dyn_cast<AllocaInst>(SI->getPointerOperand());
      if (!Dest)
        continue;

      DenseMap<AllocaInst *, unsigned>::iterator ai = AllocaLookup.find(Dest);
      if (ai == AllocaLookup.end())
        continue;

      // what value were we writing?
      unsigned AllocaNo = ai->second;
      IncomingVals[AllocaNo] = SI->getOperand(0);

      // Record debuginfo for the store before removing it.
      IncomingLocs[AllocaNo] = SI->getDebugLoc();
      for (DbgVariableIntrinsic *DII : AllocaDbgUsers[ai->second])
        if (DII->isAddressOfVariable())
          ConvertDebugDeclareToDebugValue(DII, SI, DIB);
      BB->getInstList().erase(SI);
    }
  }

  // 'Recurse' to our successors.
  succ_iterator I = succ_begin(BB), E = succ_end(BB);
  if (I == E)
    return;

  // Keep track of the successors so we don't visit the same successor twice
  SmallPtrSet<BasicBlock *, 8> VisitedSuccs;

  // Handle the first successor without using the worklist.
  VisitedSuccs.insert(*I);
  Pred = BB;
  BB = *I;
  ++I;

  for (; I != E; ++I)
    if (VisitedSuccs.insert(*I).second)
      Worklist.emplace_back(*I, Pred, IncomingVals, IncomingLocs);

  goto NextIteration;
}

void llvm::PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
                           AssumptionCache *AC) {
  // If there is nothing to do, bail out...
  if (Allocas.empty())
    return;

  PromoteMem2Reg(Allocas, DT, AC).run();
}
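
//===----------------------------------------------------------------------===//
// Illustrative usage sketch (not part of the pass itself, and excluded from
// compilation): roughly how a caller such as the mem2reg pass drives the
// utility above. It repeatedly collects promotable allocas from the entry
// block and hands them to PromoteMemToReg until no candidates remain. The
// function name below is hypothetical and serves only as an example.
//===----------------------------------------------------------------------===//
#if 0
static bool promoteEntryBlockAllocasExample(Function &F, DominatorTree &DT,
                                            AssumptionCache &AC) {
  std::vector<AllocaInst *> Allocas;
  // Only allocas in the entry block are considered for promotion.
  BasicBlock &BB = F.getEntryBlock();
  bool Changed = false;

  while (true) {
    Allocas.clear();

    // Find allocas that are safe to promote by scanning the entry block.
    for (Instruction &I : BB)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(&I))
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty())
      break;

    PromoteMemToReg(Allocas, DT, &AC);
    Changed = true;
  }
  return Changed;
}
#endif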