//===-- Local.cpp - Functions to perform local transformations -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This family of functions performs various local transformations to the
// program.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ProfileInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DIBuilder.h"
#include "llvm/DebugInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(NumRemoved, "Number of unreachable basic blocks removed");

//===----------------------------------------------------------------------===//
//  Local constant propagation.
//

/// ConstantFoldTerminator - If a terminator instruction is predicated on a
/// constant value, convert it into an unconditional branch to the constant
/// destination.  This is a nontrivial operation because the successors of this
/// basic block must have their PHI nodes updated.
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses that this might make dead, if
/// DeleteDeadConditions is true.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
                                  const TargetLibraryInfo *TLI) {
  TerminatorInst *T = BB->getTerminator();
  IRBuilder<> Builder(T);

  // Branch - See if we are conditionally jumping on a constant.
  if (BranchInst *BI = dyn_cast<BranchInst>(T)) {
    if (BI->isUnconditional()) return false;  // Can't optimize uncond branch
    BasicBlock *Dest1 = BI->getSuccessor(0);
    BasicBlock *Dest2 = BI->getSuccessor(1);

    if (ConstantInt *Cond = dyn_cast<ConstantInt>(BI->getCondition())) {
      // Are we branching on a constant?
      // YES.  Change to an unconditional branch...
      BasicBlock *Destination = Cond->getZExtValue() ? Dest1 : Dest2;
      BasicBlock *OldDest     = Cond->getZExtValue() ? Dest2 : Dest1;

      //cerr << "Function: " << T->getParent()->getParent()
      //     << "\nRemoving branch from " << T->getParent()
      //     << "\n\nTo: " << OldDest << endl;

      // Let the basic block know that we are letting go of it.  Based on this,
      // it will adjust its PHI nodes.
      OldDest->removePredecessor(BB);

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Destination);
      BI->eraseFromParent();
      return true;
    }

    if (Dest2 == Dest1) {       // Conditional branch to same location?
      // This branch matches something like this:
      //     br bool %cond, label %Dest, label %Dest
      // and changes it into:  br label %Dest

      // Let the basic block know that we are letting go of one copy of it.
      assert(BI->getParent() && "Terminator not inserted in block!");
      Dest1->removePredecessor(BI->getParent());

      // Replace the conditional branch with an unconditional one.
      Builder.CreateBr(Dest1);
      Value *Cond = BI->getCondition();
      BI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }
    return false;
  }

  if (SwitchInst *SI = dyn_cast<SwitchInst>(T)) {
    // If we are switching on a constant, we can convert the switch into a
    // single branch instruction!
    ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition());
    BasicBlock *TheOnlyDest = SI->getDefaultDest();
    BasicBlock *DefaultDest = TheOnlyDest;

    // Figure out which case it goes to.
    for (SwitchInst::CaseIt i = SI->case_begin(), e = SI->case_end();
         i != e; ++i) {
      // Found a case matching the constant operand?
      if (i.getCaseValue() == CI) {
        TheOnlyDest = i.getCaseSuccessor();
        break;
      }

      // Check to see if this branch is going to the same place as the default
      // dest.  If so, eliminate it as an explicit compare.
      if (i.getCaseSuccessor() == DefaultDest) {
        MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
        // MD should have 2 + NumCases operands.
        if (MD && MD->getNumOperands() == 2 + SI->getNumCases()) {
          // Collect branch weights into a vector.
          SmallVector<uint32_t, 8> Weights;
          for (unsigned MD_i = 1, MD_e = MD->getNumOperands(); MD_i < MD_e;
               ++MD_i) {
            ConstantInt *CI = dyn_cast<ConstantInt>(MD->getOperand(MD_i));
            assert(CI);
            Weights.push_back(CI->getValue().getZExtValue());
          }
          // Merge the weight of this case into the default weight.
          unsigned idx = i.getCaseIndex();
          Weights[0] += Weights[idx+1];
          // Remove the weight for this case.
          std::swap(Weights[idx+1], Weights.back());
          Weights.pop_back();
          SI->setMetadata(LLVMContext::MD_prof,
                          MDBuilder(BB->getContext()).
                          createBranchWeights(Weights));
        }
        // Remove this entry.
        DefaultDest->removePredecessor(SI->getParent());
        SI->removeCase(i);
        --i; --e;
        continue;
      }

      // Otherwise, check to see if the switch only branches to one destination.
      // We do this by resetting "TheOnlyDest" to null when we find two non-equal
      // destinations.
      if (i.getCaseSuccessor() != TheOnlyDest) TheOnlyDest = 0;
    }

    if (CI && !TheOnlyDest) {
      // Branching on a constant, but not to any of the cases; go to the default
      // successor.
      TheOnlyDest = SI->getDefaultDest();
    }

    // If we found a single destination that we can fold the switch into, do so
    // now.
    if (TheOnlyDest) {
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);
      BasicBlock *BB = SI->getParent();

      // Remove entries from PHI nodes which we no longer branch to...
      for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) {
        // Keep one edge to TheOnlyDest; drop BB from the PHI nodes of the
        // other successors.
        BasicBlock *Succ = SI->getSuccessor(i);
        if (Succ == TheOnlyDest)
          TheOnlyDest = 0;  // Don't modify the first branch to TheOnlyDest
        else
          Succ->removePredecessor(BB);
      }

      // Delete the old switch.
      Value *Cond = SI->getCondition();
      SI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
      return true;
    }

    if (SI->getNumCases() == 1) {
      // Otherwise, we can fold this switch into a conditional branch
      // instruction if it has only one non-default destination.
      SwitchInst::CaseIt FirstCase = SI->case_begin();
      Value *Cond = Builder.CreateICmpEQ(SI->getCondition(),
                                         FirstCase.getCaseValue(), "cond");

      // Insert the new branch.
      BranchInst *NewBr = Builder.CreateCondBr(Cond,
                                               FirstCase.getCaseSuccessor(),
                                               SI->getDefaultDest());
      MDNode *MD = SI->getMetadata(LLVMContext::MD_prof);
      if (MD && MD->getNumOperands() == 3) {
        ConstantInt *SICase = dyn_cast<ConstantInt>(MD->getOperand(2));
        ConstantInt *SIDef = dyn_cast<ConstantInt>(MD->getOperand(1));
        assert(SICase && SIDef);
        // The TrueWeight should be the weight for the single case of SI.
        NewBr->setMetadata(LLVMContext::MD_prof,
                           MDBuilder(BB->getContext()).
                           createBranchWeights(SICase->getValue().getZExtValue(),
                                               SIDef->getValue().getZExtValue()));
      }

      // Delete the old switch.
      SI->eraseFromParent();
      return true;
    }
    return false;
  }

  if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(T)) {
    // indirectbr blockaddress(@F, @BB) -> br label @BB
    if (BlockAddress *BA =
          dyn_cast<BlockAddress>(IBI->getAddress()->stripPointerCasts())) {
      BasicBlock *TheOnlyDest = BA->getBasicBlock();
      // Insert the new branch.
      Builder.CreateBr(TheOnlyDest);

      for (unsigned i = 0, e = IBI->getNumDestinations(); i != e; ++i) {
        if (IBI->getDestination(i) == TheOnlyDest)
          TheOnlyDest = 0;
        else
          IBI->getDestination(i)->removePredecessor(IBI->getParent());
      }
      Value *Address = IBI->getAddress();
      IBI->eraseFromParent();
      if (DeleteDeadConditions)
        RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);

      // If we didn't find our destination in the IBI successor list, then we
      // have undefined behavior.  Replace the unconditional branch with an
      // 'unreachable' instruction.
      if (TheOnlyDest) {
        BB->getTerminator()->eraseFromParent();
        new UnreachableInst(BB->getContext(), BB);
      }

      return true;
    }
  }

  return false;
}


//===----------------------------------------------------------------------===//
//  Local dead code elimination.
//

/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool llvm::isInstructionTriviallyDead(Instruction *I,
                                      const TargetLibraryInfo *TLI) {
  if (!I->use_empty() || isa<TerminatorInst>(I)) return false;

  // We don't want the landingpad instruction removed by anything this general.
  if (isa<LandingPadInst>(I))
    return false;

  // We don't want debug info removed by anything this general, unless
  // debug info is empty.
  if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
    if (DDI->getAddress())
      return false;
    return true;
  }
  if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
    if (DVI->getValue())
      return false;
    return true;
  }

  if (!I->mayHaveSideEffects()) return true;

  // Special case intrinsics that "may have side effects" but can be deleted
  // when dead.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Safe to delete llvm.stacksave if dead.
    if (II->getIntrinsicID() == Intrinsic::stacksave)
      return true;

    // Lifetime intrinsics are dead when their pointer operand (the second
    // argument) is undef.
    if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
        II->getIntrinsicID() == Intrinsic::lifetime_end)
      return isa<UndefValue>(II->getArgOperand(1));
  }

  if (isAllocLikeFn(I, TLI)) return true;

  if (CallInst *CI = isFreeCall(I, TLI))
    if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
      return C->isNullValue() || isa<UndefValue>(C);

  return false;
}

/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it.  If that makes any of its operands
/// trivially dead, delete them too, recursively.  Return true if any
/// instructions were deleted.
bool
llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
                                                 const TargetLibraryInfo *TLI) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
    return false;

  SmallVector<Instruction*, 16> DeadInsts;
  DeadInsts.push_back(I);

  do {
    I = DeadInsts.pop_back_val();

    // Null out all of the instruction's operands to see if any operand becomes
    // dead as we go.
    for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
      Value *OpV = I->getOperand(i);
      I->setOperand(i, 0);

      if (!OpV->use_empty()) continue;

      // If the operand is an instruction that became dead as we nulled out the
      // operand, and if it is 'trivially' dead, delete it in a future loop
      // iteration.
      if (Instruction *OpI = dyn_cast<Instruction>(OpV))
        if (isInstructionTriviallyDead(OpI, TLI))
          DeadInsts.push_back(OpI);
    }

    I->eraseFromParent();
  } while (!DeadInsts.empty());

  return true;
}

/// areAllUsesEqual - Check whether the uses of a value are all the same.
/// This is similar to Instruction::hasOneUse() except this will also return
/// true when there are no uses or multiple uses that all refer to the same
/// value.
static bool areAllUsesEqual(Instruction *I) {
  Value::use_iterator UI = I->use_begin();
  Value::use_iterator UE = I->use_end();
  if (UI == UE)
    return true;

  User *TheUse = *UI;
  for (++UI; UI != UE; ++UI) {
    if (*UI != TheUse)
      return false;
  }
  return true;
}
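// Editor's note (illustrative sketch, not part of the original source): a
// typical client of the local DCE helpers above replaces an instruction's
// uses and then lets RecursivelyDeleteTriviallyDeadInstructions() clean up
// the now-dead operand tree.  Assuming a replacement value 'Simplified' and a
// TargetLibraryInfo pointer 'TLI' are in scope:
//
//   I->replaceAllUsesWith(Simplified);
//   RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
//
// This erases I and, transitively, any of its operands that become use-free
// and are trivially dead according to isInstructionTriviallyDead().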
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it.  If that makes any of its operands trivially dead, delete them
/// too, recursively.  Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
                                        const TargetLibraryInfo *TLI) {
  SmallPtrSet<Instruction*, 4> Visited;
  for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
       I = cast<Instruction>(*I->use_begin())) {
    if (I->use_empty())
      return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);

    // If we find an instruction more than once, we're on a cycle that
    // won't prove fruitful.
    if (!Visited.insert(I)) {
      // Break the cycle and delete the instruction and its operands.
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
      (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
      return true;
    }
  }
  return false;
}

/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
/// simplify any instructions in it and recursively delete dead instructions.
///
/// This returns true if it changed the code.  Note that it can delete
/// instructions in other blocks as well as in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD,
                                       const TargetLibraryInfo *TLI) {
  bool MadeChange = false;

#ifndef NDEBUG
  // In debug builds, ensure that the terminator of the block is never replaced
  // or deleted by these simplifications.  The idea of simplification is that it
  // cannot introduce new instructions, and there is no way to replace the
  // terminator of a block without introducing a new instruction.
  AssertingVH<Instruction> TerminatorVH(--BB->end());
#endif

  for (BasicBlock::iterator BI = BB->begin(), E = --BB->end(); BI != E; ) {
    assert(!BI->isTerminator());
    Instruction *Inst = BI++;

    WeakVH BIHandle(BI);
    if (recursivelySimplifyInstruction(Inst, TD)) {
      MadeChange = true;
      if (BIHandle != BI)
        BI = BB->begin();
      continue;
    }

    MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
    if (BIHandle != BI)
      BI = BB->begin();
  }
  return MadeChange;
}

//===----------------------------------------------------------------------===//
//  Control Flow Graph Restructuring.
//


/// RemovePredecessorAndSimplify - Like BasicBlock::removePredecessor, this
/// method is called when we're about to delete Pred as a predecessor of BB.
/// If BB contains any PHI nodes, this drops the entries in the PHI nodes for
/// Pred.
///
/// Unlike the removePredecessor method, this attempts to simplify uses of PHI
/// nodes that collapse into identity values.  For example, if we have:
///   x = phi(1, 0, 0, 0)
///   y = and x, z
///
/// ... and delete the predecessor corresponding to the '1', this will attempt
/// to recursively fold the 'and' to 0.
void llvm::RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
                                        DataLayout *TD) {
  // This only adjusts blocks with PHI nodes.
  if (!isa<PHINode>(BB->begin()))
    return;

  // Remove the entries for Pred from the PHI nodes in BB, but do not simplify
  // them down.  This will leave us with single-entry phi nodes and other phis
  // that can be removed.
  BB->removePredecessor(Pred, true);

  WeakVH PhiIt = &BB->front();
  while (PHINode *PN = dyn_cast<PHINode>(PhiIt)) {
    PhiIt = &*++BasicBlock::iterator(cast<Instruction>(PhiIt));
    Value *OldPhiIt = PhiIt;

    if (!recursivelySimplifyInstruction(PN, TD))
      continue;

    // If recursive simplification ended up deleting the next PHI node we would
    // iterate to, then our iterator is invalid; restart scanning from the top
    // of the block.
    if (PhiIt != OldPhiIt) PhiIt = &BB->front();
  }
}


/// MergeBasicBlockIntoOnlyPred - DestBB is a block with one predecessor and its
/// predecessor is known to have one successor (DestBB!).  Eliminate the edge
/// between them, moving the instructions in the predecessor into DestBB and
/// deleting the predecessor block.
///
void llvm::MergeBasicBlockIntoOnlyPred(BasicBlock *DestBB, Pass *P) {
  // If BB has single-entry PHI nodes, fold them.
  while (PHINode *PN = dyn_cast<PHINode>(DestBB->begin())) {
    Value *NewVal = PN->getIncomingValue(0);
    // Replace a self-referencing PHI with undef; it must be dead.
    if (NewVal == PN) NewVal = UndefValue::get(PN->getType());
    PN->replaceAllUsesWith(NewVal);
    PN->eraseFromParent();
  }

  BasicBlock *PredBB = DestBB->getSinglePredecessor();
  assert(PredBB && "Block doesn't have a single predecessor!");

  // Zap anything that took the address of DestBB.  Not doing this will give
  // the address an invalid value.
  if (DestBB->hasAddressTaken()) {
    BlockAddress *BA = BlockAddress::get(DestBB);
    Constant *Replacement =
      ConstantInt::get(llvm::Type::getInt32Ty(BA->getContext()), 1);
    BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(Replacement,
                                                     BA->getType()));
    BA->destroyConstant();
  }

  // Anything that branched to PredBB now branches to DestBB.
  PredBB->replaceAllUsesWith(DestBB);

  // Splice all the instructions from PredBB to DestBB.
  PredBB->getTerminator()->eraseFromParent();
  DestBB->getInstList().splice(DestBB->begin(), PredBB->getInstList());

  if (P) {
    DominatorTree *DT = P->getAnalysisIfAvailable<DominatorTree>();
    if (DT) {
      BasicBlock *PredBBIDom = DT->getNode(PredBB)->getIDom()->getBlock();
      DT->changeImmediateDominator(DestBB, PredBBIDom);
      DT->eraseNode(PredBB);
    }
    ProfileInfo *PI = P->getAnalysisIfAvailable<ProfileInfo>();
    if (PI) {
      PI->replaceAllUses(PredBB, DestBB);
      PI->removeEdge(ProfileInfo::getEdge(PredBB, DestBB));
    }
  }
  // Nuke PredBB.
  PredBB->eraseFromParent();
}

/// CanMergeValues - Return true if we can choose one of these values to use
/// in place of the other.  Note that we will always choose the non-undef
/// value to keep.
static bool CanMergeValues(Value *First, Value *Second) {
  return First == Second || isa<UndefValue>(First) || isa<UndefValue>(Second);
}

/// CanPropagatePredecessorsForPHIs - Return true if we can fold BB, an
/// almost-empty BB ending in an unconditional branch to Succ, into Succ.
///
/// Assumption: Succ is the single successor for BB.
///
static bool CanPropagatePredecessorsForPHIs(BasicBlock *BB, BasicBlock *Succ) {
  assert(*succ_begin(BB) == Succ && "Succ is not successor of BB!");

  DEBUG(dbgs() << "Looking to fold " << BB->getName() << " into "
        << Succ->getName() << "\n");
  // Shortcut: if there is only a single predecessor, it must be BB and merging
  // is always safe.
  if (Succ->getSinglePredecessor()) return true;

  // Make a list of the predecessors of BB.
  SmallPtrSet<BasicBlock*, 16> BBPreds(pred_begin(BB), pred_end(BB));

  // Look at all the phi nodes in Succ, to see if they present a conflict when
  // merging these blocks.
  for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);

    // If the incoming value from BB is again a PHINode in
    // BB which has the same incoming value for *PI as PN does, we can
    // merge the phi nodes and then the blocks can still be merged.
    PHINode *BBPN = dyn_cast<PHINode>(PN->getIncomingValueForBlock(BB));
    if (BBPN && BBPN->getParent() == BB) {
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(BBPN->getIncomingValueForBlock(IBB),
                            PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with "
                << BBPN->getName() << " with regard to common predecessor "
                << IBB->getName() << "\n");
          return false;
        }
      }
    } else {
      Value *Val = PN->getIncomingValueForBlock(BB);
      for (unsigned PI = 0, PE = PN->getNumIncomingValues(); PI != PE; ++PI) {
        // See if the incoming value for the common predecessor is equal to the
        // one for BB, in which case this phi node will not prevent the merging
        // of the block.
        BasicBlock *IBB = PN->getIncomingBlock(PI);
        if (BBPreds.count(IBB) &&
            !CanMergeValues(Val, PN->getIncomingValue(PI))) {
          DEBUG(dbgs() << "Can't fold, phi node " << PN->getName() << " in "
                << Succ->getName() << " is conflicting with regard to common "
                << "predecessor " << IBB->getName() << "\n");
          return false;
        }
      }
    }
  }

  return true;
}

typedef SmallVector<BasicBlock *, 16> PredBlockVector;
typedef DenseMap<BasicBlock *, Value *> IncomingValueMap;

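// Editor's note (illustrative only, not from the original source): the helpers
// below update PHI nodes when an almost-empty block is folded into its
// successor.  Roughly, for hypothetical blocks %bb and %succ:
//
//   bb:                               ; preds = %a, %b
//     br label %succ
//   succ:                             ; preds = %bb, %c
//     %p = phi i32 [ %x, %bb ], [ %y, %c ]
//
// becomes, after redirecting %bb's predecessors to %succ:
//
//   succ:                             ; preds = %a, %b, %c
//     %p = phi i32 [ %x, %a ], [ %x, %b ], [ %y, %c ]
//
// with undef incoming values rewritten to match known values from the same
// predecessor where possible.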
/// \brief Determines the value to use as the phi node input for a block.
///
/// Select between \p OldVal and any value that we know flows from \p BB
/// to a particular phi on the basis of which one (if either) is not
/// undef. Update IncomingValues based on the selected value.
///
/// \param OldVal The value we are considering selecting.
/// \param BB The block that the value flows in from.
/// \param IncomingValues A map from block-to-value for other phi inputs
/// that we have examined.
///
/// \returns the selected value.
static Value *selectIncomingValueForBlock(Value *OldVal, BasicBlock *BB,
                                          IncomingValueMap &IncomingValues) {
  if (!isa<UndefValue>(OldVal)) {
    assert((!IncomingValues.count(BB) ||
            IncomingValues.find(BB)->second == OldVal) &&
           "Expected OldVal to match incoming value from BB!");

    IncomingValues.insert(std::make_pair(BB, OldVal));
    return OldVal;
  }

  IncomingValueMap::const_iterator It = IncomingValues.find(BB);
  if (It != IncomingValues.end()) return It->second;

  return OldVal;
}

/// \brief Create a map from block to value for the operands of a
/// given phi.
///
/// Create a map from block to value for each non-undef value flowing
/// into \p PN.
///
/// \param PN The phi we are collecting the map for.
/// \param IncomingValues [out] The map from block to value for this phi.
static void gatherIncomingValuesToPhi(PHINode *PN,
                                      IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *BB = PN->getIncomingBlock(i);
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V))
      IncomingValues.insert(std::make_pair(BB, V));
  }
}

/// \brief Replace the incoming undef values to a phi with the values
/// from a block-to-value map.
///
/// \param PN The phi we are replacing the undefs in.
/// \param IncomingValues A map from block to value.
static void replaceUndefValuesInPhi(PHINode *PN,
                                    const IncomingValueMap &IncomingValues) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    Value *V = PN->getIncomingValue(i);

    if (!isa<UndefValue>(V)) continue;

    BasicBlock *BB = PN->getIncomingBlock(i);
    IncomingValueMap::const_iterator It = IncomingValues.find(BB);
    if (It == IncomingValues.end()) continue;

    PN->setIncomingValue(i, It->second);
  }
}

/// \brief Replace a value flowing from a block to a phi with
/// potentially multiple instances of that value flowing from the
/// block's predecessors to the phi.
///
/// \param BB The block with the value flowing into the phi.
/// \param BBPreds The predecessors of BB.
/// \param PN The phi that we are updating.
static void redirectValuesFromPredecessorsToPhi(BasicBlock *BB,
                                                const PredBlockVector &BBPreds,
                                                PHINode *PN) {
  Value *OldVal = PN->removeIncomingValue(BB, false);
  assert(OldVal && "No entry in PHI for Pred BB!");

  IncomingValueMap IncomingValues;

  // We are merging two blocks - BB, and the block containing PN - and
  // as a result we need to redirect edges from the predecessors of BB
  // to go to the block containing PN, and update PN
  // accordingly.  Since we allow merging blocks in the case where the
  // predecessor and successor blocks both share some predecessors,
  // and where some of those common predecessors might have undef
  // values flowing into PN, we want to rewrite those values to be
  // consistent with the non-undef values.

  gatherIncomingValuesToPhi(PN, IncomingValues);

  // If this incoming value is one of the PHI nodes in BB, the new entries
  // in the PHI node are the entries from the old PHI.
  if (isa<PHINode>(OldVal) && cast<PHINode>(OldVal)->getParent() == BB) {
    PHINode *OldValPN = cast<PHINode>(OldVal);
    for (unsigned i = 0, e = OldValPN->getNumIncomingValues(); i != e; ++i) {
      // Note that, since we are merging phi nodes and BB and Succ might
      // have common predecessors, we could end up with a phi node with
      // identical incoming branches.  This will be cleaned up later (and
      // will trigger asserts if we try to clean it up now, without also
      // simplifying the corresponding conditional branch).
      BasicBlock *PredBB = OldValPN->getIncomingBlock(i);
      Value *PredVal = OldValPN->getIncomingValue(i);
      Value *Selected = selectIncomingValueForBlock(PredVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  } else {
    for (unsigned i = 0, e = BBPreds.size(); i != e; ++i) {
      // Update existing incoming values in PN for this
      // predecessor of BB.
      BasicBlock *PredBB = BBPreds[i];
      Value *Selected = selectIncomingValueForBlock(OldVal, PredBB,
                                                    IncomingValues);

      // And add a new incoming value for this predecessor for the
      // newly retargeted branch.
      PN->addIncoming(Selected, PredBB);
    }
  }

  replaceUndefValuesInPhi(PN, IncomingValues);
}

/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
/// unconditional branch, and contains no instructions other than PHI nodes,
/// potentially side-effect-free intrinsics and the branch.  If possible,
/// eliminate BB by rewriting all the predecessors to branch to the successor
/// block and return true.  If we can't transform, return false.
bool llvm::TryToSimplifyUncondBranchFromEmptyBlock(BasicBlock *BB) {
  assert(BB != &BB->getParent()->getEntryBlock() &&
         "TryToSimplifyUncondBranchFromEmptyBlock called on entry block!");

  // We can't eliminate infinite loops.
  BasicBlock *Succ = cast<BranchInst>(BB->getTerminator())->getSuccessor(0);
  if (BB == Succ) return false;

  // Check to see if merging these blocks would cause conflicts for any of the
  // phi nodes in BB or Succ.  If not, we can safely merge.
  if (!CanPropagatePredecessorsForPHIs(BB, Succ)) return false;

  // Check for cases where Succ has multiple predecessors and a PHI node in BB
  // has uses which will not disappear when the PHI nodes are merged.  It is
  // possible to handle such cases, but difficult: it requires checking whether
  // BB dominates Succ, which is non-trivial to calculate in the case where
  // Succ has multiple predecessors.  Also, it requires checking whether
  // constructing the necessary self-referential PHI node doesn't introduce any
  // conflicts; this isn't too difficult, but the previous code for doing this
  // was incorrect.
  //
  // Note that if this check finds a live use, BB dominates Succ, so BB is
  // something like a loop pre-header (or rarely, a part of an irreducible CFG);
  // folding the branch isn't profitable in that case anyway.
  if (!Succ->getSinglePredecessor()) {
    BasicBlock::iterator BBI = BB->begin();
    while (isa<PHINode>(*BBI)) {
      for (Value::use_iterator UI = BBI->use_begin(), E = BBI->use_end();
           UI != E; ++UI) {
        if (PHINode *PN = dyn_cast<PHINode>(*UI)) {
          if (PN->getIncomingBlock(UI) != BB)
            return false;
        } else {
          return false;
        }
      }
      ++BBI;
    }
  }

  DEBUG(dbgs() << "Killing Trivial BB: \n" << *BB);

  if (isa<PHINode>(Succ->begin())) {
    // If there is more than one pred of succ, and there are PHI nodes in
    // the successor, then we need to add incoming edges for the PHI nodes.
    //
    const PredBlockVector BBPreds(pred_begin(BB), pred_end(BB));

    // Loop over all of the PHI nodes in the successor of BB.
    for (BasicBlock::iterator I = Succ->begin(); isa<PHINode>(I); ++I) {
      PHINode *PN = cast<PHINode>(I);

      redirectValuesFromPredecessorsToPhi(BB, BBPreds, PN);
    }
  }

  if (Succ->getSinglePredecessor()) {
    // BB is the only predecessor of Succ, so Succ will end up with exactly
    // the same predecessors BB had.

    // Copy over any phi, debug or lifetime instruction.
    BB->getTerminator()->eraseFromParent();
    Succ->getInstList().splice(Succ->getFirstNonPHI(), BB->getInstList());
  } else {
    while (PHINode *PN = dyn_cast<PHINode>(&BB->front())) {
      // We explicitly check for such uses in CanPropagatePredecessorsForPHIs.
      assert(PN->use_empty() && "There shouldn't be any uses here!");
      PN->eraseFromParent();
    }
  }

  // Everything that jumped to BB now goes to Succ.
  BB->replaceAllUsesWith(Succ);
  if (!Succ->hasName()) Succ->takeName(BB);
  BB->eraseFromParent();              // Delete the old basic block.
  return true;
}

/// EliminateDuplicatePHINodes - Check for and eliminate duplicate PHI
/// nodes in this block.  This doesn't try to be clever about PHI nodes
/// which differ only in the order of the incoming values, but instcombine
/// orders them so it usually won't matter.
///
bool llvm::EliminateDuplicatePHINodes(BasicBlock *BB) {
  bool Changed = false;

  // This implementation doesn't currently consider undef operands
  // specially.  Theoretically, two phis which are identical except for
  // one having an undef where the other doesn't could be collapsed.

  // Map from PHI hash values to PHI nodes.  If multiple PHIs have
  // the same hash value, the element is the first PHI in the
  // linked list in CollisionMap.
  DenseMap<uintptr_t, PHINode *> HashMap;

  // Maintain linked lists of PHI nodes with common hash values.
  DenseMap<PHINode *, PHINode *> CollisionMap;

  // Examine each PHI.
  for (BasicBlock::iterator I = BB->begin();
       PHINode *PN = dyn_cast<PHINode>(I++); ) {
    // Compute a hash value on the operands.  Instcombine will likely have
    // sorted them, which helps expose duplicates, but we have to check all the
    // operands to be safe in case instcombine hasn't run.
    uintptr_t Hash = 0;
    // This hash algorithm is quite weak as hash functions go, but it seems
    // to do a good enough job for this particular purpose, and is very quick.
    for (User::op_iterator I = PN->op_begin(), E = PN->op_end(); I != E; ++I) {
      Hash ^= reinterpret_cast<uintptr_t>(static_cast<Value *>(*I));
      Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
    }
    for (PHINode::block_iterator I = PN->block_begin(), E = PN->block_end();
         I != E; ++I) {
      Hash ^= reinterpret_cast<uintptr_t>(static_cast<BasicBlock *>(*I));
      Hash = (Hash << 7) | (Hash >> (sizeof(uintptr_t) * CHAR_BIT - 7));
    }
    // Avoid colliding with the DenseMap sentinels ~0 and ~0-1.
    Hash >>= 1;
    // If we've never seen this hash value before, it's a unique PHI.
    std::pair<DenseMap<uintptr_t, PHINode *>::iterator, bool> Pair =
      HashMap.insert(std::make_pair(Hash, PN));
    if (Pair.second) continue;
    // Otherwise it's either a duplicate or a hash collision.
    for (PHINode *OtherPN = Pair.first->second; ; ) {
      if (OtherPN->isIdenticalTo(PN)) {
        // A duplicate.  Replace this PHI with its duplicate.
        PN->replaceAllUsesWith(OtherPN);
        PN->eraseFromParent();
        Changed = true;
        break;
      }
      // A non-duplicate hash collision.
      DenseMap<PHINode *, PHINode *>::iterator I = CollisionMap.find(OtherPN);
      if (I == CollisionMap.end()) {
        // Set this PHI to be the head of the linked list of colliding PHIs.
        PHINode *Old = Pair.first->second;
        Pair.first->second = PN;
        CollisionMap[PN] = Old;
        break;
      }
      // Proceed to the next PHI in the list.
      OtherPN = I->second;
    }
  }

  return Changed;
}

/// enforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign.  This isn't
/// often possible though.  If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned enforceKnownAlignment(Value *V, unsigned Align,
                                      unsigned PrefAlign,
                                      const DataLayout *TD) {
  V = V->stripPointerCasts();

  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If the preferred alignment is greater than the natural stack alignment
    // then don't round up.  This avoids dynamic stack realignment.
    if (TD && TD->exceedsNaturalStackAlignment(PrefAlign))
      return Align;
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      return AI->getAlignment();
    AI->setAlignment(PrefAlign);
    return PrefAlign;
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (GV->isDeclaration()) return Align;
    // If the memory we set aside for the global may not be the memory used by
    // the final program then it is impossible for us to reliably enforce the
    // preferred alignment.
    if (GV->isWeakForLinker()) return Align;

    if (GV->getAlignment() >= PrefAlign)
      return GV->getAlignment();
    // We can only increase the alignment of the global if it has no alignment
    // specified or if it is not assigned a section.  If it is assigned a
    // section, the global could be densely packed with other objects in the
    // section, and increasing the alignment could cause padding issues.
    if (!GV->hasSection() || GV->getAlignment() == 0)
      GV->setAlignment(PrefAlign);
    return GV->getAlignment();
  }

  return Align;
}

/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0.  If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned llvm::getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
                                          const DataLayout *DL) {
  assert(V->getType()->isPointerTy() &&
         "getOrEnforceKnownAlignment expects a pointer!");
  unsigned BitWidth = DL ? DL->getPointerTypeSizeInBits(V->getType()) : 64;

  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, DL);
  unsigned TrailZ = KnownZero.countTrailingOnes();

  // Avoid trouble with ridiculously large TrailZ values, such as
  // those computed from a null pointer.
  TrailZ = std::min(TrailZ, unsigned(sizeof(unsigned) * CHAR_BIT - 1));

  unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);

  // LLVM doesn't support alignments larger than this currently.
  Align = std::min(Align, +Value::MaximumAlignment);

  if (PrefAlign > Align)
    Align = enforceKnownAlignment(V, Align, PrefAlign, DL);

  // We don't need to make any adjustment.
  return Align;
}

///===---------------------------------------------------------------------===//
///  Dbg Intrinsic utilities
///

/// See if there is a dbg.value intrinsic for DIVar before I.
static bool LdStHasDebugValue(DIVariable &DIVar, Instruction *I) {
  // Since we can't guarantee that the original dbg.declare intrinsic
  // is removed by LowerDbgDeclare(), we need to make sure that we are
  // not inserting the same dbg.value intrinsic over and over.
  llvm::BasicBlock::InstListType::iterator PrevI(I);
  if (PrevI != I->getParent()->getInstList().begin()) {
    --PrevI;
    if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(PrevI))
      if (DVI->getValue() == I->getOperand(0) &&
          DVI->getOffset() == 0 &&
          DVI->getVariable() == DIVar)
        return true;
  }
  return false;
}

/// Inserts an llvm.dbg.value intrinsic before a store to an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           StoreInst *SI, DIBuilder &Builder) {
  DIVariable DIVar(DDI->getVariable());
  assert((!DIVar || DIVar.isVariable()) &&
         "Variable in DbgDeclareInst should be either null or a DIVariable.");
  if (!DIVar)
    return false;

  if (LdStHasDebugValue(DIVar, SI))
    return true;

  Instruction *DbgVal = NULL;
  // If an argument is zero- or sign-extended then use the argument directly;
  // the extension may be zapped by an optimization pass in the future.
  Argument *ExtendedArg = NULL;
  if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
  if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
    ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
  if (ExtendedArg)
    DbgVal = Builder.insertDbgValueIntrinsic(ExtendedArg, 0, DIVar, SI);
  else
    DbgVal = Builder.insertDbgValueIntrinsic(SI->getOperand(0), 0, DIVar, SI);

  // Propagate any debug metadata from the store onto the dbg.value.
  DebugLoc SIDL = SI->getDebugLoc();
  if (!SIDL.isUnknown())
    DbgVal->setDebugLoc(SIDL);
  // Otherwise propagate debug metadata from the dbg.declare.
  else
    DbgVal->setDebugLoc(DDI->getDebugLoc());
  return true;
}

/// Inserts an llvm.dbg.value intrinsic before a load of an alloca'd value
/// that has an associated llvm.dbg.declare intrinsic.
bool llvm::ConvertDebugDeclareToDebugValue(DbgDeclareInst *DDI,
                                           LoadInst *LI, DIBuilder &Builder) {
  DIVariable DIVar(DDI->getVariable());
  assert((!DIVar || DIVar.isVariable()) &&
         "Variable in DbgDeclareInst should be either null or a DIVariable.");
  if (!DIVar)
    return false;

  if (LdStHasDebugValue(DIVar, LI))
    return true;

  Instruction *DbgVal =
    Builder.insertDbgValueIntrinsic(LI->getOperand(0), 0,
                                    DIVar, LI);

  // Propagate any debug metadata from the load onto the dbg.value.
  DebugLoc LIDL = LI->getDebugLoc();
  if (!LIDL.isUnknown())
    DbgVal->setDebugLoc(LIDL);
  // Otherwise propagate debug metadata from the dbg.declare.
  else
    DbgVal->setDebugLoc(DDI->getDebugLoc());
  return true;
}

/// LowerDbgDeclare - Lowers llvm.dbg.declare intrinsics into an appropriate
/// set of llvm.dbg.value intrinsics.
bool llvm::LowerDbgDeclare(Function &F) {
  DIBuilder DIB(*F.getParent());
  SmallVector<DbgDeclareInst *, 4> Dbgs;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI)
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI) {
      if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(BI))
        Dbgs.push_back(DDI);
    }
  if (Dbgs.empty())
    return false;

  for (SmallVectorImpl<DbgDeclareInst *>::iterator I = Dbgs.begin(),
         E = Dbgs.end(); I != E; ++I) {
    DbgDeclareInst *DDI = *I;
    if (AllocaInst *AI = dyn_cast_or_null<AllocaInst>(DDI->getAddress())) {
      // We only remove the dbg.declare intrinsic if all uses are
      // converted to dbg.value intrinsics.
      bool RemoveDDI = true;
      for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
           UI != E; ++UI)
        if (StoreInst *SI = dyn_cast<StoreInst>(*UI))
          ConvertDebugDeclareToDebugValue(DDI, SI, DIB);
        else if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
          ConvertDebugDeclareToDebugValue(DDI, LI, DIB);
        else
          RemoveDDI = false;
      if (RemoveDDI)
        DDI->eraseFromParent();
    }
  }
  return true;
}
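// Editor's note (illustrative sketch, not from the original source): the
// lowering above rewrites, roughly,
//
//   %x = alloca i32
//   call void @llvm.dbg.declare(metadata !{i32* %x}, metadata !var)
//   store i32 %v, i32* %x
//
// into a dbg.value that tracks the stored SSA value directly:
//
//   %x = alloca i32
//   call void @llvm.dbg.value(metadata !{i32 %v}, i64 0, metadata !var)
//   store i32 %v, i32* %x
//
// (metadata syntax shown in the style used by this LLVM version; the exact
// form varies between releases).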
/// FindAllocaDbgDeclare - Finds the llvm.dbg.declare intrinsic describing the
/// alloca 'V', if any.
DbgDeclareInst *llvm::FindAllocaDbgDeclare(Value *V) {
  if (MDNode *DebugNode = MDNode::getIfExists(V->getContext(), V))
    for (Value::use_iterator UI = DebugNode->use_begin(),
         E = DebugNode->use_end(); UI != E; ++UI)
      if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
        return DDI;

  return 0;
}

bool llvm::replaceDbgDeclareForAlloca(AllocaInst *AI, Value *NewAllocaAddress,
                                      DIBuilder &Builder) {
  DbgDeclareInst *DDI = FindAllocaDbgDeclare(AI);
  if (!DDI)
    return false;
  DIVariable DIVar(DDI->getVariable());
  assert((!DIVar || DIVar.isVariable()) &&
         "Variable in DbgDeclareInst should be either null or a DIVariable.");
  if (!DIVar)
    return false;

  // Create a copy of the original DIDescriptor for the user variable,
  // appending a "deref" operation to the list of address elements, as the new
  // llvm.dbg.declare will take a value storing the address of the variable's
  // memory, not the alloca itself.
  Type *Int64Ty = Type::getInt64Ty(AI->getContext());
  SmallVector<Value*, 4> NewDIVarAddress;
  if (DIVar.hasComplexAddress()) {
    for (unsigned i = 0, n = DIVar.getNumAddrElements(); i < n; ++i) {
      NewDIVarAddress.push_back(
          ConstantInt::get(Int64Ty, DIVar.getAddrElement(i)));
    }
  }
  NewDIVarAddress.push_back(ConstantInt::get(Int64Ty, DIBuilder::OpDeref));
  DIVariable NewDIVar = Builder.createComplexVariable(
      DIVar.getTag(), DIVar.getContext(), DIVar.getName(),
      DIVar.getFile(), DIVar.getLineNumber(), DIVar.getType(),
      NewDIVarAddress, DIVar.getArgNumber());

  // Insert the new llvm.dbg.declare in the same basic block as the original
  // alloca, and remove the old llvm.dbg.declare.
  BasicBlock *BB = AI->getParent();
  Builder.insertDeclare(NewAllocaAddress, NewDIVar, BB);
  DDI->eraseFromParent();
  return true;
}

/// changeToUnreachable - Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
static void changeToUnreachable(Instruction *I, bool UseLLVMTrap) {
  BasicBlock *BB = I->getParent();
  // Loop over all of the successors, removing BB's entry from any PHI
  // nodes.
  for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
    (*SI)->removePredecessor(BB);

  // Insert a call to llvm.trap right before this.  This turns the undefined
  // behavior into a hard fail instead of falling through into random code.
  if (UseLLVMTrap) {
    Function *TrapFn =
      Intrinsic::getDeclaration(BB->getParent()->getParent(), Intrinsic::trap);
    CallInst *CallTrap = CallInst::Create(TrapFn, "", I);
    CallTrap->setDebugLoc(I->getDebugLoc());
  }
  new UnreachableInst(I->getContext(), I);

  // All instructions after this are dead.
  BasicBlock::iterator BBI = I, BBE = BB->end();
  while (BBI != BBE) {
    if (!BBI->use_empty())
      BBI->replaceAllUsesWith(UndefValue::get(BBI->getType()));
    BB->getInstList().erase(BBI++);
  }
}
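// Editor's note (illustrative only, not part of the original source): the
// helper below rewrites an invoke of a callee that cannot unwind into a plain
// call.  In IR terms, roughly:
//
//   invoke void @f() to label %normal unwind label %lpad
//     ==>
//   call void @f()
//   br label %normal
//
// with %lpad losing the current block as a predecessor (and its PHI nodes
// updated accordingly).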
/// changeToCall - Convert the specified invoke into a normal call.
static void changeToCall(InvokeInst *II) {
  SmallVector<Value*, 8> Args(II->op_begin(), II->op_end() - 3);
  CallInst *NewCall = CallInst::Create(II->getCalledValue(), Args, "", II);
  NewCall->takeName(II);
  NewCall->setCallingConv(II->getCallingConv());
  NewCall->setAttributes(II->getAttributes());
  NewCall->setDebugLoc(II->getDebugLoc());
  II->replaceAllUsesWith(NewCall);

  // Follow the call by a branch to the normal destination.
  BranchInst::Create(II->getNormalDest(), II);

  // Update PHI nodes in the unwind destination.
  II->getUnwindDest()->removePredecessor(II->getParent());
  II->eraseFromParent();
}

static bool markAliveBlocks(BasicBlock *BB,
                            SmallPtrSet<BasicBlock*, 128> &Reachable) {

  SmallVector<BasicBlock*, 128> Worklist;
  Worklist.push_back(BB);
  Reachable.insert(BB);
  bool Changed = false;
  do {
    BB = Worklist.pop_back_val();

    // Do a quick scan of the basic block, turning any obviously unreachable
    // instructions into LLVM unreachable insts.  The instruction combining pass
    // canonicalizes unreachable insts into stores to null or undef.
    for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ++BBI) {
      if (CallInst *CI = dyn_cast<CallInst>(BBI)) {
        if (CI->doesNotReturn()) {
          // If we found a call to a no-return function, insert an unreachable
          // instruction after it.  Make sure there isn't *already* one there
          // though.
          ++BBI;
          if (!isa<UnreachableInst>(BBI)) {
            // Don't insert a call to llvm.trap right before the unreachable.
            changeToUnreachable(BBI, false);
            Changed = true;
          }
          break;
        }
      }

      // Store to undef and store to null are undefined and used to signal that
      // they should be changed to unreachable by passes that can't modify the
      // CFG.
      if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
        // Don't touch volatile stores.
        if (SI->isVolatile()) continue;

        Value *Ptr = SI->getOperand(1);

        if (isa<UndefValue>(Ptr) ||
            (isa<ConstantPointerNull>(Ptr) &&
             SI->getPointerAddressSpace() == 0)) {
          changeToUnreachable(SI, true);
          Changed = true;
          break;
        }
      }
    }

    // Turn invokes that call 'nounwind' functions into ordinary calls.
    if (InvokeInst *II = dyn_cast<InvokeInst>(BB->getTerminator())) {
      Value *Callee = II->getCalledValue();
      if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
        changeToUnreachable(II, true);
        Changed = true;
      } else if (II->doesNotThrow()) {
        if (II->use_empty() && II->onlyReadsMemory()) {
          // The invoke's result is unused and the callee has no side effects,
          // so just jump to the normal destination.
          BranchInst::Create(II->getNormalDest(), II);
          II->getUnwindDest()->removePredecessor(II->getParent());
          II->eraseFromParent();
        } else
          changeToCall(II);
        Changed = true;
      }
    }

    Changed |= ConstantFoldTerminator(BB, true);
    for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
      if (Reachable.insert(*SI))
        Worklist.push_back(*SI);
  } while (!Worklist.empty());
  return Changed;
}

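// Editor's note (illustrative only, not from the original source):
// markAliveBlocks relies on the convention that a store to null/undef marks
// unreachable code, and on no-return calls ending their block.  Roughly, in
// IR:
//
//   store i32 1, i32* null
//     ==>
//   call void @llvm.trap()
//   unreachable
//
// Any code after the newly inserted unreachable is dropped, and successor PHI
// nodes lose the corresponding incoming edge.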
/// removeUnreachableBlocks - Remove blocks that are not reachable, even
/// if they are in a dead cycle.  Return true if a change was made, false
/// otherwise.
bool llvm::removeUnreachableBlocks(Function &F) {
  SmallPtrSet<BasicBlock*, 128> Reachable;
  bool Changed = markAliveBlocks(F.begin(), Reachable);

  // If every block is reachable, there is nothing left to do.
  if (Reachable.size() == F.size())
    return Changed;

  assert(Reachable.size() < F.size());
  NumRemoved += F.size()-Reachable.size();

  // Loop over all of the basic blocks that are not reachable, dropping all of
  // their internal references...
  for (Function::iterator BB = ++F.begin(), E = F.end(); BB != E; ++BB) {
    if (Reachable.count(BB))
      continue;

    for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB); SI != SE; ++SI)
      if (Reachable.count(*SI))
        (*SI)->removePredecessor(BB);
    BB->dropAllReferences();
  }

  for (Function::iterator I = ++F.begin(); I != F.end();)
    if (!Reachable.count(I))
      I = F.getBasicBlockList().erase(I);
    else
      ++I;

  return true;
}
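// Editor's usage sketch (illustrative only, not part of this file): these
// utilities are normally driven from a pass.  A hypothetical cleanup pass
// might do something like:
//
//   virtual bool runOnFunction(Function &F) {
//     bool Changed = removeUnreachableBlocks(F);
//     for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
//       Changed |= SimplifyInstructionsInBlock(BB, /*TD=*/0, /*TLI=*/0);
//     return Changed;
//   }
//
// Callers typically pass in DataLayout and TargetLibraryInfo when available so
// the simplification helpers can reason about target-specific facts.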